#ifdef DWC_UTE_CFI
#include "dwc_otg_cfi.h"
-extern int init_cfi(cfiobject_t * cfiobj);
+extern int init_cfi(cfiobject_t *cfiobj);
#endif
/**
* Choose endpoint from ep arrays using usb_ep structure.
*/
-static dwc_otg_pcd_ep_t *get_ep_from_handle(dwc_otg_pcd_t * pcd, void *handle)
+static dwc_otg_pcd_ep_t *get_ep_from_handle(dwc_otg_pcd_t *pcd, void *handle)
{
int i;
if (pcd->ep0.priv == handle) {
/**
* This function completes a request. It call's the request call back.
*/
-void dwc_otg_request_done(dwc_otg_pcd_ep_t * ep, dwc_otg_pcd_request_t * req,
+void dwc_otg_request_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req,
int32_t status)
{
unsigned stopped = ep->stopped;
/**
* This function terminates all the requsts in the EP request queue.
*/
-void dwc_otg_request_nuke(dwc_otg_pcd_ep_t * ep)
+void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *ep)
{
dwc_otg_pcd_request_t *req;
}
}
-void dwc_otg_pcd_start(dwc_otg_pcd_t * pcd,
+void dwc_otg_pcd_start(dwc_otg_pcd_t *pcd,
const struct dwc_otg_pcd_function_ops *fops)
{
pcd->fops = fops;
* Initialized the Core for Device mode.
*/
if (dwc_otg_is_device_mode(core_if)) {
- //dwc_otg_core_dev_init(core_if);
+ /* dwc_otg_core_dev_init(core_if); */
/* Set core_if's lock pointer to the pcd->lock */
core_if->lock = pcd->lock;
}
/** CFI-specific buffer allocation function for EP */
#ifdef DWC_UTE_CFI
-uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t * pcd, void *pep, dwc_dma_t * addr,
+uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t *pcd, void *pep, dwc_dma_t *addr,
size_t buflen, int flags)
{
dwc_otg_pcd_ep_t *ep;
flags);
}
#else
-uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t * pcd, void *pep, dwc_dma_t * addr,
+uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t *pcd, void *pep, dwc_dma_t *addr,
size_t buflen, int flags);
#endif
static int32_t dwc_otg_pcd_stop_cb(void *p)
{
dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
- extern void dwc_otg_pcd_stop(dwc_otg_pcd_t * _pcd);
+ extern void dwc_otg_pcd_stop(dwc_otg_pcd_t *_pcd);
dwc_otg_pcd_stop(pcd);
return 1;
* This function allocates a DMA Descriptor chain for the Endpoint
* buffer to be used for a transfer to/from the specified endpoint.
*/
-dwc_otg_dev_dma_desc_t *dwc_otg_ep_alloc_desc_chain(dwc_dma_t * dma_desc_addr,
+dwc_otg_dev_dma_desc_t *dwc_otg_ep_alloc_desc_chain(dwc_dma_t *dma_desc_addr,
uint32_t count)
{
return DWC_DMA_ALLOC_ATOMIC(count * sizeof(dwc_otg_dev_dma_desc_t),
/**
* This function frees a DMA Descriptor chain that was allocated by ep_alloc_desc.
*/
-void dwc_otg_ep_free_desc_chain(dwc_otg_dev_dma_desc_t * desc_addr,
+void dwc_otg_ep_free_desc_chain(dwc_otg_dev_dma_desc_t *desc_addr,
uint32_t dma_desc_addr, uint32_t count)
{
DWC_DMA_FREE(count * sizeof(dwc_otg_dev_dma_desc_t), desc_addr,
* @param dwc_ep The EP to start the transfer on.
*
*/
-void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t * core_if,
- dwc_ep_t * dwc_ep)
+void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t *core_if,
+ dwc_ep_t *dwc_ep)
{
dsts_data_t dsts = {.d32 = 0 };
dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval;
else
dwc_ep->desc_cnt =
- dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
+ dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
dwc_ep->bInterval;
/** Allocate descriptors for double buffering */
i += dwc_ep->pkt_per_frm) {
for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
- uint32_t len = (j + 1) * dwc_ep->maxpacket;
+ uint32_t len = (j + 1)*dwc_ep->maxpacket;
if (len > dwc_ep->data_per_frame)
data_per_desc =
dwc_ep->data_per_frame -
- j * dwc_ep->maxpacket;
+ j*dwc_ep->maxpacket;
else
data_per_desc = dwc_ep->maxpacket;
len = data_per_desc % 4;
}
for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
- uint32_t len = (j + 1) * dwc_ep->maxpacket;
+ uint32_t len = (j + 1)*dwc_ep->maxpacket;
if (len > dwc_ep->data_per_frame)
data_per_desc =
dwc_ep->data_per_frame -
- j * dwc_ep->maxpacket;
+ j*dwc_ep->maxpacket;
else
data_per_desc = dwc_ep->maxpacket;
len = data_per_desc % 4;
}
sts.b_iso_out.ioc = 1;
- len = (j + 1) * dwc_ep->maxpacket;
+ len = (j + 1)*dwc_ep->maxpacket;
if (len > dwc_ep->data_per_frame)
data_per_desc =
- dwc_ep->data_per_frame - j * dwc_ep->maxpacket;
+ dwc_ep->data_per_frame - j*dwc_ep->maxpacket;
else
data_per_desc = dwc_ep->maxpacket;
len = data_per_desc % 4;
for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
i += dwc_ep->pkt_per_frm) {
for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
- uint32_t len = (j + 1) * dwc_ep->maxpacket;
+ uint32_t len = (j + 1)*dwc_ep->maxpacket;
if (len > dwc_ep->data_per_frame)
data_per_desc =
dwc_ep->data_per_frame -
- j * dwc_ep->maxpacket;
+ j*dwc_ep->maxpacket;
else
data_per_desc = dwc_ep->maxpacket;
len = data_per_desc % 4;
}
for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
data_per_desc =
- ((j + 1) * dwc_ep->maxpacket >
+ ((j + 1)*dwc_ep->maxpacket >
dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
- j * dwc_ep->maxpacket : dwc_ep->maxpacket;
+ j*dwc_ep->maxpacket : dwc_ep->maxpacket;
data_per_desc +=
(data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
sts.b_iso_out.rxbytes = data_per_desc;
sts.b_iso_out.ioc = 1;
sts.b_iso_out.l = 1;
data_per_desc =
- ((j + 1) * dwc_ep->maxpacket >
+ ((j + 1)*dwc_ep->maxpacket >
dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
- j * dwc_ep->maxpacket : dwc_ep->maxpacket;
+ j*dwc_ep->maxpacket : dwc_ep->maxpacket;
data_per_desc +=
(data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
sts.b_iso_out.rxbytes = data_per_desc;
fifosize_data_t txfifosize, rxfifosize;
txfifosize.d32 =
- DWC_READ_REG32(&core_if->dev_if->in_ep_regs[dwc_ep->num]->
- dtxfsts);
+ DWC_READ_REG32(&core_if->dev_if->
+ in_ep_regs[dwc_ep->num]->dtxfsts);
rxfifosize.d32 =
DWC_READ_REG32(&core_if->core_global_regs->grxfsiz);
* @param ep The EP to start the transfer on.
*
*/
-void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t * core_if,
- dwc_ep_t * ep)
+void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if,
+ dwc_ep_t *ep)
{
depctl_data_t depctl = {.d32 = 0 };
volatile uint32_t *addr;
deptsiz_data_t deptsiz = {.d32 = 0 };
ep->xfer_len =
- ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval;
+ ep->data_per_frame*ep->buf_proc_intrvl / ep->bInterval;
ep->pkt_cnt =
(ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
ep->xfer_count = 0;
/* Program the transfer size and packet count
* as follows: xfersize = N * maxpacket +
* short_packet pktcnt = N + (short_packet
- * exist ? 1 : 0)
+ * exist ? 1 : 0)
*/
deptsiz.b.mc = ep->pkt_per_frm;
deptsiz.b.xfersize = ep->xfer_len;
deptsiz.b.pktcnt =
(ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
- DWC_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
- dieptsiz, deptsiz.d32);
+ DWC_WRITE_REG32(&core_if->dev_if->
+ in_ep_regs[ep->num]->dieptsiz,
+ deptsiz.d32);
/* Write the DMA register */
DWC_WRITE_REG32(&
- (core_if->dev_if->in_ep_regs[ep->num]->
- diepdma), (uint32_t) ep->dma_addr);
+ (core_if->dev_if->
+ in_ep_regs[ep->num]->diepdma),
+ (uint32_t) ep->dma_addr);
} else {
deptsiz.b.pktcnt =
(ep->xfer_len + (ep->maxpacket - 1)) /
ep->maxpacket;
- deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
+ deptsiz.b.xfersize = deptsiz.b.pktcnt*ep->maxpacket;
- DWC_WRITE_REG32(&core_if->dev_if->out_ep_regs[ep->num]->
- doeptsiz, deptsiz.d32);
+ DWC_WRITE_REG32(&core_if->dev_if->
+ out_ep_regs[ep->num]->doeptsiz,
+ deptsiz.d32);
/* Write the DMA register */
DWC_WRITE_REG32(&
- (core_if->dev_if->out_ep_regs[ep->num]->
- doepdma), (uint32_t) ep->dma_addr);
+ (core_if->dev_if->
+ out_ep_regs[ep->num]->doepdma),
+ (uint32_t) ep->dma_addr);
}
/** Enable endpoint, clear nak */
* @param ep The EP to start the transfer on.
*/
-static void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t * core_if,
- dwc_ep_t * ep)
+static void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t *core_if,
+ dwc_ep_t *ep)
{
if (core_if->dma_enable) {
if (core_if->dma_desc_enable) {
dwc_otg_iso_ep_start_buf_transfer(core_if, ep);
} else {
ep->cur_pkt_addr =
- (ep->proc_buf_num) ? ep->xfer_buff1 : ep->
- xfer_buff0;
+ (ep->proc_buf_num) ? ep->
+ xfer_buff1 : ep->xfer_buff0;
ep->cur_pkt_dma_addr =
- (ep->proc_buf_num) ? ep->dma_addr1 : ep->
- dma_addr0;
+ (ep->proc_buf_num) ? ep->
+ dma_addr1 : ep->dma_addr0;
dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
}
}
/**
* This function stops transfer for an EP and
- * resets the ep's variables.
+ * resets the ep's variables.
*
* @param core_if Programming view of DWC_otg controller.
* @param ep The EP to start the transfer on.
*/
-void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t * core_if, dwc_ep_t * ep)
+void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
{
depctl_data_t depctl = {.d32 = 0 };
volatile uint32_t *addr;
ep->iso_dma_desc_addr = 0;
}
-int dwc_otg_pcd_iso_ep_start(dwc_otg_pcd_t * pcd, void *ep_handle,
- uint8_t * buf0, uint8_t * buf1, dwc_dma_t dma0,
+int dwc_otg_pcd_iso_ep_start(dwc_otg_pcd_t *pcd, void *ep_handle,
+ uint8_t *buf0, uint8_t *buf1, dwc_dma_t dma0,
dwc_dma_t dma1, int sync_frame, int dp_frame,
int data_per_frame, int start_frame,
int buf_proc_intrvl, void *req_handle,
if (!core_if->pti_enh_enable) {
dwc_ep->pkt_cnt =
- dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
+ dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
dwc_ep->bInterval;
} else {
dwc_ep->pkt_cnt =
if (core_if->dma_desc_enable) {
dwc_ep->desc_cnt =
- dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
+ dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
dwc_ep->bInterval;
}
if (atomic_alloc) {
dwc_ep->pkt_info =
- DWC_ALLOC_ATOMIC(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
+ DWC_ALLOC_ATOMIC(sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
} else {
dwc_ep->pkt_info =
- DWC_ALLOC(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
+ DWC_ALLOC(sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
}
if (!dwc_ep->pkt_info) {
DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
}
if (core_if->pti_enh_enable) {
dwc_memset(dwc_ep->pkt_info, 0,
- sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
+ sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
}
dwc_ep->cur_pkt = 0;
return 0;
}
-int dwc_otg_pcd_iso_ep_stop(dwc_otg_pcd_t * pcd, void *ep_handle,
+int dwc_otg_pcd_iso_ep_stop(dwc_otg_pcd_t *pcd, void *ep_handle,
void *req_handle)
{
dwc_irqflags_t flags = 0;
* - Every time a sync period completes this function is called to
* perform data exchange between PCD and gadget
*/
-void dwc_otg_iso_buffer_done(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * ep,
+void dwc_otg_iso_buffer_done(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep,
void *req_handle)
{
int i;
}
}
-int dwc_otg_pcd_get_iso_packet_count(dwc_otg_pcd_t * pcd, void *ep_handle,
+int dwc_otg_pcd_get_iso_packet_count(dwc_otg_pcd_t *pcd, void *ep_handle,
void *iso_req_handle)
{
dwc_otg_pcd_ep_t *ep;
return dwc_ep->pkt_cnt;
}
-void dwc_otg_pcd_get_iso_packet_params(dwc_otg_pcd_t * pcd, void *ep_handle,
+void dwc_otg_pcd_get_iso_packet_params(dwc_otg_pcd_t *pcd, void *ep_handle,
void *iso_req_handle, int packet,
int *status, int *actual, int *offset)
{
#endif /* DWC_EN_ISOC */
-static void dwc_otg_pcd_init_ep(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * pcd_ep,
+static void dwc_otg_pcd_init_ep(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *pcd_ep,
uint32_t is_in, uint32_t ep_num)
{
/* Init EP structure */
/**
* Initialize ep's
*/
-static void dwc_otg_pcd_reinit(dwc_otg_pcd_t * pcd)
+static void dwc_otg_pcd_reinit(dwc_otg_pcd_t *pcd)
{
int i;
uint32_t hwcfg1;
/* Power off the core */
if (core_if->power_down == 2) {
gpwrdn.b.pwrdnswtch = 1;
- DWC_MODIFY_REG32(&core_if->
- core_global_regs->gpwrdn,
- gpwrdn.d32, 0);
+ DWC_MODIFY_REG32(&core_if->core_global_regs->
+ gpwrdn, gpwrdn.d32, 0);
}
gpwrdn.d32 = 0;
if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
- core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
+ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->
+ p);
}
/* Clear Session Request */
* Tasklet
*
*/
-extern void start_next_request(dwc_otg_pcd_ep_t * ep);
+extern void start_next_request(dwc_otg_pcd_ep_t *ep);
static void start_xfer_tasklet_func(void *data)
{
if (pcd->ep0.queue_sof) {
pcd->ep0.queue_sof = 0;
start_next_request(&pcd->ep0);
- // break;
+ /* break; */
}
for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
if (pcd->in_ep[i].queue_sof) {
pcd->in_ep[i].queue_sof = 0;
start_next_request(&pcd->in_ep[i]);
- // break;
+ /* break; */
}
}
* This function initialized the PCD portion of the driver.
*
*/
-dwc_otg_pcd_t *dwc_otg_pcd_init(dwc_otg_core_if_t * core_if)
+dwc_otg_pcd_t *dwc_otg_pcd_init(dwc_otg_core_if_t *core_if)
{
dwc_otg_pcd_t *pcd = NULL;
dwc_otg_dev_if_t *dev_if;
}
/*
- * Initialized the Core for Device mode here if there is nod ADP support.
+ * Initialize the Core for Device mode here if there is no ADP support.
* Otherwise it will be done later in dwc_otg_adp_start routine.
*/
- //if (dwc_otg_is_device_mode(core_if) /*&& !core_if->adp_enable */ ) {
- // dwc_otg_core_dev_init(core_if);
- //}
+ /* if (dwc_otg_is_device_mode(core_if) && !core_if->adp_enable) { */
+ /* dwc_otg_core_dev_init(core_if); */
+ /* } */
/*
* Register the PCD Callbacks.
if (GET_CORE_IF(pcd)->dma_enable) {
pcd->setup_pkt =
DWC_DMA_ALLOC_ATOMIC(sizeof(*pcd->setup_pkt) * 5,
- &pcd->setup_pkt_dma_handle);
+ &pcd->setup_pkt_dma_handle);
if (pcd->setup_pkt == NULL) {
DWC_FREE(pcd);
return NULL;
pcd->status_buf =
DWC_DMA_ALLOC_ATOMIC(sizeof(uint16_t),
- &pcd->status_buf_dma_handle);
+ &pcd->status_buf_dma_handle);
if (pcd->status_buf == NULL) {
DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
pcd->setup_pkt, pcd->setup_pkt_dma_handle);
core_if->srp_timer = DWC_TIMER_ALLOC("SRP TIMER", srp_timeout, core_if);
if (core_if->core_params->dev_out_nak) {
- /**
+ /**
* Initialize xfer timeout timer. Implemented for
* 2.93a feature "Device DDMA OUT NAK Enhancement"
*/
/**
* Remove PCD specific data
*/
-void dwc_otg_pcd_remove(dwc_otg_pcd_t * pcd)
+void dwc_otg_pcd_remove(dwc_otg_pcd_t *pcd)
{
dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
int i;
/**
* Returns whether registered pcd is dual speed or not
*/
-uint32_t dwc_otg_pcd_is_dualspeed(dwc_otg_pcd_t * pcd)
+uint32_t dwc_otg_pcd_is_dualspeed(dwc_otg_pcd_t *pcd)
{
dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
/**
* Returns whether registered pcd is OTG capable or not
*/
-uint32_t dwc_otg_pcd_is_otg(dwc_otg_pcd_t * pcd)
+uint32_t dwc_otg_pcd_is_otg(dwc_otg_pcd_t *pcd)
{
dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
gusbcfg_data_t usbcfg = {.d32 = 0 };
uint32_t retval = 0;
usbcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->gusbcfg);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
if (!usbcfg.b.srpcap || !usbcfg.b.hnpcap)
return 0;
- else
+ else
return 1;
# else
if (!usbcfg.b.srpcap)
return 0;
- else
+ else
retval |= 1;
if (usbcfg.b.hnpcap)
retval |= 2;
-
- if (core_if->adp_enable)
+
+ if (core_if->adp_enable)
retval |= 4;
#endif
* This function assigns periodic Tx FIFO to an periodic EP
* in shared Tx FIFO mode
*/
-static uint32_t assign_tx_fifo(dwc_otg_core_if_t * core_if)
+static uint32_t assign_tx_fifo(dwc_otg_core_if_t *core_if)
{
uint32_t TxMsk = 1;
int i;
* This function assigns periodic Tx FIFO to an periodic EP
* in shared Tx FIFO mode
*/
-static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t * core_if)
+static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t *core_if)
{
uint32_t PerTxMsk = 1;
int i;
* This function releases periodic Tx FIFO
* in shared Tx FIFO mode
*/
-static void release_perio_tx_fifo(dwc_otg_core_if_t * core_if,
+static void release_perio_tx_fifo(dwc_otg_core_if_t *core_if,
uint32_t fifo_num)
{
core_if->p_tx_msk =
* This function releases periodic Tx FIFO
* in shared Tx FIFO mode
*/
-static void release_tx_fifo(dwc_otg_core_if_t * core_if, uint32_t fifo_num)
+static void release_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num)
{
core_if->tx_msk =
(core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
}
/**
- * This function is being called from gadget
+ * This function is being called from gadget
* to enable PCD endpoint.
*/
-int dwc_otg_pcd_ep_enable(dwc_otg_pcd_t * pcd,
- const uint8_t * ep_desc, void *usb_ep)
+int dwc_otg_pcd_ep_enable(dwc_otg_pcd_t *pcd,
+ const uint8_t *ep_desc, void *usb_ep)
{
int num, dir;
dwc_otg_pcd_ep_t *ep = NULL;
const usb_endpoint_descriptor_t *desc;
dwc_irqflags_t flags;
-// fifosize_data_t dptxfsiz = {.d32 = 0 };
-// gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
-// gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
+ /* fifosize_data_t dptxfsiz = {.d32 = 0 }; */
+ /* gdfifocfg_data_t gdfifocfg = {.d32 = 0 }; */
+ /* gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 }; */
int retval = 0;
int i, epcount;
}
/* Calculating EP info controller base address */
- #if 0
+#if 0
if (ep->dwc_ep.tx_fifo_num
&& GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
gdfifocfg.d32 =
- DWC_READ_REG32(&GET_CORE_IF(pcd)->
- core_global_regs->gdfifocfg);
+ DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->
+ gdfifocfg);
gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
dptxfsiz.d32 =
(DWC_READ_REG32
- (&GET_CORE_IF(pcd)->core_global_regs->
- dtxfsiz[ep->dwc_ep.tx_fifo_num - 1]) >> 16);
+ (&GET_CORE_IF(pcd)->
+ core_global_regs->dtxfsiz[ep->dwc_ep.tx_fifo_num -
+ 1]) >> 16);
gdfifocfg.b.epinfobase =
gdfifocfgbase.d32 + dptxfsiz.d32;
if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
- DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
- core_global_regs->gdfifocfg,
- gdfifocfg.d32);
+ DWC_WRITE_REG32(&GET_CORE_IF
+ (pcd)->core_global_regs->
+ gdfifocfg, gdfifocfg.d32);
}
}
- #endif
+#endif
}
/* Set initial data PID. */
if (ep->dwc_ep.type == UE_BULK) {
if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
#endif
ep->dwc_ep.desc_addr =
- dwc_otg_ep_alloc_desc_chain(&ep->
- dwc_ep.dma_desc_addr,
+ dwc_otg_ep_alloc_desc_chain(&ep->dwc_ep.
+ dma_desc_addr,
MAX_DMA_DESC_CNT);
if (!ep->dwc_ep.desc_addr) {
DWC_WARN("%s, can't allocate DMA descriptor\n",
}
/**
- * This function is being called from gadget
+ * This function is being called from gadget
* to disable PCD endpoint.
*/
-int dwc_otg_pcd_ep_disable(dwc_otg_pcd_t * pcd, void *ep_handle)
+int dwc_otg_pcd_ep_disable(dwc_otg_pcd_t *pcd, void *ep_handle)
{
dwc_otg_pcd_ep_t *ep;
dwc_irqflags_t flags;
dwc_dma_t dma_desc_addr;
gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
-// fifosize_data_t dptxfsiz = {.d32 = 0 };
+ /* fifosize_data_t dptxfsiz = {.d32 = 0 }; */
ep = get_ep_from_handle(pcd, ep_handle);
}
release_perio_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
release_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
- #if 0
+#if 0
if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
/* Decreasing EPinfo Base Addr */
dptxfsiz.d32 =
(DWC_READ_REG32
- (&GET_CORE_IF(pcd)->
- core_global_regs->dtxfsiz[ep->dwc_ep.tx_fifo_num-1]) >> 16);
- gdfifocfg.b.epinfobase = gdfifocfgbase.d32 - dptxfsiz.d32;
+ (&GET_CORE_IF(pcd)->core_global_regs->
+ dtxfsiz[ep->dwc_ep.tx_fifo_num - 1]) >> 16);
+ gdfifocfg.b.epinfobase =
+ gdfifocfgbase.d32 - dptxfsiz.d32;
if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
- DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg,
+ DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
+ core_global_regs->gdfifocfg,
gdfifocfg.d32);
}
}
- #endif
+#endif
}
/* Free DMA Descriptors */
* Free the request and its extended parts
*
*/
-void dwc_pcd_xiso_ereq_free(dwc_otg_pcd_ep_t * ep, dwc_otg_pcd_request_t * req)
+void dwc_pcd_xiso_ereq_free(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req)
{
DWC_FREE(req->ext_req.per_io_frame_descs);
DWC_FREE(req);
* Start the next request in the endpoint's queue.
*
*/
-int dwc_otg_pcd_xiso_start_next_request(dwc_otg_pcd_t * pcd,
- dwc_otg_pcd_ep_t * ep)
+int dwc_otg_pcd_xiso_start_next_request(dwc_otg_pcd_t *pcd,
+ dwc_otg_pcd_ep_t *ep)
{
int i;
dwc_otg_pcd_request_t *req = NULL;
dwcep = &ep->dwc_ep;
if (dwcep->xiso_active_xfers > 0) {
-#if 0 //Disable this to decrease s/w overhead that is crucial for Isoc transfers
+#if 0
+ /* Disable this to decrease s/w overhead
+ * that is crucial for Isoc transfers */
DWC_WARN("There are currently active transfers for EP%d \
- (active=%d; queued=%d)", dwcep->num, dwcep->xiso_active_xfers,
- dwcep->xiso_queued_xfers);
+ (active=%d; queued=%d)", dwcep->num,
+ dwcep->xiso_active_xfers, dwcep->xiso_queued_xfers);
#endif
return 0;
}
if (dwcep->is_in) {
/* Setup DMA Descriptor chain for IN Isoc request */
for (i = 0; i < ereq->pio_pkt_count; i++) {
- //if ((i % (nat + 1)) == 0)
+ /* if ((i % (nat + 1)) == 0) */
if (i > 0)
dwcep->xiso_frame_num =
(dwcep->xiso_bInterval +
(ddesc_iso[i].length %
dwcep->maxpacket) ? 1 : 0;
dwcep->desc_addr[i].status.b_iso_in.ioc = 0;
- dwcep->desc_addr[i].status.b_iso_in.pid = nat + 1;
+ dwcep->desc_addr[i].status.b_iso_in.pid =
+ nat + 1;
dwcep->desc_addr[i].status.b_iso_in.l = 0;
/* Process the last descriptor */
if (i == ereq->pio_pkt_count - 1) {
- dwcep->desc_addr[i].status.b_iso_in.ioc = 1;
- dwcep->desc_addr[i].status.b_iso_in.l = 1;
+ dwcep->desc_addr[i].status.b_iso_in.
+ ioc = 1;
+ dwcep->desc_addr[i].status.b_iso_in.l =
+ 1;
}
}
/* Setup and start the transfer for this endpoint */
dwcep->xiso_active_xfers++;
- DWC_WRITE_REG32(&GET_CORE_IF(pcd)->dev_if->
- in_ep_regs[dwcep->num]->diepdma,
+ DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
+ dev_if->in_ep_regs[dwcep->num]->diepdma,
dwcep->dma_desc_addr);
diepctl.d32 = 0;
diepctl.b.epena = 1;
diepctl.b.cnak = 1;
- DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
- in_ep_regs[dwcep->num]->diepctl, 0,
- diepctl.d32);
+ DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
+ dev_if->in_ep_regs[dwcep->num]->
+ diepctl, 0, diepctl.d32);
} else {
/* Setup DMA Descriptor chain for OUT Isoc request */
for (i = 0; i < ereq->pio_pkt_count; i++) {
- //if ((i % (nat + 1)) == 0)
- dwcep->xiso_frame_num = (dwcep->xiso_bInterval +
- dwcep->xiso_frame_num) & 0x3FFF;
+ /* if ((i % (nat + 1)) == 0) */
+ dwcep->xiso_frame_num = (dwcep->xiso_bInterval +
+ dwcep->
+ xiso_frame_num) &
+ 0x3FFF;
dwcep->desc_addr[i].buf =
req->dma + ddesc_iso[i].offset;
dwcep->desc_addr[i].status.b_iso_out.rxbytes =
(ddesc_iso[i].length %
dwcep->maxpacket) ? 1 : 0;
dwcep->desc_addr[i].status.b_iso_out.ioc = 0;
- dwcep->desc_addr[i].status.b_iso_out.pid = nat + 1;
+ dwcep->desc_addr[i].status.b_iso_out.pid =
+ nat + 1;
dwcep->desc_addr[i].status.b_iso_out.l = 0;
-
+
/* Process the last descriptor */
if (i == ereq->pio_pkt_count - 1) {
- dwcep->desc_addr[i].status.b_iso_out.ioc = 1;
- dwcep->desc_addr[i].status.b_iso_out.l = 1;
- }
+ dwcep->desc_addr[i].status.b_iso_out.
+ ioc = 1;
+ dwcep->desc_addr[i].status.b_iso_out.l =
+ 1;
+ }
}
-
+
/* Setup and start the transfer for this endpoint */
dwcep->xiso_active_xfers++;
- DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
- dev_if->out_ep_regs[dwcep->num]->
- doepdma, dwcep->dma_desc_addr);
+ DWC_WRITE_REG32(&GET_CORE_IF(pcd)->dev_if->
+ out_ep_regs[dwcep->num]->doepdma,
+ dwcep->dma_desc_addr);
diepctl.d32 = 0;
diepctl.b.epena = 1;
diepctl.b.cnak = 1;
- DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
- dev_if->out_ep_regs[dwcep->num]->
- doepctl, 0, diepctl.d32);
+ DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
+ out_ep_regs[dwcep->num]->doepctl, 0,
+ diepctl.d32);
}
} else {
/**
* - Remove the request from the queue
*/
-void complete_xiso_ep(dwc_otg_pcd_ep_t * ep)
+void complete_xiso_ep(dwc_otg_pcd_ep_t *ep)
{
dwc_otg_pcd_request_t *req = NULL;
struct dwc_iso_xreq_port *ereq = NULL;
dwc_ep_t *dwcep = NULL;
int i;
- //DWC_DEBUG();
+ /* DWC_DEBUG(); */
dwcep = &ep->dwc_ep;
/* Get the first pending request from the queue */
* Create and initialize the Isoc pkt descriptors of the extended request.
*
*/
-static int dwc_otg_pcd_xiso_create_pkt_descs(dwc_otg_pcd_request_t * req,
+static int dwc_otg_pcd_xiso_create_pkt_descs(dwc_otg_pcd_request_t *req,
void *ereq_nonport,
int atomic_alloc)
{
/**
*
*/
-int dwc_otg_pcd_xiso_ep_queue(dwc_otg_pcd_t * pcd, void *ep_handle,
- uint8_t * buf, dwc_dma_t dma_buf, uint32_t buflen,
+int dwc_otg_pcd_xiso_ep_queue(dwc_otg_pcd_t *pcd, void *ep_handle,
+ uint8_t *buf, dwc_dma_t dma_buf, uint32_t buflen,
int zero, void *req_handle, int atomic_alloc,
void *ereq_nonport)
{
req->sent_zlp = zero;
req->priv = req_handle;
- //DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
+ /* DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags); */
ep->dwc_ep.dma_addr = dma_buf;
ep->dwc_ep.start_xfer_buff = buf;
ep->dwc_ep.xfer_buff = buf;
DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
ep->dwc_ep.xiso_queued_xfers++;
-//DWC_DEBUG("CP_0");
-//DWC_DEBUG("req->ext_req.tr_sub_flags=%d", req->ext_req.tr_sub_flags);
-//prn_ext_request((struct dwc_iso_xreq_port *) ereq_nonport);
-//prn_ext_request(&req->ext_req);
+ /* DWC_DEBUG("CP_0"); */
+ /* DWC_DEBUG("req->ext_req.tr_sub_flags=%d", req->ext_req.tr_sub_flags); */
+ /* prn_ext_request((struct dwc_iso_xreq_port *) ereq_nonport); */
+ /* prn_ext_request(&req->ext_req); */
- //DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+ /* DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags); */
/* If the req->status == ASAP then check if there is any active transfer
* for this endpoint. If no active transfers, then get the first entry
#endif
/* END ifdef DWC_UTE_PER_IO ***************************************************/
-int dwc_otg_pcd_ep_queue(dwc_otg_pcd_t * pcd, void *ep_handle,
- uint8_t * buf, dwc_dma_t dma_buf, uint32_t buflen,
+int dwc_otg_pcd_ep_queue(dwc_otg_pcd_t *pcd, void *ep_handle,
+ uint8_t *buf, dwc_dma_t dma_buf, uint32_t buflen,
int zero, void *req_handle, int atomic_alloc)
{
dwc_irqflags_t flags;
/*
* After adding request to the queue for IN ISOC wait for In Token Received
- * when TX FIFO is empty interrupt and for OUT ISOC wait for OUT Token
+ * when TX FIFO is empty interrupt and for OUT ISOC wait for OUT Token
* Received when EP is disabled interrupt to obtain starting microframe
* (odd/even) start transfer
*/
if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
if (req != 0) {
depctl_data_t depctl = {.d32 =
- DWC_READ_REG32(&pcd->core_if->dev_if->
- in_ep_regs[ep->dwc_ep.num]->
- diepctl) };
+ DWC_READ_REG32(&pcd->core_if->
+ dev_if->in_ep_regs[ep->
+ dwc_ep.
+ num]->diepctl)
+ };
++pcd->request_pending;
DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
if (ep->dwc_ep.is_in) {
depctl.b.cnak = 1;
- DWC_WRITE_REG32(&pcd->core_if->dev_if->
- in_ep_regs[ep->dwc_ep.num]->
- diepctl, depctl.d32);
+ DWC_WRITE_REG32(&pcd->core_if->
+ dev_if->in_ep_regs[ep->dwc_ep.
+ num]->diepctl,
+ depctl.d32);
}
DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
*/
if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
DWC_DEBUGPL(DBG_PCDV, "%d-OUT ZLP\n", ep->dwc_ep.num);
- //_req->zero = 1;
+ /* _req->zero = 1; */
}
/* Start the transfer */
dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
&ep->dwc_ep);
- } // non-ep0 endpoints
+ } /* non-ep0 endpoints */
else {
#ifdef DWC_UTE_CFI
if (ep->dwc_ep.buff_mode != BM_STANDARD) {
} else {
#endif
max_transfer =
- GET_CORE_IF(ep->pcd)->core_params->
- max_transfer_size;
+ GET_CORE_IF(ep->pcd)->
+ core_params->max_transfer_size;
/* Setup and start the Transfer */
if (req->dw_align_buf) {
diepmsk_data_t diepmsk = {.d32 = 0 };
diepmsk.b.intktxfemp = 1;
if (GET_CORE_IF(pcd)->multiproc_int_enable) {
- DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
- dev_if->dev_global_regs->diepeachintmsk
- [ep->dwc_ep.num], 0,
- diepmsk.d32);
+ DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
+ dev_global_regs->
+ diepeachintmsk[ep->dwc_ep.num],
+ 0, diepmsk.d32);
} else {
- DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
- dev_if->dev_global_regs->
- diepmsk, 0, diepmsk.d32);
+ DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
+ dev_global_regs->diepmsk, 0,
+ diepmsk.d32);
}
}
return 0;
}
-int dwc_otg_pcd_ep_dequeue(dwc_otg_pcd_t * pcd, void *ep_handle,
+int dwc_otg_pcd_ep_dequeue(dwc_otg_pcd_t *pcd, void *ep_handle,
void *req_handle)
{
dwc_irqflags_t flags;
}
-int dwc_otg_pcd_ep_halt(dwc_otg_pcd_t * pcd, void *ep_handle, int value)
+int dwc_otg_pcd_ep_halt(dwc_otg_pcd_t *pcd, void *ep_handle, int value)
{
dwc_otg_pcd_ep_t *ep;
dwc_irqflags_t flags;
ep->dwc_ep.is_in ? "IN" : "OUT");
retval = -DWC_E_AGAIN;
} else if (value == 0) {
- ep->dwc_ep.stall_clear_flag = 0;
+ ep->dwc_ep.stall_clear_flag = 0;
dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
} else if (value == 1) {
- stall:
+stall:
if (ep->dwc_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
dtxfsts_data_t txstatus;
fifosize_data_t txfifosize;
txfifosize.d32 =
- DWC_READ_REG32(&GET_CORE_IF(pcd)->
- core_global_regs->dtxfsiz[ep->dwc_ep.
- tx_fifo_num]);
+ DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->
+ dtxfsiz[ep->dwc_ep.tx_fifo_num]);
txstatus.d32 =
- DWC_READ_REG32(&GET_CORE_IF(pcd)->
- dev_if->in_ep_regs[ep->dwc_ep.num]->
- dtxfsts);
+ DWC_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
+ in_ep_regs[ep->dwc_ep.num]->dtxfsts);
if (txstatus.b.txfspcavail < txfifosize.b.depth) {
DWC_WARN("%s() Data In Tx Fifo\n", __func__);
/**
* This function initiates remote wakeup of the host from suspend state.
*/
-void dwc_otg_pcd_rem_wkup_from_suspend(dwc_otg_pcd_t * pcd, int set)
+void dwc_otg_pcd_rem_wkup_from_suspend(dwc_otg_pcd_t *pcd, int set)
{
dctl_data_t dctl = { 0 };
dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
/* Mask SRP detected interrupt from Power Down Logic */
gpwrdn.d32 = 0;
gpwrdn.b.srp_det_msk = 1;
- DWC_MODIFY_REG32(&core_if->
- core_global_regs->gpwrdn,
- gpwrdn.d32, 0);
+ DWC_MODIFY_REG32(&core_if->core_global_regs->
+ gpwrdn, gpwrdn.d32, 0);
/* Disable Power Down Logic */
gpwrdn.d32 = 0;
gpwrdn.b.pmuactv = 1;
- DWC_MODIFY_REG32(&core_if->
- core_global_regs->gpwrdn,
- gpwrdn.d32, 0);
+ DWC_MODIFY_REG32(&core_if->core_global_regs->
+ gpwrdn, gpwrdn.d32, 0);
/*
* Initialize the Core for Device mode.
}
dctl.b.rmtwkupsig = 1;
- DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
- dctl, 0, dctl.d32);
+ DWC_MODIFY_REG32(&core_if->dev_if->
+ dev_global_regs->dctl, 0, dctl.d32);
DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
dwc_mdelay(2);
- DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
- dctl, dctl.d32, 0);
+ DWC_MODIFY_REG32(&core_if->dev_if->
+ dev_global_regs->dctl, dctl.d32, 0);
DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
}
} else {
/**
* This function initiates remote wakeup of the host from L1 sleep state.
*/
-void dwc_otg_pcd_rem_wkup_from_sleep(dwc_otg_pcd_t * pcd, int set)
+void dwc_otg_pcd_rem_wkup_from_sleep(dwc_otg_pcd_t *pcd, int set)
{
glpmcfg_data_t lpmcfg;
pcgcctl_data_t pcgcctl = {.d32 = 0 };
-
+
dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
lpmcfg.b.en_utmi_sleep = 0;
lpmcfg.b.hird_thres &= (~(1 << 4));
-
+
/* Clear Enbl_L1Gating bit. */
pcgcctl.b.enbl_sleep_gating = 1;
- DWC_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32,0);
-
+ DWC_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
+
DWC_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
if (set) {
/**
* Performs remote wakeup.
*/
-void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t * pcd, int set)
+void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set)
{
dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
dwc_irqflags_t flags;
return;
}
-void dwc_otg_pcd_disconnect_us(dwc_otg_pcd_t * pcd, int no_of_usecs)
+void dwc_otg_pcd_disconnect_us(dwc_otg_pcd_t *pcd, int no_of_usecs)
{
dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
dctl_data_t dctl = { 0 };
if (dwc_otg_is_device_mode(core_if)) {
dctl.b.sftdiscon = 1;
- DWC_PRINTF("Soft disconnect for %d useconds\n",no_of_usecs);
- DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
+ DWC_PRINTF("Soft disconnect for %d useconds\n", no_of_usecs);
+ DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0,
+ dctl.d32);
dwc_udelay(no_of_usecs);
- DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32,0);
-
- } else{
+ DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
+ dctl.d32, 0);
+
+ } else {
DWC_PRINTF("NOT SUPPORTED IN HOST MODE\n");
}
return;
}
-int dwc_otg_pcd_wakeup(dwc_otg_pcd_t * pcd)
+int dwc_otg_pcd_wakeup(dwc_otg_pcd_t *pcd)
{
dsts_data_t dsts;
gotgctl_data_t gotgctl;
/* Check if suspend state */
dsts.d32 =
DWC_READ_REG32(&
- (GET_CORE_IF(pcd)->dev_if->
- dev_global_regs->dsts));
+ (GET_CORE_IF(pcd)->dev_if->dev_global_regs->
+ dsts));
if (dsts.b.suspsts) {
dwc_otg_pcd_remote_wakeup(pcd, 1);
}
}
-
/**
* Implement Soft-Connect and Soft-Disconnect function
*/
-void dwc_otg_pcd_pullup_enable(dwc_otg_pcd_t * pcd)
+void dwc_otg_pcd_pullup_enable(dwc_otg_pcd_t *pcd)
{
- if(pcd);
- DWC_MODIFY_REG32( &(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl),2,0 );
+ if (pcd)
+ DWC_MODIFY_REG32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl), 2,
+ 0);
}
-void dwc_otg_pcd_pullup_disable(dwc_otg_pcd_t * pcd)
+void dwc_otg_pcd_pullup_disable(dwc_otg_pcd_t *pcd)
{
- if(pcd);
- DWC_MODIFY_REG32( &(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl),0,2 );
+ if (pcd)
+ DWC_MODIFY_REG32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl), 0,
+ 2);
}
void dwc_pcd_reset(dwc_otg_pcd_t *pcd)
{
- dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
- dwc_otg_disable_global_interrupts(core_if);
+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+ dwc_otg_disable_global_interrupts(core_if);
dwc_otg_core_init(core_if);
- dwc_otg_pcd_reinit(pcd);
- dwc_otg_core_dev_init(core_if);
- dwc_otg_enable_global_interrupts(core_if);
+ dwc_otg_pcd_reinit(pcd);
+ dwc_otg_core_dev_init(core_if);
+ dwc_otg_enable_global_interrupts(core_if);
}
/**
*
* @param pcd the pcd structure.
*/
-void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t * pcd)
+void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd)
{
dwc_irqflags_t flags;
DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
}
-int dwc_otg_pcd_get_frame_number(dwc_otg_pcd_t * pcd)
+int dwc_otg_pcd_get_frame_number(dwc_otg_pcd_t *pcd)
{
return dwc_otg_get_frame_number(GET_CORE_IF(pcd));
}
-int dwc_otg_pcd_is_lpm_enabled(dwc_otg_pcd_t * pcd)
+int dwc_otg_pcd_is_lpm_enabled(dwc_otg_pcd_t *pcd)
{
return GET_CORE_IF(pcd)->core_params->lpm_enable;
}
-int dwc_otg_pcd_is_besl_enabled(dwc_otg_pcd_t * pcd)
+int dwc_otg_pcd_is_besl_enabled(dwc_otg_pcd_t *pcd)
{
return GET_CORE_IF(pcd)->core_params->besl_enable;
}
-int dwc_otg_pcd_get_param_baseline_besl(dwc_otg_pcd_t * pcd)
+int dwc_otg_pcd_get_param_baseline_besl(dwc_otg_pcd_t *pcd)
{
return GET_CORE_IF(pcd)->core_params->baseline_besl;
}
-int dwc_otg_pcd_get_param_deep_besl(dwc_otg_pcd_t * pcd)
+int dwc_otg_pcd_get_param_deep_besl(dwc_otg_pcd_t *pcd)
{
return GET_CORE_IF(pcd)->core_params->deep_besl;
}
-uint32_t get_b_hnp_enable(dwc_otg_pcd_t * pcd)
+uint32_t get_b_hnp_enable(dwc_otg_pcd_t *pcd)
{
return pcd->b_hnp_enable;
}
-uint32_t get_a_hnp_support(dwc_otg_pcd_t * pcd)
+uint32_t get_a_hnp_support(dwc_otg_pcd_t *pcd)
{
return pcd->a_hnp_support;
}
-uint32_t get_a_alt_hnp_support(dwc_otg_pcd_t * pcd)
+uint32_t get_a_alt_hnp_support(dwc_otg_pcd_t *pcd)
{
return pcd->a_alt_hnp_support;
}
-int dwc_otg_pcd_get_rmwkup_enable(dwc_otg_pcd_t * pcd)
+int dwc_otg_pcd_get_rmwkup_enable(dwc_otg_pcd_t *pcd)
{
return pcd->remote_wakeup_enable;
}