1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 * ========================================================================== */
36 * This file implements PCD Core. All code in this file is portable and doesn't
37 * use any OS specific functions.
38 * PCD Core provides Interface, defined in <code><dwc_otg_pcd_if.h></code>
39 * header file, which can be used to implement OS specific PCD interface.
41 * An important function of the PCD is managing interrupts generated
42 * by the DWC_otg controller. The implementation of the DWC_otg device
43 * mode interrupt service routines is in dwc_otg_pcd_intr.c.
45 * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
46 * @todo Does it work when the request size is greater than DEPTSIZ
51 #include "dwc_otg_pcd.h"
54 #include "dwc_otg_cfi.h"
56 extern int init_cfi(cfiobject_t * cfiobj);
60 * Choose endpoint from ep arrays using usb_ep structure.
/*
 * Map a gadget-layer endpoint handle back to its dwc_otg_pcd_ep_t by
 * comparing the handle against ep0 and each entry of the IN/OUT ep
 * arrays (matched on the ep's priv pointer).
 * NOTE(review): the listing is truncated here; the ep0 branch body and
 * the fall-through return (presumably NULL) are not visible.
 */
62 static dwc_otg_pcd_ep_t *get_ep_from_handle(dwc_otg_pcd_t * pcd, void *handle)
65 	if (pcd->ep0.priv == handle) {
68 	for (i = 0; i < MAX_EPS_CHANNELS - 1; i++) {
69 		if (pcd->in_ep[i].priv == handle)
70 			return &pcd->in_ep[i];
71 		if (pcd->out_ep[i].priv == handle)
72 			return &pcd->out_ep[i];
79 * This function completes a request. It calls the request's callback.
/*
 * Complete one request on @ep: remove it from the EP queue, invoke the
 * gadget's completion callback with @status, decrement the PCD's
 * pending-request count, and restore the ep's stopped flag (saved
 * before the callback so the callback may not clobber it).
 */
81 void dwc_otg_request_done(dwc_otg_pcd_ep_t * ep, dwc_otg_pcd_request_t * req,
84 	unsigned stopped = ep->stopped;
86 	DWC_DEBUGPL(DBG_PCDV, "%s(ep %p req %p)\n", __func__, ep, req);
87 	DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
89 	/* don't modify queue heads during completion callback */
91 	/* spin_unlock/spin_lock now done in fops->complete() */
92 	ep->pcd->fops->complete(ep->pcd, ep->priv, req->priv, status,
95 	if (ep->pcd->request_pending > 0) {
96 		--ep->pcd->request_pending;
99 	ep->stopped = stopped;
104 * This function terminates all the requests in the EP request queue.
/*
 * Drain the EP's request queue, completing every queued request with
 * -DWC_E_SHUTDOWN. Each dwc_otg_request_done() call unlinks the head,
 * so the loop terminates when the queue empties.
 */
106 void dwc_otg_request_nuke(dwc_otg_pcd_ep_t * ep)
108 	dwc_otg_pcd_request_t *req;
112 	/* called with irqs blocked?? */
113 	while (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
114 		req = DWC_CIRCLEQ_FIRST(&ep->queue);
115 		dwc_otg_request_done(ep, req, -DWC_E_SHUTDOWN);
/*
 * Bind the gadget-supplied function ops table to the PCD and start it.
 * NOTE(review): the body is not visible in this listing; presumably it
 * stores @fops on @pcd — confirm against the full source.
 */
119 void dwc_otg_pcd_start(dwc_otg_pcd_t * pcd,
120 		       const struct dwc_otg_pcd_function_ops *fops)
126 * PCD Callback function for initializing the PCD when switching to
129 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
/*
 * CIL "start" callback, invoked when the controller switches to device
 * mode. Points core_if->lock at the PCD's lock so CIL and PCD share
 * one spinlock while in device mode.
 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
 */
131 static int32_t dwc_otg_pcd_start_cb(void *p)
133 	dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
134 	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
137 	 * Initialize the Core for Device mode.
139 	if (dwc_otg_is_device_mode(core_if)) {
140 		//dwc_otg_core_dev_init(core_if);
141 		/* Set core_if's lock pointer to the pcd->lock */
142 		core_if->lock = pcd->lock;
147 /** CFI-specific buffer allocation function for EP */
/*
 * CFI wrapper: resolve @pep to a PCD endpoint and delegate the buffer
 * allocation to the CFI object's ep_alloc_buf op.
 * NOTE(review): on a bad ep this returns -DWC_E_INVALID through a
 * uint8_t * return type — the error value is indistinguishable from a
 * (bogus) pointer for callers; confirm how callers test for failure.
 */
149 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t * pcd, void *pep, dwc_dma_t * addr,
150 			      size_t buflen, int flags)
152 	dwc_otg_pcd_ep_t *ep;
153 	ep = get_ep_from_handle(pcd, pep);
155 		DWC_WARN("bad ep\n");
156 		return -DWC_E_INVALID;
159 	return pcd->cfi->ops.ep_alloc_buf(pcd->cfi, pcd, ep, addr, buflen,
163 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t * pcd, void *pep, dwc_dma_t * addr,
164 size_t buflen, int flags);
168 * PCD Callback function for notifying the PCD when resuming from
171 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
/*
 * CIL "resume_wakeup" callback. Forwards resume to the gadget (if it
 * registered one) and cancels a running SRP timeout timer — except in
 * the FS-PHY-with-I2C configuration, where the timer is left alone.
 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
 */
173 static int32_t dwc_otg_pcd_resume_cb(void *p)
175 	dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
177 	if (pcd->fops->resume) {
178 		pcd->fops->resume(pcd);
181 	/* Stop the SRP timeout timer. */
182 	if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS)
183 	    || (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
184 		if (GET_CORE_IF(pcd)->srp_timer_started) {
185 			GET_CORE_IF(pcd)->srp_timer_started = 0;
186 			DWC_TIMER_CANCEL(GET_CORE_IF(pcd)->srp_timer);
193 * PCD Callback function for notifying the PCD device is suspended.
195 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
/*
 * CIL "suspend" callback. Calls the gadget's suspend handler with
 * pcd->lock dropped (the callback may sleep or re-enter the PCD),
 * reacquiring it afterwards.
 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
 */
197 static int32_t dwc_otg_pcd_suspend_cb(void *p)
199 	dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
201 	if (pcd->fops->suspend) {
202 		DWC_SPINUNLOCK(pcd->lock);
203 		pcd->fops->suspend(pcd);
204 		DWC_SPINLOCK(pcd->lock);
211 * PCD Callback function for stopping the PCD when switching to Host
214 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
/*
 * CIL "stop" callback, invoked when switching to host mode; simply
 * delegates to dwc_otg_pcd_stop() (forward-declared locally here).
 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
 */
216 static int32_t dwc_otg_pcd_stop_cb(void *p)
218 	dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
219 	extern void dwc_otg_pcd_stop(dwc_otg_pcd_t * _pcd);
221 	dwc_otg_pcd_stop(pcd);
226 * PCD Callback structure for handling mode switching.
/*
 * CIL callback table for device-mode events; the context pointer .p is
 * filled in at registration time (dwc_otg_cil_register_pcd_callbacks).
 */
228 static dwc_otg_cil_callbacks_t pcd_callbacks = {
229 	.start = dwc_otg_pcd_start_cb,
230 	.stop = dwc_otg_pcd_stop_cb,
231 	.suspend = dwc_otg_pcd_suspend_cb,
232 	.resume_wakeup = dwc_otg_pcd_resume_cb,
233 	.p = 0,			/* Set at registration */
237 * This function allocates a DMA Descriptor chain for the Endpoint
238 * buffer to be used for a transfer to/from the specified endpoint.
/*
 * Allocate a DMA-coherent chain of @count device DMA descriptors,
 * returning the CPU address and storing the bus address via
 * @dma_desc_addr. Uses the atomic allocator (callable under a lock).
 */
240 dwc_otg_dev_dma_desc_t *dwc_otg_ep_alloc_desc_chain(dwc_dma_t * dma_desc_addr,
243 	return DWC_DMA_ALLOC_ATOMIC(count * sizeof(dwc_otg_dev_dma_desc_t),
248 * This function frees a DMA Descriptor chain that was allocated by ep_alloc_desc.
/*
 * Free a descriptor chain previously obtained from
 * dwc_otg_ep_alloc_desc_chain(); @count must match the allocation.
 */
250 void dwc_otg_ep_free_desc_chain(dwc_otg_dev_dma_desc_t * desc_addr,
251 				uint32_t dma_desc_addr, uint32_t count)
253 	DWC_DMA_FREE(count * sizeof(dwc_otg_dev_dma_desc_t), desc_addr,
260 * This function initializes a descriptor chain for Isochronous transfer
262 * @param core_if Programming view of DWC_otg controller.
263 * @param dwc_ep The EP to start the transfer on.
/*
 * Build the double-buffered (buffer 0 / buffer 1) descriptor-DMA chain
 * for an isochronous endpoint and kick off the transfer: allocate
 * 2 * desc_cnt descriptors, fill per-packet status words (rxbytes /
 * txbytes padded to 4-byte multiples, IOC set on the last descriptor
 * of each buffer), program DOEPDMA/DIEPDMA, then enable the EP.
 */
266 void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t * core_if,
270 	dsts_data_t dsts = {.d32 = 0 };
271 	depctl_data_t depctl = {.d32 = 0 };
272 	volatile uint32_t *addr;
277 	dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval;
280 	    dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
283 	/** Allocate descriptors for double buffering */
284 	dwc_ep->iso_desc_addr =
285 	    dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,
286 					dwc_ep->desc_cnt * 2);
	/* NOTE(review): the allocation above assigns iso_desc_addr, yet the
	 * failure check below reads desc_addr and warns when it is NON-NULL.
	 * This looks like it should be `if (!dwc_ep->iso_desc_addr)` —
	 * confirm against the databook/driver history before relying on it. */
287 	if (dwc_ep->desc_addr) {
288 		DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__);
292 	dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
	/* OUT (host-to-device) endpoint: program receive descriptors. */
295 	if (dwc_ep->is_in == 0) {
296 		dev_dma_desc_sts_t sts = {.d32 = 0 };
297 		dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
299 		uint32_t data_per_desc;
300 		dwc_otg_dev_out_ep_regs_t *out_regs =
301 		    core_if->dev_if->out_ep_regs[dwc_ep->num];
304 		addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
305 		dma_ad = (dma_addr_t) DWC_READ_REG32(&(out_regs->doepdma));
307 		/** Buffer 0 descriptors setup */
308 		dma_ad = dwc_ep->dma_addr0;
310 		sts.b_iso_out.bs = BS_HOST_READY;
311 		sts.b_iso_out.rxsts = 0;
313 		sts.b_iso_out.sp = 0;
314 		sts.b_iso_out.ioc = 0;
315 		sts.b_iso_out.pid = 0;
316 		sts.b_iso_out.framenum = 0;
		/* All frames except the last: pkt_per_frm descriptors each,
		 * final packet of a frame trimmed to data_per_frame and the
		 * byte count rounded up to a 4-byte boundary. */
319 		for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
320 		     i += dwc_ep->pkt_per_frm) {
322 			for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
323 				uint32_t len = (j + 1) * dwc_ep->maxpacket;
324 				if (len > dwc_ep->data_per_frame)
326 					    dwc_ep->data_per_frame -
327 					    j * dwc_ep->maxpacket;
329 					data_per_desc = dwc_ep->maxpacket;
330 				len = data_per_desc % 4;
332 					data_per_desc += 4 - len;
334 				sts.b_iso_out.rxbytes = data_per_desc;
335 				dma_desc->buf = dma_ad;
336 				dma_desc->status.d32 = sts.d32;
338 				offset += data_per_desc;
340 				dma_ad += data_per_desc;
		/* Last frame of buffer 0: same fill, but IOC raised on the
		 * very last descriptor so completion interrupts fire. */
344 		for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
345 			uint32_t len = (j + 1) * dwc_ep->maxpacket;
346 			if (len > dwc_ep->data_per_frame)
348 				    dwc_ep->data_per_frame -
349 				    j * dwc_ep->maxpacket;
351 				data_per_desc = dwc_ep->maxpacket;
352 			len = data_per_desc % 4;
354 				data_per_desc += 4 - len;
355 			sts.b_iso_out.rxbytes = data_per_desc;
356 			dma_desc->buf = dma_ad;
357 			dma_desc->status.d32 = sts.d32;
359 			offset += data_per_desc;
361 			dma_ad += data_per_desc;
364 		sts.b_iso_out.ioc = 1;
365 		len = (j + 1) * dwc_ep->maxpacket;
366 		if (len > dwc_ep->data_per_frame)
368 			    dwc_ep->data_per_frame - j * dwc_ep->maxpacket;
370 			data_per_desc = dwc_ep->maxpacket;
371 		len = data_per_desc % 4;
373 			data_per_desc += 4 - len;
374 		sts.b_iso_out.rxbytes = data_per_desc;
376 		dma_desc->buf = dma_ad;
377 		dma_desc->status.d32 = sts.d32;
380 		/** Buffer 1 descriptors setup */
381 		sts.b_iso_out.ioc = 0;
382 		dma_ad = dwc_ep->dma_addr1;
385 		for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
386 		     i += dwc_ep->pkt_per_frm) {
387 			for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
388 				uint32_t len = (j + 1) * dwc_ep->maxpacket;
389 				if (len > dwc_ep->data_per_frame)
391 					    dwc_ep->data_per_frame -
392 					    j * dwc_ep->maxpacket;
394 					data_per_desc = dwc_ep->maxpacket;
395 				len = data_per_desc % 4;
397 					data_per_desc += 4 - len;
400 				sts.b_iso_out.rxbytes = data_per_desc;
401 				dma_desc->buf = dma_ad;
402 				dma_desc->status.d32 = sts.d32;
404 				offset += data_per_desc;
406 				dma_ad += data_per_desc;
		/* Last frame of buffer 1, ternary form of the same trimming. */
409 		for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
411 			    ((j + 1) * dwc_ep->maxpacket >
412 			     dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
413 			    j * dwc_ep->maxpacket : dwc_ep->maxpacket;
415 			    (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
416 			sts.b_iso_out.rxbytes = data_per_desc;
417 			dma_desc->buf = dma_ad;
418 			dma_desc->status.d32 = sts.d32;
420 			offset += data_per_desc;
422 			dma_ad += data_per_desc;
425 		sts.b_iso_out.ioc = 1;
428 		    ((j + 1) * dwc_ep->maxpacket >
429 		     dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
430 		    j * dwc_ep->maxpacket : dwc_ep->maxpacket;
432 		    (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
433 		sts.b_iso_out.rxbytes = data_per_desc;
435 		dma_desc->buf = dma_ad;
436 		dma_desc->status.d32 = sts.d32;
438 		dwc_ep->next_frame = 0;
440 		/** Write dma_ad into DOEPDMA register */
441 		DWC_WRITE_REG32(&(out_regs->doepdma),
442 				(uint32_t) dwc_ep->iso_dma_desc_addr);
	/* IN (device-to-host) endpoint: one descriptor per frame, frame
	 * number advancing by bInterval, txbytes = data_per_frame. */
447 		dev_dma_desc_sts_t sts = {.d32 = 0 };
448 		dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
450 		dwc_otg_dev_in_ep_regs_t *in_regs =
451 		    core_if->dev_if->in_ep_regs[dwc_ep->num];
452 		unsigned int frmnumber;
453 		fifosize_data_t txfifosize, rxfifosize;
456 		    DWC_READ_REG32(&core_if->dev_if->in_ep_regs[dwc_ep->num]->
459 		    DWC_READ_REG32(&core_if->core_global_regs->grxfsiz);
461 		addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
463 		dma_ad = dwc_ep->dma_addr0;
466 		    DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
468 		sts.b_iso_in.bs = BS_HOST_READY;
469 		sts.b_iso_in.txsts = 0;
471 		    (dwc_ep->data_per_frame % dwc_ep->maxpacket) ? 1 : 0;
472 		sts.b_iso_in.ioc = 0;
473 		sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
475 		frmnumber = dwc_ep->next_frame;
477 		sts.b_iso_in.framenum = frmnumber;
478 		sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
481 		/** Buffer 0 descriptors setup */
482 		for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
483 			dma_desc->buf = dma_ad;
484 			dma_desc->status.d32 = sts.d32;
487 			dma_ad += dwc_ep->data_per_frame;
488 			sts.b_iso_in.framenum += dwc_ep->bInterval;
491 		sts.b_iso_in.ioc = 1;
492 		dma_desc->buf = dma_ad;
493 		dma_desc->status.d32 = sts.d32;
496 		/** Buffer 1 descriptors setup */
497 		sts.b_iso_in.ioc = 0;
498 		dma_ad = dwc_ep->dma_addr1;
500 		for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
501 		     i += dwc_ep->pkt_per_frm) {
502 			dma_desc->buf = dma_ad;
503 			dma_desc->status.d32 = sts.d32;
506 			dma_ad += dwc_ep->data_per_frame;
507 			sts.b_iso_in.framenum += dwc_ep->bInterval;
509 			sts.b_iso_in.ioc = 0;
511 		sts.b_iso_in.ioc = 1;
514 		dma_desc->buf = dma_ad;
515 		dma_desc->status.d32 = sts.d32;
517 		dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval;
519 		/** Write dma_ad into diepdma register */
520 		DWC_WRITE_REG32(&(in_regs->diepdma),
521 				(uint32_t) dwc_ep->iso_dma_desc_addr);
523 	/** Enable endpoint, clear nak */
526 	depctl.b.usbactep = 1;
529 	DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
530 	depctl.d32 = DWC_READ_REG32(addr);
534 * This function initializes a descriptor chain for Isochronous transfer
536 * @param core_if Programming view of DWC_otg controller.
537 * @param ep The EP to start the transfer on.
/*
 * Start a buffer-DMA (non-descriptor) isochronous transfer: compute
 * xfer_len/pktcnt from data_per_frame and the buffer processing
 * interval, pick the active buffer via proc_buf_num, program
 * DIEPTSIZ/DOEPTSIZ and the DMA address register, then enable the EP.
 * Guarded (line 552) so it only runs with buffer DMA enabled and
 * descriptor DMA disabled; the guard's body is truncated in this view.
 */
540 void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t * core_if,
543 	depctl_data_t depctl = {.d32 = 0 };
544 	volatile uint32_t *addr;
547 		addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
549 		addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
552 	if (core_if->dma_enable == 0 || core_if->dma_desc_enable != 0) {
555 		deptsiz_data_t deptsiz = {.d32 = 0 };
558 		    ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval;
560 		    (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
563 		    (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
565 		    (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
568 			/* Program the transfer size and packet count
569 			 * as follows: xfersize = N * maxpacket +
570 			 * short_packet pktcnt = N + (short_packet
573 			deptsiz.b.mc = ep->pkt_per_frm;
574 			deptsiz.b.xfersize = ep->xfer_len;
576 			    (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
577 			DWC_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
578 					dieptsiz, deptsiz.d32);
580 			/* Write the DMA register */
582 					(core_if->dev_if->in_ep_regs[ep->num]->
583 					 diepdma), (uint32_t) ep->dma_addr);
			/* OUT direction: round xfersize up to whole packets. */
587 			    (ep->xfer_len + (ep->maxpacket - 1)) /
589 			deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
591 			DWC_WRITE_REG32(&core_if->dev_if->out_ep_regs[ep->num]->
592 					doeptsiz, deptsiz.d32);
594 			/* Write the DMA register */
596 					(core_if->dev_if->out_ep_regs[ep->num]->
597 					 doepdma), (uint32_t) ep->dma_addr);
600 	/** Enable endpoint, clear nak */
605 	DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
610 * This function does the setup for a data transfer for an EP and
611 * starts the transfer. For an IN transfer, the packets will be
612 * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
613 * the packets are unloaded from the Rx FIFO in the ISR.
615 * @param core_if Programming view of DWC_otg controller.
616 * @param ep The EP to start the transfer on.
/*
 * Dispatch an isochronous transfer start to the right engine:
 *  - descriptor DMA  -> dwc_otg_iso_ep_start_ddma_transfer()
 *  - buffer DMA+PTI  -> dwc_otg_iso_ep_start_buf_transfer()
 *  - otherwise       -> per-frame transfers via
 *                       dwc_otg_iso_ep_start_frm_transfer(), after
 *                       selecting the active buffer from proc_buf_num.
 */
619 static void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t * core_if,
622 	if (core_if->dma_enable) {
623 		if (core_if->dma_desc_enable) {
625 				ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm;
627 				ep->desc_cnt = ep->pkt_cnt;
629 			dwc_otg_iso_ep_start_ddma_transfer(core_if, ep);
631 			if (core_if->pti_enh_enable) {
632 				dwc_otg_iso_ep_start_buf_transfer(core_if, ep);
635 				    (ep->proc_buf_num) ? ep->xfer_buff1 : ep->
637 				ep->cur_pkt_dma_addr =
638 				    (ep->proc_buf_num) ? ep->dma_addr1 : ep->
640 				dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
		/* Slave (non-DMA) mode: frame-by-frame transfers. */
645 		    (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
646 		ep->cur_pkt_dma_addr =
647 		    (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
648 		dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
653 * This function stops transfer for an EP and
654 * resets the ep's variables.
656 * @param core_if Programming view of DWC_otg controller.
657 * @param ep The EP to start the transfer on.
/*
 * Stop an isochronous transfer: disable the endpoint via its
 * DIEPCTL/DOEPCTL register, free the descriptor chain when descriptor
 * DMA was used, and zero the ep's iso bookkeeping fields so the ep can
 * be restarted cleanly.
 */
660 void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t * core_if, dwc_ep_t * ep)
662 	depctl_data_t depctl = {.d32 = 0 };
663 	volatile uint32_t *addr;
665 	if (ep->is_in == 1) {
666 		addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
668 		addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
672 	depctl.d32 = DWC_READ_REG32(addr);
677 	DWC_WRITE_REG32(addr, depctl.d32);
679 	if (core_if->dma_desc_enable &&
680 	    ep->iso_desc_addr && ep->iso_dma_desc_addr) {
681 		dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,
682 					   ep->iso_dma_desc_addr,
686 	/* reset variables */
691 	ep->data_per_frame = 0;
692 	ep->data_pattern_frame = 0;
694 	ep->buf_proc_intrvl = 0;
696 	ep->proc_buf_num = 0;
700 	ep->iso_desc_addr = 0;
701 	ep->iso_dma_desc_addr = 0;
/*
 * Gadget-facing entry point to start periodic ISO streaming on an
 * endpoint: validates the ep (non-ep0, has a descriptor, no ISO request
 * already in flight), records the double-buffer addresses and per-frame
 * sizing, derives pkt_per_frm / pkt_cnt / next_frame, allocates the
 * per-packet status array, then releases the lock and starts the
 * transfer engine.
 * @return 0 on success, -DWC_E_INVALID / -DWC_E_NO_MEMORY on failure.
 */
704 int dwc_otg_pcd_iso_ep_start(dwc_otg_pcd_t * pcd, void *ep_handle,
705 			     uint8_t * buf0, uint8_t * buf1, dwc_dma_t dma0,
706 			     dwc_dma_t dma1, int sync_frame, int dp_frame,
707 			     int data_per_frame, int start_frame,
708 			     int buf_proc_intrvl, void *req_handle,
711 	dwc_otg_pcd_ep_t *ep;
712 	dwc_irqflags_t flags = 0;
716 	dwc_otg_core_if_t *core_if;
718 	ep = get_ep_from_handle(pcd, ep_handle);
720 	if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
721 		DWC_WARN("bad ep\n");
722 		return -DWC_E_INVALID;
725 	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
726 	core_if = GET_CORE_IF(pcd);
727 	dwc_ep = &ep->dwc_ep;
729 	if (ep->iso_req_handle) {
730 		DWC_WARN("ISO request in progress\n");
733 	dwc_ep->dma_addr0 = dma0;
734 	dwc_ep->dma_addr1 = dma1;
736 	dwc_ep->xfer_buff0 = buf0;
737 	dwc_ep->xfer_buff1 = buf1;
739 	dwc_ep->data_per_frame = data_per_frame;
741 	/** @todo - pattern data support is to be implemented in the future */
742 	dwc_ep->data_pattern_frame = dp_frame;
743 	dwc_ep->sync_frame = sync_frame;
745 	dwc_ep->buf_proc_intrvl = buf_proc_intrvl;
747 	dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1);
749 	dwc_ep->proc_buf_num = 0;
	/* Packets per (micro)frame = ceil(data_per_frame / maxpacket),
	 * computed by repeated subtraction. */
751 	dwc_ep->pkt_per_frm = 0;
752 	frm_data = ep->dwc_ep.data_per_frame;
753 	while (frm_data > 0) {
754 		dwc_ep->pkt_per_frm++;
755 		frm_data -= ep->dwc_ep.maxpacket;
758 	dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
	/* start_frame == -1 means "start ASAP": derive the next frame
	 * number from the current SOF, aligned to bInterval. */
760 	if (start_frame == -1) {
761 		dwc_ep->next_frame = dsts.b.soffn + 1;
762 		if (dwc_ep->bInterval != 1) {
764 			    dwc_ep->next_frame + (dwc_ep->bInterval - 1 -
769 		dwc_ep->next_frame = start_frame;
772 	if (!core_if->pti_enh_enable) {
774 		    dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
778 		    (dwc_ep->data_per_frame *
779 		     (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval)
780 		     - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket;
783 	if (core_if->dma_desc_enable) {
785 		    dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
791 		    DWC_ALLOC_ATOMIC(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
794 		    DWC_ALLOC(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
796 	if (!dwc_ep->pkt_info) {
797 		DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
798 		return -DWC_E_NO_MEMORY;
800 	if (core_if->pti_enh_enable) {
801 		dwc_memset(dwc_ep->pkt_info, 0,
802 			   sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
806 	ep->iso_req_handle = req_handle;
808 	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
809 	dwc_otg_iso_ep_start_transfer(core_if, dwc_ep);
/*
 * Gadget-facing entry point to stop ISO streaming: validates the ep,
 * stops the hardware transfer, frees the per-packet info array, and
 * clears iso_req_handle under the lock.
 * NOTE(review): pkt_info is freed (line 829) BEFORE req_handle is
 * validated under the lock; on a handle mismatch the function returns
 * -DWC_E_INVALID with pkt_info already freed. Confirm callers never
 * pass a stale handle, or the free should move after the check.
 */
813 int dwc_otg_pcd_iso_ep_stop(dwc_otg_pcd_t * pcd, void *ep_handle,
816 	dwc_irqflags_t flags = 0;
817 	dwc_otg_pcd_ep_t *ep;
820 	ep = get_ep_from_handle(pcd, ep_handle);
821 	if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
822 		DWC_WARN("bad ep\n");
823 		return -DWC_E_INVALID;
825 	dwc_ep = &ep->dwc_ep;
827 	dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep);
829 	DWC_FREE(dwc_ep->pkt_info);
830 	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
831 	if (ep->iso_req_handle != req_handle) {
832 		DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
833 		return -DWC_E_INVALID;
836 	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
838 	ep->iso_req_handle = 0;
843 * This function is used for periodic data exchange between the PCD and gadget drivers
844 * for Isochronous EPs
846 * - Every time a sync period completes this function is called to
847 * perform data exchange between PCD and gadget
/*
 * Called when one ISO buffer (sync period) completes: hands the
 * just-finished buffer (proc_buf_num ^ 1) to the gadget's
 * isoc_complete callback with the lock dropped, then clears the
 * per-packet status entries for reuse.
 */
849 void dwc_otg_iso_buffer_done(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * ep,
855 	dwc_ep = &ep->dwc_ep;
857 	DWC_SPINUNLOCK(ep->pcd->lock);
858 	pcd->fops->isoc_complete(pcd, ep->priv, ep->iso_req_handle,
859 				 dwc_ep->proc_buf_num ^ 0x1);
860 	DWC_SPINLOCK(ep->pcd->lock);
862 	for (i = 0; i < dwc_ep->pkt_cnt; ++i) {
863 		dwc_ep->pkt_info[i].status = 0;
864 		dwc_ep->pkt_info[i].offset = 0;
865 		dwc_ep->pkt_info[i].length = 0;
/*
 * Return the number of ISO packets per buffer for the given endpoint.
 * NOTE(review): unlike the sibling entry points (e.g.
 * dwc_otg_pcd_iso_ep_start, line 720), the result of
 * get_ep_from_handle() is dereferenced (ep->desc) without a prior
 * `!ep` NULL check — confirm whether a NULL guard is missing here.
 */
869 int dwc_otg_pcd_get_iso_packet_count(dwc_otg_pcd_t * pcd, void *ep_handle,
870 				     void *iso_req_handle)
872 	dwc_otg_pcd_ep_t *ep;
875 	ep = get_ep_from_handle(pcd, ep_handle);
876 	if (!ep->desc || ep->dwc_ep.num == 0) {
877 		DWC_WARN("bad ep\n");
878 		return -DWC_E_INVALID;
880 	dwc_ep = &ep->dwc_ep;
882 	return dwc_ep->pkt_cnt;
/*
 * Copy status/length/offset of ISO packet @packet out of the ep's
 * pkt_info array into the caller-provided out-parameters.
 * NOTE(review): the listing is truncated around line 893 — presumably a
 * `!ep` check precedes the "bad ep" warning; @packet's bounds are not
 * visibly validated against pkt_cnt.
 */
885 void dwc_otg_pcd_get_iso_packet_params(dwc_otg_pcd_t * pcd, void *ep_handle,
886 				       void *iso_req_handle, int packet,
887 				       int *status, int *actual, int *offset)
889 	dwc_otg_pcd_ep_t *ep;
892 	ep = get_ep_from_handle(pcd, ep_handle);
894 		DWC_WARN("bad ep\n");
896 	dwc_ep = &ep->dwc_ep;
898 	*status = dwc_ep->pkt_info[packet].status;
899 	*actual = dwc_ep->pkt_info[packet].length;
900 	*offset = dwc_ep->pkt_info[packet].offset;
903 #endif /* DWC_EN_ISOC */
/*
 * Reset one PCD endpoint structure to its power-on defaults: direction
 * and number as given, inactive, control-type (until the gadget
 * activates it with a real descriptor), MAX_PACKET_SIZE, all transfer
 * bookkeeping zeroed, and an empty request queue.
 */
905 static void dwc_otg_pcd_init_ep(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * pcd_ep,
906 				uint32_t is_in, uint32_t ep_num)
908 	/* Init EP structure */
912 	pcd_ep->queue_sof = 0;
914 	/* Init DWC ep structure */
915 	pcd_ep->dwc_ep.is_in = is_in;
916 	pcd_ep->dwc_ep.num = ep_num;
917 	pcd_ep->dwc_ep.active = 0;
918 	pcd_ep->dwc_ep.tx_fifo_num = 0;
919 	/* Control until ep is activated */
920 	pcd_ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
921 	pcd_ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
922 	pcd_ep->dwc_ep.dma_addr = 0;
923 	pcd_ep->dwc_ep.start_xfer_buff = 0;
924 	pcd_ep->dwc_ep.xfer_buff = 0;
925 	pcd_ep->dwc_ep.xfer_len = 0;
926 	pcd_ep->dwc_ep.xfer_count = 0;
927 	pcd_ep->dwc_ep.sent_zlp = 0;
928 	pcd_ep->dwc_ep.total_len = 0;
929 	pcd_ep->dwc_ep.desc_addr = 0;
930 	pcd_ep->dwc_ep.dma_desc_addr = 0;
931 	DWC_CIRCLEQ_INIT(&pcd_ep->queue);
/*
 * (Re)initialize all PCD endpoint structures: ep0 first, then the IN
 * and OUT arrays. Hardware endpoint availability is read from HWCFG1
 * (two bits per ep; shifted by 3 for IN eps, by 2 for OUT eps, testing
 * the low bit per iteration). Finishes by putting ep0 in the
 * disconnected control state with MAX_EP0_SIZE.
 */
937 static void dwc_otg_pcd_reinit(dwc_otg_pcd_t * pcd)
941 	dwc_otg_pcd_ep_t *ep;
942 	int in_ep_cntr, out_ep_cntr;
943 	uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
944 	uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
946 	 * Initialize the EP0 structure.
949 	dwc_otg_pcd_init_ep(pcd, ep, 0, 0);
952 	hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3;
953 	for (i = 1; in_ep_cntr < num_in_eps; i++) {
954 		if ((hwcfg1 & 0x1) == 0) {
955 			dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr];
958 			 * @todo NGS: Add direction to EP, based on contents
959 			 * of HWCFG1.  Need a copy of HWCFG1 in pcd structure?
962 			dwc_otg_pcd_init_ep(pcd, ep, 1 /* IN */ , i);
964 			DWC_CIRCLEQ_INIT(&ep->queue);
970 	hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2;
971 	for (i = 1; out_ep_cntr < num_out_eps; i++) {
972 		if ((hwcfg1 & 0x1) == 0) {
973 			dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr];
976 			 * @todo NGS: Add direction to EP, based on contents
977 			 * of HWCFG1.  Need a copy of HWCFG1 in pcd structure?
980 			dwc_otg_pcd_init_ep(pcd, ep, 0 /* OUT */ , i);
981 			DWC_CIRCLEQ_INIT(&ep->queue);
986 	pcd->ep0state = EP0_DISCONNECT;
987 	pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE;
988 	pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
992 * This function is called when the SRP timer expires. The SRP should
993 * complete within 6 seconds.
/*
 * SRP (Session Request Protocol) timeout handler; SRP must complete
 * within 6 seconds. Behavior branches on configuration:
 *  - ADP enabled: if B-session is invalid, power the core down
 *    (power_down == 2 path) and restart ADP probing; otherwise treat
 *    the session as up and start the PCD as B-peripheral.
 *  - FS PHY with I2C: on success (srp_success && bsesvld) fire the
 *    resume_wakeup callback and clear the session request bit;
 *    otherwise report the device as not responding.
 *  - Plain sesreq still pending: report failure and clear sesreq.
 */
995 static void srp_timeout(void *ptr)
997 	gotgctl_data_t gotgctl;
998 	dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *) ptr;
999 	volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;
1001 	gotgctl.d32 = DWC_READ_REG32(addr);
1003 	core_if->srp_timer_started = 0;
1005 	if (core_if->adp_enable) {
1006 		if (gotgctl.b.bsesvld == 0) {
1007 			gpwrdn_data_t gpwrdn = {.d32 = 0 };
1008 			DWC_PRINTF("SRP Timeout BSESSVLD = 0\n");
1009 			/* Power off the core */
1010 			if (core_if->power_down == 2) {
1011 				gpwrdn.b.pwrdnswtch = 1;
1012 				DWC_MODIFY_REG32(&core_if->
1013 						 core_global_regs->gpwrdn,
1018 			gpwrdn.b.pmuintsel = 1;
1019 			gpwrdn.b.pmuactv = 1;
1020 			DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
1022 			dwc_otg_adp_probe_start(core_if);
1024 			DWC_PRINTF("SRP Timeout BSESSVLD = 1\n");
1025 			core_if->op_state = B_PERIPHERAL;
1026 			dwc_otg_core_init(core_if);
1027 			dwc_otg_enable_global_interrupts(core_if);
1028 			cil_pcd_start(core_if);
1032 	if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
1033 	    (core_if->core_params->i2c_enable)) {
1034 		DWC_PRINTF("SRP Timeout\n");
1036 		if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
1037 			if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
1038 				core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
1041 			/* Clear Session Request */
1043 			gotgctl.b.sesreq = 1;
1044 			DWC_MODIFY_REG32(&core_if->core_global_regs->gotgctl,
1047 			core_if->srp_success = 0;
1049 			__DWC_ERROR("Device not connected/responding\n");
1050 			gotgctl.b.sesreq = 0;
1051 			DWC_WRITE_REG32(addr, gotgctl.d32);
1053 	} else if (gotgctl.b.sesreq) {
1054 		DWC_PRINTF("SRP Timeout\n");
1056 		__DWC_ERROR("Device not connected/responding\n");
1057 		gotgctl.b.sesreq = 0;
1058 		DWC_WRITE_REG32(addr, gotgctl.d32);
1060 		DWC_PRINTF(" SRP GOTGCTL=%0x\n", gotgctl.d32);
1068 extern void start_next_request(dwc_otg_pcd_ep_t * ep);
/*
 * Tasklet that (re)starts deferred transfers: for ep0 and every IN ep
 * whose queue_sof flag is set, clear the flag and kick the next queued
 * request via start_next_request().
 * NOTE(review): the inner `depctl_data_t diepctl` (line 1089) shadows
 * the outer one (line 1076); both reads appear otherwise unused in the
 * visible lines — possibly dead reads, confirm against full source.
 */
1070 static void start_xfer_tasklet_func(void *data)
1072 	dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) data;
1073 	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1076 	depctl_data_t diepctl;
1078 	DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
1080 	diepctl.d32 = DWC_READ_REG32(&core_if->dev_if->in_ep_regs[0]->diepctl);
1082 	if (pcd->ep0.queue_sof) {
1083 		pcd->ep0.queue_sof = 0;
1084 		start_next_request(&pcd->ep0);
1088 	for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
1089 		depctl_data_t diepctl;
1091 		    DWC_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl);
1093 		if (pcd->in_ep[i].queue_sof) {
1094 			pcd->in_ep[i].queue_sof = 0;
1095 			start_next_request(&pcd->in_ep[i]);
1104 * This function initialized the PCD portion of the driver.
/*
 * Allocate and initialize the PCD: the pcd struct and its spinlock,
 * CIL callback registration, SETUP-packet and status DMA buffers (or
 * plain allocations in slave mode), ep0 descriptor chains when
 * descriptor DMA is enabled, the CFI object, the start-xfer/test-mode
 * tasklets, the SRP timer, and (with dev_out_nak) per-EP transfer
 * timeout timers. Error paths unwind the allocations made so far.
 * @return the new pcd, or NULL on allocation failure (unwind labels
 * are outside the visible lines).
 */
1107 dwc_otg_pcd_t *dwc_otg_pcd_init(dwc_otg_core_if_t * core_if)
1109 	dwc_otg_pcd_t *pcd = NULL;
1110 	dwc_otg_dev_if_t *dev_if;
1114 	 * Allocate PCD structure
1116 	pcd = DWC_ALLOC(sizeof(dwc_otg_pcd_t));
1122 	pcd->lock = DWC_SPINLOCK_ALLOC();
1124 		DWC_ERROR("Could not allocate lock for pcd");
1128 	/* Set core_if's lock pointer to hcd->lock */
1129 	core_if->lock = pcd->lock;
1130 	pcd->core_if = core_if;
1132 	dev_if = core_if->dev_if;
1133 	dev_if->isoc_ep = NULL;
1135 	if (core_if->hwcfg4.b.ded_fifo_en) {
1136 		DWC_PRINTF("Dedicated Tx FIFOs mode\n");
1138 		DWC_PRINTF("Shared Tx FIFO mode\n");
1142 	 * Initialize the Core for Device mode here if there is no ADP support.
1143 	 * Otherwise it will be done later in dwc_otg_adp_start routine.
1145 	//if (dwc_otg_is_device_mode(core_if) /*&& !core_if->adp_enable */ ) {
1146 	//      dwc_otg_core_dev_init(core_if);
1150 	 * Register the PCD Callbacks.
1152 	dwc_otg_cil_register_pcd_callbacks(core_if, &pcd_callbacks, pcd);
1155 	 * Initialize the DMA buffer for SETUP packets
1157 	if (GET_CORE_IF(pcd)->dma_enable) {
1159 		    DWC_DMA_ALLOC(sizeof(*pcd->setup_pkt) * 5,
1160 				  &pcd->setup_pkt_dma_handle);
1161 		if (pcd->setup_pkt == NULL) {
1167 		    DWC_DMA_ALLOC(sizeof(uint16_t),
1168 				  &pcd->status_buf_dma_handle);
1169 		if (pcd->status_buf == NULL) {
1170 			DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
1171 				     pcd->setup_pkt, pcd->setup_pkt_dma_handle);
1176 		if (GET_CORE_IF(pcd)->dma_desc_enable) {
1177 			dev_if->setup_desc_addr[0] =
1178 			    dwc_otg_ep_alloc_desc_chain
1179 			    (&dev_if->dma_setup_desc_addr[0], 1);
1180 			dev_if->setup_desc_addr[1] =
1181 			    dwc_otg_ep_alloc_desc_chain
1182 			    (&dev_if->dma_setup_desc_addr[1], 1);
1183 			dev_if->in_desc_addr =
1184 			    dwc_otg_ep_alloc_desc_chain
1185 			    (&dev_if->dma_in_desc_addr, 1);
1186 			dev_if->out_desc_addr =
1187 			    dwc_otg_ep_alloc_desc_chain
1188 			    (&dev_if->dma_out_desc_addr, 1);
1189 			pcd->data_terminated = 0;
1191 			if (dev_if->setup_desc_addr[0] == 0
1192 			    || dev_if->setup_desc_addr[1] == 0
1193 			    || dev_if->in_desc_addr == 0
1194 			    || dev_if->out_desc_addr == 0) {
				/* Partial-failure unwind: free in reverse order. */
1196 				if (dev_if->out_desc_addr)
1197 					dwc_otg_ep_free_desc_chain
1198 					    (dev_if->out_desc_addr,
1199 					     dev_if->dma_out_desc_addr, 1);
1200 				if (dev_if->in_desc_addr)
1201 					dwc_otg_ep_free_desc_chain
1202 					    (dev_if->in_desc_addr,
1203 					     dev_if->dma_in_desc_addr, 1);
1204 				if (dev_if->setup_desc_addr[1])
1205 					dwc_otg_ep_free_desc_chain
1206 					    (dev_if->setup_desc_addr[1],
1207 					     dev_if->dma_setup_desc_addr[1], 1);
1208 				if (dev_if->setup_desc_addr[0])
1209 					dwc_otg_ep_free_desc_chain
1210 					    (dev_if->setup_desc_addr[0],
1211 					     dev_if->dma_setup_desc_addr[0], 1);
1213 				DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
1215 					     pcd->setup_pkt_dma_handle);
1216 				DWC_DMA_FREE(sizeof(*pcd->status_buf),
1218 					     pcd->status_buf_dma_handle);
		/* Slave mode: ordinary (non-DMA) allocations. */
1226 		pcd->setup_pkt = DWC_ALLOC(sizeof(*pcd->setup_pkt) * 5);
1227 		if (pcd->setup_pkt == NULL) {
1232 		pcd->status_buf = DWC_ALLOC(sizeof(uint16_t));
1233 		if (pcd->status_buf == NULL) {
1234 			DWC_FREE(pcd->setup_pkt);
1240 	dwc_otg_pcd_reinit(pcd);
1242 	/* Allocate the cfi object for the PCD */
1244 	pcd->cfi = DWC_ALLOC(sizeof(cfiobject_t));
1245 	if (NULL == pcd->cfi)
1247 	if (init_cfi(pcd->cfi)) {
1248 		CFI_INFO("%s: Failed to init the CFI object\n", __func__);
1253 	/* Initialize tasklets */
1254 	pcd->start_xfer_tasklet = DWC_TASK_ALLOC("xfer_tasklet",
1255 						 start_xfer_tasklet_func, pcd);
1256 	pcd->test_mode_tasklet = DWC_TASK_ALLOC("test_mode_tasklet",
1259 	/* Initialize SRP timer */
1260 	core_if->srp_timer = DWC_TIMER_ALLOC("SRP TIMER", srp_timeout, core_if);
1262 	if (core_if->core_params->dev_out_nak) {
1264 		 * Initialize xfer timeout timer. Implemented for
1265 		 * 2.93a feature "Device DDMA OUT NAK Enhancement"
1267 		for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1268 			pcd->core_if->ep_xfer_timer[i] =
1269 			    DWC_TIMER_ALLOC("ep timer", ep_xfer_timeout,
1270 					    &pcd->core_if->ep_xfer_info[i]);
1279 	DWC_FREE(pcd->setup_pkt);
1280 	if (pcd->status_buf)
1281 		DWC_FREE(pcd->status_buf);
1293 * Remove PCD specific data
/*
 * Tear down the PCD, mirroring dwc_otg_pcd_init(): cancel per-EP
 * timers, free the SETUP/status DMA buffers and (with descriptor DMA)
 * the ep0 descriptor chains — or the plain allocations in slave mode —
 * free the spinlock (detaching it from core_if), the tasklets, the
 * per-EP timers, and the CFI object's resources.
 */
1295 void dwc_otg_pcd_remove(dwc_otg_pcd_t * pcd)
1297 	dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
1299 	if (pcd->core_if->core_params->dev_out_nak) {
1300 		for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1301 			DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[i]);
1302 			pcd->core_if->ep_xfer_info[i].state = 0;
1306 	if (GET_CORE_IF(pcd)->dma_enable) {
1307 		DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt,
1308 			     pcd->setup_pkt_dma_handle);
1309 		DWC_DMA_FREE(sizeof(uint16_t), pcd->status_buf,
1310 			     pcd->status_buf_dma_handle);
1311 		if (GET_CORE_IF(pcd)->dma_desc_enable) {
1312 			dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0],
1313 						   dev_if->dma_setup_desc_addr
1315 			dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1],
1316 						   dev_if->dma_setup_desc_addr
1318 			dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr,
1319 						   dev_if->dma_in_desc_addr, 1);
1320 			dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr,
1321 						   dev_if->dma_out_desc_addr,
1325 		DWC_FREE(pcd->setup_pkt);
1326 		DWC_FREE(pcd->status_buf);
1328 	DWC_SPINLOCK_FREE(pcd->lock);
1329 	/* Set core_if's lock pointer to NULL */
1330 	pcd->core_if->lock = NULL;
1332 	DWC_TASK_FREE(pcd->start_xfer_tasklet);
1333 	DWC_TASK_FREE(pcd->test_mode_tasklet);
1334 	if (pcd->core_if->core_params->dev_out_nak) {
1335 		for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1336 			if (pcd->core_if->ep_xfer_timer[i]) {
1337 				DWC_TIMER_FREE(pcd->core_if->ep_xfer_timer[i]);
1342 	/* Release the CFI object's dynamic memory */
1344 	if (pcd->cfi->ops.release) {
1345 		pcd->cfi->ops.release(pcd->cfi);
1353 * Returns whether registered pcd is dual speed or not
/*
 * Report whether the registered PCD is dual-speed capable: false when
 * configured full-speed-only, or when the HS PHY is ULPI (type 2) with
 * a dedicated FS PHY (type 1) and ulpi_fs_ls set; true otherwise
 * (return values are outside the visible lines).
 */
1355 uint32_t dwc_otg_pcd_is_dualspeed(dwc_otg_pcd_t * pcd)
1357 	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1359 	if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) ||
1360 	    ((core_if->hwcfg2.b.hs_phy_type == 2) &&
1361 	     (core_if->hwcfg2.b.fs_phy_type == 1) &&
1362 	     (core_if->core_params->ulpi_fs_ls))) {
1370 * Returns whether registered pcd is OTG capable or not
/* Report whether the registered PCD is OTG-capable, derived from the
 * SRP/HNP capability bits currently set in GUSBCFG (and, on newer kernels,
 * the ADP parameter). Pre-3.6 kernels require both SRP and HNP; later
 * kernels evaluate the capabilities individually.
 * NOTE(review): the return/assignment lines between the conditionals are
 * missing from this extract. */
1372 uint32_t dwc_otg_pcd_is_otg(dwc_otg_pcd_t * pcd)
1374 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1375 gusbcfg_data_t usbcfg = {.d32 = 0 };
1376 uint32_t retval = 0;
1378 usbcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->gusbcfg);
1379 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)
1380 if (!usbcfg.b.srpcap || !usbcfg.b.hnpcap)
1385 if (!usbcfg.b.srpcap)
1390 if (usbcfg.b.hnpcap)
1393 if (core_if->adp_enable)
1401  * This function assigns a Tx FIFO to an EP
1402  * in dedicated (multiple) Tx FIFO mode
/* Scan the dedicated-TX-FIFO allocation bitmask (core_if->tx_msk) for the
 * first free FIFO, mark it used, and (in the truncated remainder) return its
 * 1-based number — presumably 0 when none is free; confirm in full source.
 * `TxMsk` is declared/shifted on lines missing from this extract. */
1404 static uint32_t assign_tx_fifo(dwc_otg_core_if_t * core_if)
1409 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
1410 if ((TxMsk & core_if->tx_msk) == 0) {
1411 core_if->tx_msk |= TxMsk;
1420  * This function assigns a periodic Tx FIFO to a periodic EP
1421  * in shared Tx FIFO mode
/* Same scheme as assign_tx_fifo() but over the periodic FIFO bitmask
 * (core_if->p_tx_msk), bounded by the hardware's periodic-IN-EP count.
 * Loop variable and the shift/return lines are outside this extract. */
1423 static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t * core_if)
1425 uint32_t PerTxMsk = 1;
1427 for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
1428 if ((PerTxMsk & core_if->p_tx_msk) == 0) {
1429 core_if->p_tx_msk |= PerTxMsk;
1438 * This function releases periodic Tx FIFO
1439 * in shared Tx FIFO mode
/* Clear the allocation bit for periodic Tx FIFO `fifo_num` (1-based):
 * XOR-ing the isolated bit against the mask drops exactly that bit.
 * The assignment target (core_if->p_tx_msk =) is on a missing line. */
1441 static void release_perio_tx_fifo(dwc_otg_core_if_t * core_if,
1445 (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
1449  * This function releases a (non-periodic) Tx FIFO
1450  * in dedicated Tx FIFO mode
/* Counterpart of assign_tx_fifo(): clear the allocation bit for dedicated
 * Tx FIFO `fifo_num` (1-based) in core_if->tx_msk via the same XOR idiom.
 * The assignment target is on a missing line. */
1452 static void release_tx_fifo(dwc_otg_core_if_t * core_if, uint32_t fifo_num)
1455 (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
1459 * This function is being called from gadget
1460 * to enable PCD endpoint.
/* Gadget-facing endpoint enable: validate the USB endpoint descriptor, find
 * the matching dwc_otg_pcd_ep_t by number/direction, assign a Tx FIFO for IN
 * endpoints, allocate descriptor-DMA chains when enabled, and activate the
 * endpoint in hardware. Returns 0 on success or a negative -DWC_E_* code.
 * NOTE(review): decimated extract — local declarations (num, dir, i,
 * epcount, retval, the gdfifocfg locals) and several braces/returns are on
 * missing lines. */
1462 int dwc_otg_pcd_ep_enable(dwc_otg_pcd_t * pcd,
1463 const uint8_t * ep_desc, void *usb_ep)
1466 dwc_otg_pcd_ep_t *ep = NULL;
1467 const usb_endpoint_descriptor_t *desc;
1468 dwc_irqflags_t flags;
1469 // fifosize_data_t dptxfsiz = {.d32 = 0 };
1470 // gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
1471 // gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
1475 desc = (const usb_endpoint_descriptor_t *)ep_desc;
/* EP0 is owned by the PCD itself; a gadget may only attach its handle. */
1478 pcd->ep0.priv = usb_ep;
1480 retval = -DWC_E_INVALID;
1484 num = UE_GET_ADDR(desc->bEndpointAddress);
1485 dir = UE_GET_DIR(desc->bEndpointAddress);
/* Reject descriptors with a zero wMaxPacketSize. */
1487 if (!desc->wMaxPacketSize) {
1488 DWC_WARN("bad maxpacketsize\n");
1489 retval = -DWC_E_INVALID;
/* Locate the PCD endpoint object matching the descriptor's address. */
1493 if (dir == UE_DIR_IN) {
1494 epcount = pcd->core_if->dev_if->num_in_eps;
1495 for (i = 0; i < epcount; i++) {
1496 if (num == pcd->in_ep[i].dwc_ep.num) {
1497 ep = &pcd->in_ep[i];
1502 epcount = pcd->core_if->dev_if->num_out_eps;
1503 for (i = 0; i < epcount; i++) {
1504 if (num == pcd->out_ep[i].dwc_ep.num) {
1505 ep = &pcd->out_ep[i];
1512 DWC_WARN("bad address\n");
1513 retval = -DWC_E_INVALID;
1517 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
/* Cache the descriptor's direction/MPS/type on the endpoint object. */
1527 ep->dwc_ep.is_in = (dir == UE_DIR_IN);
1528 ep->dwc_ep.maxpacket = UGETW(desc->wMaxPacketSize);
1530 ep->dwc_ep.type = desc->bmAttributes & UE_XFERTYPE;
/* IN endpoints need a Tx FIFO: 0 (shared) unless the core has multiple
 * FIFOs, in which case ISOC gets a periodic FIFO and others a dedicated
 * one. */
1532 if (ep->dwc_ep.is_in) {
1533 if (!GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1534 ep->dwc_ep.tx_fifo_num = 0;
1536 if (ep->dwc_ep.type == UE_ISOCHRONOUS) {
1538 * if ISOC EP then assign a Periodic Tx FIFO.
1540 ep->dwc_ep.tx_fifo_num =
1541 assign_perio_tx_fifo(GET_CORE_IF(pcd));
1545 * if Dedicated FIFOs mode is on then assign a Tx FIFO.
1547 ep->dwc_ep.tx_fifo_num =
1548 assign_tx_fifo(GET_CORE_IF(pcd));
1551 /* Calculating EP info controller base address */
/* Older cores (<= 2.94a) need GDFIFOCFG.epinfobase bumped past the newly
 * claimed Tx FIFO's region. */
1553 if (ep->dwc_ep.tx_fifo_num
1554 && GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1556 DWC_READ_REG32(&GET_CORE_IF(pcd)->
1557 core_global_regs->gdfifocfg);
1558 gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
1561 (&GET_CORE_IF(pcd)->core_global_regs->
1562 dtxfsiz[ep->dwc_ep.tx_fifo_num - 1]) >> 16);
1563 gdfifocfg.b.epinfobase =
1564 gdfifocfgbase.d32 + dptxfsiz.d32;
1565 if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
1566 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
1567 core_global_regs->gdfifocfg,
1573 /* Set initial data PID. */
1574 if (ep->dwc_ep.type == UE_BULK) {
1575 ep->dwc_ep.data_pid_start = 0;
1578 /* Alloc DMA Descriptors */
/* Non-ISOC endpoints allocate their descriptor chain here; failure unwinds
 * with -DWC_E_SHUTDOWN after dropping the lock. */
1579 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1580 #ifndef DWC_UTE_PER_IO
1581 if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
1583 ep->dwc_ep.desc_addr =
1584 dwc_otg_ep_alloc_desc_chain(&ep->
1585 dwc_ep.dma_desc_addr,
1587 if (!ep->dwc_ep.desc_addr) {
1588 DWC_WARN("%s, can't allocate DMA descriptor\n",
1590 retval = -DWC_E_SHUTDOWN;
1591 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1594 #ifndef DWC_UTE_PER_IO
1599 DWC_DEBUGPL(DBG_PCD, "Activate %s: type=%d, mps=%d desc=%p\n",
1600 (ep->dwc_ep.is_in ? "IN" : "OUT"),
1601 ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);
/* bInterval is 2^(bInterval-1) (micro)frames per the USB 2.0 spec. */
1602 #ifdef DWC_UTE_PER_IO
1603 ep->dwc_ep.xiso_bInterval = 1 << (ep->desc->bInterval - 1);
1605 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
1606 ep->dwc_ep.bInterval = 1 << (ep->desc->bInterval - 1);
1607 ep->dwc_ep.frame_num = 0xFFFFFFFF;
1610 dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
/* Give the CFI layer a chance to hook the newly enabled endpoint. */
1613 if (pcd->cfi->ops.ep_enable) {
1614 pcd->cfi->ops.ep_enable(pcd->cfi, pcd, ep);
1618 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1625 * This function is being called from gadget
1626 * to disable PCD endpoint.
/* Gadget-facing endpoint disable: abort queued requests, deactivate the EP
 * in hardware, return its Tx FIFO to the allocator (flushing it first in
 * multiple-FIFO mode), rewind GDFIFOCFG.epinfobase on old cores, and free
 * the descriptor-DMA chain. Returns 0 or -DWC_E_INVALID for a bad handle.
 * NOTE(review): decimated extract — some braces and a few statements
 * (e.g. the gdfifocfg read target on line 1658's left side) are missing. */
1628 int dwc_otg_pcd_ep_disable(dwc_otg_pcd_t * pcd, void *ep_handle)
1630 dwc_otg_pcd_ep_t *ep;
1631 dwc_irqflags_t flags;
1632 dwc_otg_dev_dma_desc_t *desc_addr;
1633 dwc_dma_t dma_desc_addr;
1634 gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
1635 gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
1636 // fifosize_data_t dptxfsiz = {.d32 = 0 };
1638 ep = get_ep_from_handle(pcd, ep_handle);
/* An EP that was never enabled (no descriptor) cannot be disabled. */
1640 if (!ep || !ep->desc) {
1641 DWC_DEBUGPL(DBG_PCD, "bad ep address\n");
1642 return -DWC_E_INVALID;
1645 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
/* Complete every queued request with an error before deactivation. */
1647 dwc_otg_request_nuke(ep);
1649 dwc_otg_ep_deactivate(GET_CORE_IF(pcd), &ep->dwc_ep);
1650 if (pcd->core_if->core_params->dev_out_nak) {
1651 DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[ep->dwc_ep.num]);
1652 pcd->core_if->ep_xfer_info[ep->dwc_ep.num].state = 0;
1658 DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg);
1659 gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
1661 if (ep->dwc_ep.is_in) {
1662 if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1663 /* Flush the Tx FIFO */
1664 dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd),
1665 ep->dwc_ep.tx_fifo_num);
/* Return the FIFO to whichever allocator owned it; the wrong-type call
 * is a no-op because the corresponding mask bit is clear. */
1667 release_perio_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
1668 release_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
1670 if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1671 /* Decreasing EPinfo Base Addr */
1674 (&GET_CORE_IF(pcd)->
1675 core_global_regs->dtxfsiz[ep->dwc_ep.tx_fifo_num-1]) >> 16);
1676 gdfifocfg.b.epinfobase = gdfifocfgbase.d32 - dptxfsiz.d32;
1677 if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
1678 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg,
1685 /* Free DMA Descriptors */
1686 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1687 if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
1688 desc_addr = ep->dwc_ep.desc_addr;
1689 dma_desc_addr = ep->dwc_ep.dma_desc_addr;
1691 /* Cannot call dma_free_coherent() with IRQs disabled */
1692 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1693 dwc_otg_ep_free_desc_chain(desc_addr, dma_desc_addr,
1699 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1702 DWC_DEBUGPL(DBG_PCD, "%d %s disabled\n", ep->dwc_ep.num,
1703 ep->dwc_ep.is_in ? "IN" : "OUT");
1708 /******************************************************************************/
1709 #ifdef DWC_UTE_PER_IO
1712 * Free the request and its extended parts
/* Free a per-IO (extended ISOC) request: release the per-frame descriptor
 * array. NOTE(review): a missing line here presumably frees `req` itself
 * (DWC_FREE(req)) — confirm against the full source, otherwise callers
 * would leak the request object. */
1715 void dwc_pcd_xiso_ereq_free(dwc_otg_pcd_ep_t * ep, dwc_otg_pcd_request_t * req)
1717 DWC_FREE(req->ext_req.per_io_frame_descs);
1722 * Start the next request in the endpoint's queue.
/* Start the next queued per-IO ISOC transfer on `ep`: build the in/out
 * descriptor-DMA chain from the request's per-frame descriptors, program
 * DIEPDMA/DOEPDMA with the chain, and set EPENA (+CNAK, presumably on a
 * missing line) to launch it. Returns 0 (trailing return is outside this
 * extract). NOTE(review): decimated — loop variable `i`, some braces and
 * the diepctl initialization lines are missing. */
1725 int dwc_otg_pcd_xiso_start_next_request(dwc_otg_pcd_t * pcd,
1726 dwc_otg_pcd_ep_t * ep)
1729 dwc_otg_pcd_request_t *req = NULL;
1730 dwc_ep_t *dwcep = NULL;
1731 struct dwc_iso_xreq_port *ereq = NULL;
1732 struct dwc_iso_pkt_desc_port *ddesc_iso;
1734 depctl_data_t diepctl;
1736 dwcep = &ep->dwc_ep;
/* Only one chain may be active per EP; bail out if one is in flight. */
1738 if (dwcep->xiso_active_xfers > 0) {
1739 #if 0 //Disable this to decrease s/w overhead that is crucial for Isoc transfers
1740 DWC_WARN("There are currently active transfers for EP%d \
1741 (active=%d; queued=%d)", dwcep->num, dwcep->xiso_active_xfers,
1742 dwcep->xiso_queued_xfers);
/* Bits 12:11 of wMaxPacketSize = additional transactions per microframe. */
1747 nat = UGETW(ep->desc->wMaxPacketSize);
1748 nat = (nat >> 11) & 0x03;
1750 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1751 req = DWC_CIRCLEQ_FIRST(&ep->queue);
1752 ereq = &req->ext_req;
1755 /* Get the frame number */
1756 dwcep->xiso_frame_num =
1757 dwc_otg_get_frame_number(GET_CORE_IF(pcd));
1758 DWC_DEBUG("FRM_NUM=%d", dwcep->xiso_frame_num);
1760 ddesc_iso = ereq->per_io_frame_descs;
1763 /* Setup DMA Descriptor chain for IN Isoc request */
1764 for (i = 0; i < ereq->pio_pkt_count; i++) {
1765 //if ((i % (nat + 1)) == 0)
/* Advance the target frame by bInterval; 0x3FFF wraps the 14-bit
 * frame counter. */
1767 dwcep->xiso_frame_num =
1768 (dwcep->xiso_bInterval +
1769 dwcep->xiso_frame_num) & 0x3FFF;
1770 dwcep->desc_addr[i].buf =
1771 req->dma + ddesc_iso[i].offset;
1772 dwcep->desc_addr[i].status.b_iso_in.txbytes =
1773 ddesc_iso[i].length;
1774 dwcep->desc_addr[i].status.b_iso_in.framenum =
1775 dwcep->xiso_frame_num;
1776 dwcep->desc_addr[i].status.b_iso_in.bs =
1778 dwcep->desc_addr[i].status.b_iso_in.txsts = 0;
/* Short-packet flag when the payload is not a maxpacket multiple. */
1779 dwcep->desc_addr[i].status.b_iso_in.sp =
1780 (ddesc_iso[i].length %
1781 dwcep->maxpacket) ? 1 : 0;
1782 dwcep->desc_addr[i].status.b_iso_in.ioc = 0;
1783 dwcep->desc_addr[i].status.b_iso_in.pid = nat + 1;
1784 dwcep->desc_addr[i].status.b_iso_in.l = 0;
1786 /* Process the last descriptor */
/* Only the final descriptor interrupts (IOC) and closes the list (L). */
1787 if (i == ereq->pio_pkt_count - 1) {
1788 dwcep->desc_addr[i].status.b_iso_in.ioc = 1;
1789 dwcep->desc_addr[i].status.b_iso_in.l = 1;
1793 /* Setup and start the transfer for this endpoint */
1794 dwcep->xiso_active_xfers++;
1795 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->dev_if->
1796 in_ep_regs[dwcep->num]->diepdma,
1797 dwcep->dma_desc_addr);
1799 diepctl.b.epena = 1;
1801 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
1802 in_ep_regs[dwcep->num]->diepctl, 0,
1805 /* Setup DMA Descriptor chain for OUT Isoc request */
1806 for (i = 0; i < ereq->pio_pkt_count; i++) {
1807 //if ((i % (nat + 1)) == 0)
1808 dwcep->xiso_frame_num = (dwcep->xiso_bInterval +
1809 dwcep->xiso_frame_num) & 0x3FFF;
1810 dwcep->desc_addr[i].buf =
1811 req->dma + ddesc_iso[i].offset;
1812 dwcep->desc_addr[i].status.b_iso_out.rxbytes =
1813 ddesc_iso[i].length;
1814 dwcep->desc_addr[i].status.b_iso_out.framenum =
1815 dwcep->xiso_frame_num;
1816 dwcep->desc_addr[i].status.b_iso_out.bs =
1818 dwcep->desc_addr[i].status.b_iso_out.rxsts = 0;
1819 dwcep->desc_addr[i].status.b_iso_out.sp =
1820 (ddesc_iso[i].length %
1821 dwcep->maxpacket) ? 1 : 0;
1822 dwcep->desc_addr[i].status.b_iso_out.ioc = 0;
1823 dwcep->desc_addr[i].status.b_iso_out.pid = nat + 1;
1824 dwcep->desc_addr[i].status.b_iso_out.l = 0;
1826 /* Process the last descriptor */
1827 if (i == ereq->pio_pkt_count - 1) {
1828 dwcep->desc_addr[i].status.b_iso_out.ioc = 1;
1829 dwcep->desc_addr[i].status.b_iso_out.l = 1;
1833 /* Setup and start the transfer for this endpoint */
1834 dwcep->xiso_active_xfers++;
1835 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
1836 dev_if->out_ep_regs[dwcep->num]->
1837 doepdma, dwcep->dma_desc_addr);
1839 diepctl.b.epena = 1;
1841 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
1842 dev_if->out_ep_regs[dwcep->num]->
1843 doepctl, 0, diepctl.d32);
1854 * - Remove the request from the queue
/* Complete the current per-IO ISOC transfer on `ep`: pop the head request,
 * copy actual lengths/status from the hardware descriptors back into the
 * portable per-frame descriptors, invoke the gadget's xisoc_complete
 * callback (lock dropped around it), free the request, and kick off the
 * next queued transfer. NOTE(review): decimated — `int i`, braces and the
 * early-return paths are on missing lines. */
1856 void complete_xiso_ep(dwc_otg_pcd_ep_t * ep)
1858 dwc_otg_pcd_request_t *req = NULL;
1859 struct dwc_iso_xreq_port *ereq = NULL;
1860 struct dwc_iso_pkt_desc_port *ddesc_iso = NULL;
1861 dwc_ep_t *dwcep = NULL;
1865 dwcep = &ep->dwc_ep;
1867 /* Get the first pending request from the queue */
1868 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1869 req = DWC_CIRCLEQ_FIRST(&ep->queue);
1871 DWC_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
1874 dwcep->xiso_active_xfers--;
1875 dwcep->xiso_queued_xfers--;
1876 /* Remove this request from the queue */
1877 DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
1879 DWC_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
1884 ereq = &req->ext_req;
1885 ddesc_iso = ereq->per_io_frame_descs;
/* Going negative would indicate a completion/start accounting bug. */
1887 if (dwcep->xiso_active_xfers < 0) {
1888 DWC_WARN("EP#%d (xiso_active_xfers=%d)", dwcep->num,
1889 dwcep->xiso_active_xfers);
1892 /* Fill the Isoc descs of portable extended req from dma descriptors */
1893 for (i = 0; i < ereq->pio_pkt_count; i++) {
1894 if (dwcep->is_in) { /* IN endpoints */
/* txbytes/rxbytes hold the residue, so actual = requested - residue. */
1895 ddesc_iso[i].actual_length = ddesc_iso[i].length -
1896 dwcep->desc_addr[i].status.b_iso_in.txbytes;
1897 ddesc_iso[i].status =
1898 dwcep->desc_addr[i].status.b_iso_in.txsts;
1899 } else { /* OUT endpoints */
1900 ddesc_iso[i].actual_length = ddesc_iso[i].length -
1901 dwcep->desc_addr[i].status.b_iso_out.rxbytes;
1902 ddesc_iso[i].status =
1903 dwcep->desc_addr[i].status.b_iso_out.rxsts;
/* Drop the lock across the gadget callback to avoid re-entry deadlock. */
1907 DWC_SPINUNLOCK(ep->pcd->lock);
1909 /* Call the completion function in the non-portable logic */
1910 ep->pcd->fops->xisoc_complete(ep->pcd, ep->priv, req->priv, 0,
1913 DWC_SPINLOCK(ep->pcd->lock);
1915 /* Free the request - specific freeing needed for extended request object */
1916 dwc_pcd_xiso_ereq_free(ep, req);
1918 /* Start the next request */
1919 dwc_otg_pcd_xiso_start_next_request(ep->pcd, ep);
1925 * Create and initialize the Isoc pkt descriptors of the extended request.
/* Deep-copy the gadget-supplied extended ISOC request (`ereq_nonport`) into
 * the PCD's own request: allocate a matching array of per-frame descriptors
 * (atomically when requested) and copy length/offset/status fields.
 * Returns 0 or -DWC_E_NO_MEMORY. NOTE(review): the `atomic_alloc` parameter
 * declaration, loop variable and trailing `return 0;` are on missing lines. */
1928 static int dwc_otg_pcd_xiso_create_pkt_descs(dwc_otg_pcd_request_t * req,
1932 struct dwc_iso_xreq_port *ereq = NULL;
1933 struct dwc_iso_xreq_port *req_mapped = NULL;
1934 struct dwc_iso_pkt_desc_port *ipds = NULL; /* To be created in this function */
1938 ereq = &req->ext_req;
1939 req_mapped = (struct dwc_iso_xreq_port *)ereq_nonport;
1940 pkt_count = req_mapped->pio_pkt_count;
1942 /* Create the isoc descs */
/* Atomic allocation is used when called from non-sleepable context. */
1944 ipds = DWC_ALLOC_ATOMIC(sizeof(*ipds) * pkt_count);
1946 ipds = DWC_ALLOC(sizeof(*ipds) * pkt_count);
1950 DWC_ERROR("Failed to allocate isoc descriptors");
1951 return -DWC_E_NO_MEMORY;
1954 /* Initialize the extended request fields */
1955 ereq->per_io_frame_descs = ipds;
1956 ereq->error_count = 0;
1957 ereq->pio_alloc_pkt_count = pkt_count;
1958 ereq->pio_pkt_count = pkt_count;
1959 ereq->tr_sub_flags = req_mapped->tr_sub_flags;
1961 /* Init the Isoc descriptors */
1962 for (i = 0; i < pkt_count; i++) {
1963 ipds[i].length = req_mapped->per_io_frame_descs[i].length;
1964 ipds[i].offset = req_mapped->per_io_frame_descs[i].offset;
1965 ipds[i].status = req_mapped->per_io_frame_descs[i].status; /* 0 */
1966 ipds[i].actual_length =
1967 req_mapped->per_io_frame_descs[i].actual_length;
1973 static void prn_ext_request(struct dwc_iso_xreq_port *ereq)
1975 struct dwc_iso_pkt_desc_port *xfd = NULL;
1978 DWC_DEBUG("per_io_frame_descs=%p", ereq->per_io_frame_descs);
1979 DWC_DEBUG("tr_sub_flags=%d", ereq->tr_sub_flags);
1980 DWC_DEBUG("error_count=%d", ereq->error_count);
1981 DWC_DEBUG("pio_alloc_pkt_count=%d", ereq->pio_alloc_pkt_count);
1982 DWC_DEBUG("pio_pkt_count=%d", ereq->pio_pkt_count);
1983 DWC_DEBUG("res=%d", ereq->res);
1985 for (i = 0; i < ereq->pio_pkt_count; i++) {
1986 xfd = &ereq->per_io_frame_descs[0];
1987 DWC_DEBUG("FD #%d", i);
1989 DWC_DEBUG("xfd->actual_length=%d", xfd->actual_length);
1990 DWC_DEBUG("xfd->length=%d", xfd->length);
1991 DWC_DEBUG("xfd->offset=%d", xfd->offset);
1992 DWC_DEBUG("xfd->status=%d", xfd->status);
/* Queue an extended (per-IO) ISOC request: allocate a PCD request object,
 * deep-copy the gadget's per-frame descriptors, record the DMA buffer on
 * the endpoint, append to the EP queue, and — for ASAP submissions — start
 * the transfer immediately if the EP is idle. Returns 0 or a negative
 * -DWC_E_* code. NOTE(review): decimated — `res`, braces and some error
 * cleanup lines are missing from this extract. */
1999 int dwc_otg_pcd_xiso_ep_queue(dwc_otg_pcd_t * pcd, void *ep_handle,
2000 uint8_t * buf, dwc_dma_t dma_buf, uint32_t buflen,
2001 int zero, void *req_handle, int atomic_alloc,
2004 dwc_otg_pcd_request_t *req = NULL;
2005 dwc_otg_pcd_ep_t *ep;
2006 dwc_irqflags_t flags;
2009 ep = get_ep_from_handle(pcd, ep_handle);
2011 DWC_WARN("bad ep\n");
2012 return -DWC_E_INVALID;
2015 /* We support this extension only for DDMA mode */
2016 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC)
2017 if (!GET_CORE_IF(pcd)->dma_desc_enable)
2018 return -DWC_E_INVALID;
2020 /* Create a dwc_otg_pcd_request_t object */
2022 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2024 req = DWC_ALLOC(sizeof(*req));
2028 return -DWC_E_NO_MEMORY;
2031 /* Create the Isoc descs for this request which shall be the exact match
2032 * of the structure sent to us from the non-portable logic */
2034 dwc_otg_pcd_xiso_create_pkt_descs(req, ereq_nonport, atomic_alloc);
2036 DWC_WARN("Failed to init the Isoc descriptors");
2041 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2043 DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2046 req->length = buflen;
2047 req->sent_zlp = zero;
2048 req->priv = req_handle;
2050 //DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
/* Stage the transfer buffer on the endpoint; per-frame offsets are applied
 * relative to dma_buf when the descriptor chain is built. */
2051 ep->dwc_ep.dma_addr = dma_buf;
2052 ep->dwc_ep.start_xfer_buff = buf;
2053 ep->dwc_ep.xfer_buff = buf;
2054 ep->dwc_ep.xfer_len = 0;
2055 ep->dwc_ep.xfer_count = 0;
2056 ep->dwc_ep.sent_zlp = 0;
2057 ep->dwc_ep.total_len = buflen;
2059 /* Add this request to the tail */
2060 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2061 ep->dwc_ep.xiso_queued_xfers++;
2063 //DWC_DEBUG("CP_0");
2064 //DWC_DEBUG("req->ext_req.tr_sub_flags=%d", req->ext_req.tr_sub_flags);
2065 //prn_ext_request((struct dwc_iso_xreq_port *) ereq_nonport);
2066 //prn_ext_request(&req->ext_req);
2068 //DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2070 /* If the req->status == ASAP then check if there is any active transfer
2071 * for this endpoint. If no active transfers, then get the first entry
2072 * from the queue and start that transfer
2074 if (req->ext_req.tr_sub_flags == DWC_EREQ_TF_ASAP) {
2075 res = dwc_otg_pcd_xiso_start_next_request(pcd, ep);
2077 DWC_WARN("Failed to start the next Isoc transfer");
2078 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2084 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2089 /* END ifdef DWC_UTE_PER_IO ***************************************************/
/* Queue a transfer request on an endpoint (the main gadget submit path).
 * Allocates the request, bounce-buffers unaligned DMA addresses in
 * buffer-DMA mode, and either starts the transfer immediately (idle EP /
 * EP0 state machine) or defers: ISOC waits for a token interrupt, stopped
 * IN EPs in slave mode arm the IN-token-with-TxFIFO-empty interrupt.
 * Returns 0 or a negative -DWC_E_* code.
 * NOTE(review): decimated extract — braces, `retval`, parts of the EP0
 * state machine and the ISOC-start logic are on missing lines. */
2090 int dwc_otg_pcd_ep_queue(dwc_otg_pcd_t * pcd, void *ep_handle,
2091 uint8_t * buf, dwc_dma_t dma_buf, uint32_t buflen,
2092 int zero, void *req_handle, int atomic_alloc)
2094 dwc_irqflags_t flags;
2095 dwc_otg_pcd_request_t *req;
2096 dwc_otg_pcd_ep_t *ep;
2097 uint32_t max_transfer;
2099 ep = get_ep_from_handle(pcd, ep_handle);
/* EP0 has no descriptor but is always valid; others must be enabled. */
2100 if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2101 DWC_WARN("bad ep\n");
2102 return -DWC_E_INVALID;
2106 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2108 req = DWC_ALLOC(sizeof(*req));
2112 return -DWC_E_NO_MEMORY;
2114 DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2115 if (!GET_CORE_IF(pcd)->core_params->opt) {
2116 if (ep->dwc_ep.num != 0) {
2117 DWC_ERROR("queue req %p, len %d buf %p\n",
2118 req_handle, buflen, buf);
2124 req->length = buflen;
2125 req->sent_zlp = zero;
2126 req->priv = req_handle;
2127 req->dw_align_buf = NULL;
/* Buffer-DMA needs 4-byte alignment; allocate a bounce buffer for
 * misaligned requests (descriptor-DMA handles alignment itself).
 * NOTE(review): the allocation result is not visibly checked here. */
2128 if ((dma_buf & 0x3) && GET_CORE_IF(pcd)->dma_enable
2129 && !GET_CORE_IF(pcd)->dma_desc_enable)
2130 req->dw_align_buf = DWC_DMA_ALLOC(buflen,
2131 &req->dw_align_buf_dma);
2132 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2135 * After adding request to the queue for IN ISOC wait for In Token Received
2136 * when TX FIFO is empty interrupt and for OUT ISOC wait for OUT Token
2137 * Received when EP is disabled interrupt to obtain starting microframe
2138 * (odd/even) start transfer
2140 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
2142 depctl_data_t depctl = {.d32 =
2143 DWC_READ_REG32(&pcd->core_if->dev_if->
2144 in_ep_regs[ep->dwc_ep.num]->
2146 ++pcd->request_pending;
2148 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2149 if (ep->dwc_ep.is_in) {
2151 DWC_WRITE_REG32(&pcd->core_if->dev_if->
2152 in_ep_regs[ep->dwc_ep.num]->
2153 diepctl, depctl.d32);
2156 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2162 * For EP0 IN without premature status, zlp is required?
2164 if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
2165 DWC_DEBUGPL(DBG_PCDV, "%d-OUT ZLP\n", ep->dwc_ep.num);
2169 /* Start the transfer */
/* An idle, non-stopped EP starts immediately; otherwise the request just
 * joins the queue (tail insert near the end of the function). */
2170 if (DWC_CIRCLEQ_EMPTY(&ep->queue) && !ep->stopped) {
2172 if (ep->dwc_ep.num == 0) {
2173 switch (pcd->ep0state) {
2174 case EP0_IN_DATA_PHASE:
2175 DWC_DEBUGPL(DBG_PCD,
2176 "%s ep0: EP0_IN_DATA_PHASE\n",
2180 case EP0_OUT_DATA_PHASE:
2181 DWC_DEBUGPL(DBG_PCD,
2182 "%s ep0: EP0_OUT_DATA_PHASE\n",
2184 if (pcd->request_config) {
2185 /* Complete STATUS PHASE */
2186 ep->dwc_ep.is_in = 1;
2187 pcd->ep0state = EP0_IN_STATUS_PHASE;
2191 case EP0_IN_STATUS_PHASE:
2192 DWC_DEBUGPL(DBG_PCD,
2193 "%s ep0: EP0_IN_STATUS_PHASE\n",
2198 DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
2200 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2201 return -DWC_E_SHUTDOWN;
2204 ep->dwc_ep.dma_addr = dma_buf;
2205 ep->dwc_ep.start_xfer_buff = buf;
2206 ep->dwc_ep.xfer_buff = buf;
2207 ep->dwc_ep.xfer_len = buflen;
2208 ep->dwc_ep.xfer_count = 0;
2209 ep->dwc_ep.sent_zlp = 0;
2210 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
/* A maxpacket-multiple transfer with zlp requested needs a trailing
 * zero-length packet to terminate. */
2213 if ((ep->dwc_ep.xfer_len %
2214 ep->dwc_ep.maxpacket == 0)
2215 && (ep->dwc_ep.xfer_len != 0)) {
2216 ep->dwc_ep.sent_zlp = 1;
2221 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
2223 } // non-ep0 endpoints
/* CFI-managed buffer modes build their own descriptors. */
2226 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2227 /* store the request length */
2228 ep->dwc_ep.cfi_req_len = buflen;
2229 pcd->cfi->ops.build_descriptors(pcd->cfi, pcd,
2234 GET_CORE_IF(ep->pcd)->core_params->
2237 /* Setup and start the Transfer */
/* IN data is copied into the aligned bounce buffer before starting. */
2238 if (req->dw_align_buf) {
2239 if (ep->dwc_ep.is_in)
2240 dwc_memcpy(req->dw_align_buf,
2242 ep->dwc_ep.dma_addr =
2243 req->dw_align_buf_dma;
2244 ep->dwc_ep.start_xfer_buff =
2246 ep->dwc_ep.xfer_buff =
2249 ep->dwc_ep.dma_addr = dma_buf;
2250 ep->dwc_ep.start_xfer_buff = buf;
2251 ep->dwc_ep.xfer_buff = buf;
2253 ep->dwc_ep.xfer_len = 0;
2254 ep->dwc_ep.xfer_count = 0;
2255 ep->dwc_ep.sent_zlp = 0;
2256 ep->dwc_ep.total_len = buflen;
2258 ep->dwc_ep.maxxfer = max_transfer;
/* Descriptor-DMA caps a single transfer; OUT additionally rounds the
 * cap down to a 4-byte multiple. */
2259 if (GET_CORE_IF(pcd)->dma_desc_enable) {
2260 uint32_t out_max_xfer =
2261 DDMA_MAX_TRANSFER_SIZE -
2262 (DDMA_MAX_TRANSFER_SIZE % 4);
2263 if (ep->dwc_ep.is_in) {
2264 if (ep->dwc_ep.maxxfer >
2265 DDMA_MAX_TRANSFER_SIZE) {
2266 ep->dwc_ep.maxxfer =
2267 DDMA_MAX_TRANSFER_SIZE;
2270 if (ep->dwc_ep.maxxfer >
2272 ep->dwc_ep.maxxfer =
/* Partial transfers must end on a packet boundary. */
2277 if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
2278 ep->dwc_ep.maxxfer -=
2279 (ep->dwc_ep.maxxfer %
2280 ep->dwc_ep.maxpacket);
2284 if ((ep->dwc_ep.total_len %
2285 ep->dwc_ep.maxpacket == 0)
2286 && (ep->dwc_ep.total_len != 0)) {
2287 ep->dwc_ep.sent_zlp = 1;
2293 dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
2299 ++pcd->request_pending;
2300 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
/* Slave mode: a stopped IN EP is restarted from the IN-token-received-
 * with-TxFIFO-empty interrupt, so unmask it here. */
2301 if (ep->dwc_ep.is_in && ep->stopped
2302 && !(GET_CORE_IF(pcd)->dma_enable)) {
2303 /** @todo NGS Create a function for this. */
2304 diepmsk_data_t diepmsk = {.d32 = 0 };
2305 diepmsk.b.intktxfemp = 1;
2306 if (GET_CORE_IF(pcd)->multiproc_int_enable) {
2307 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
2308 dev_if->dev_global_regs->diepeachintmsk
2309 [ep->dwc_ep.num], 0,
2312 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
2313 dev_if->dev_global_regs->
2314 diepmsk, 0, diepmsk.d32);
2319 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
/* Cancel a previously queued request identified by its gadget handle:
 * verify it is on this endpoint's queue, then complete it with
 * -DWC_E_RESTART. Returns 0, -DWC_E_INVALID for bad args / not queued,
 * or -DWC_E_SHUTDOWN.
 * NOTE(review): if the FOREACH loop finds no match, `req` ends up pointing
 * at the circular-queue head sentinel before the `req->priv` test —
 * presumably benign for this CIRCLEQ implementation, but confirm. */
2324 int dwc_otg_pcd_ep_dequeue(dwc_otg_pcd_t * pcd, void *ep_handle,
2327 dwc_irqflags_t flags;
2328 dwc_otg_pcd_request_t *req;
2329 dwc_otg_pcd_ep_t *ep;
2331 ep = get_ep_from_handle(pcd, ep_handle);
2332 if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2333 DWC_WARN("bad argument\n");
2334 return -DWC_E_INVALID;
2337 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2339 /* make sure it's actually queued on this endpoint */
2340 DWC_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
2341 if (req->priv == (void *)req_handle) {
2346 if (req->priv != (void *)req_handle) {
2347 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2348 return -DWC_E_INVALID;
/* Complete the request with a restart status so the gadget can resubmit. */
2351 if (!DWC_CIRCLEQ_EMPTY_ENTRY(req, queue_entry)) {
2352 dwc_otg_request_done(ep, req, -DWC_E_RESTART);
2357 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2359 return req ? 0 : -DWC_E_SHUTDOWN;
/* Set or clear an endpoint halt (STALL). `value`: 0 = clear stall,
 * 1 = set stall (refused while transfers are queued, or while an IN EP's
 * Tx FIFO still holds data in descriptor-DMA mode), 2 = clear the
 * stall-clear flag, 3 = set it (wedge-style sticky halt). ISOC endpoints
 * cannot be halted. Returns 0 or a negative -DWC_E_* code.
 * NOTE(review): `retval` declaration and some assignment lines are on
 * lines missing from this extract. */
2363 int dwc_otg_pcd_ep_halt(dwc_otg_pcd_t * pcd, void *ep_handle, int value)
2365 dwc_otg_pcd_ep_t *ep;
2366 dwc_irqflags_t flags;
2369 ep = get_ep_from_handle(pcd, ep_handle);
2371 if (!ep || (!ep->desc && ep != &pcd->ep0) ||
2372 (ep->desc && (ep->desc->bmAttributes == UE_ISOCHRONOUS))) {
2373 DWC_WARN("%s, bad ep\n", __func__);
2374 return -DWC_E_INVALID;
2377 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2378 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
2379 DWC_WARN("%d %s XFer In process\n", ep->dwc_ep.num,
2380 ep->dwc_ep.is_in ? "IN" : "OUT");
2381 retval = -DWC_E_AGAIN;
2382 } else if (value == 0) {
2383 ep->dwc_ep.stall_clear_flag = 0;
2384 dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
2385 } else if (value == 1) {
/* In DDMA mode, refuse to stall an IN EP whose FIFO is not empty. */
2387 if (ep->dwc_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
2388 dtxfsts_data_t txstatus;
2389 fifosize_data_t txfifosize;
2392 DWC_READ_REG32(&GET_CORE_IF(pcd)->
2393 core_global_regs->dtxfsiz[ep->dwc_ep.
2396 DWC_READ_REG32(&GET_CORE_IF(pcd)->
2397 dev_if->in_ep_regs[ep->dwc_ep.num]->
2400 if (txstatus.b.txfspcavail < txfifosize.b.depth) {
2401 DWC_WARN("%s() Data In Tx Fifo\n", __func__);
2402 retval = -DWC_E_AGAIN;
/* Stalling EP0 forces the control state machine into EP0_STALL. */
2404 if (ep->dwc_ep.num == 0) {
2405 pcd->ep0state = EP0_STALL;
2409 dwc_otg_ep_set_stall(GET_CORE_IF(pcd),
2413 if (ep->dwc_ep.num == 0) {
2414 pcd->ep0state = EP0_STALL;
2418 dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
2420 } else if (value == 2) {
2421 ep->dwc_ep.stall_clear_flag = 0;
2422 } else if (value == 3) {
2423 ep->dwc_ep.stall_clear_flag = 1;
2427 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2433 * This function initiates remote wakeup of the host from suspend state.
/* Signal (set=1) or stop signalling (set=0) remote wakeup to the host while
 * the bus is suspended, honouring the host-granted DEVICE_REMOTE_WAKEUP
 * feature. With ADP enabled the core must first be revived: ADP probing is
 * stopped, power-down logic disabled, and the core reinitialized in device
 * mode before SRP is initiated. NOTE(review): decimated — the dsts
 * declaration, some braces and register operand lines are missing. */
2435 void dwc_otg_pcd_rem_wkup_from_suspend(dwc_otg_pcd_t * pcd, int set)
2437 dctl_data_t dctl = { 0 };
2438 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2441 dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
/* Remote wakeup is only meaningful while DSTS reports suspend. */
2442 if (!dsts.b.suspsts) {
2443 DWC_WARN("Remote wakeup while is not in suspend state\n");
2445 /* Check if DEVICE_REMOTE_WAKEUP feature enabled */
2446 if (pcd->remote_wakeup_enable) {
2449 if (core_if->adp_enable) {
2450 gpwrdn_data_t gpwrdn;
2452 dwc_otg_adp_probe_stop(core_if);
2454 /* Mask SRP detected interrupt from Power Down Logic */
2456 gpwrdn.b.srp_det_msk = 1;
2457 DWC_MODIFY_REG32(&core_if->
2458 core_global_regs->gpwrdn,
2461 /* Disable Power Down Logic */
2463 gpwrdn.b.pmuactv = 1;
2464 DWC_MODIFY_REG32(&core_if->
2465 core_global_regs->gpwrdn,
2469 * Initialize the Core for Device mode.
2471 core_if->op_state = B_PERIPHERAL;
2472 dwc_otg_core_init(core_if);
2473 dwc_otg_enable_global_interrupts(core_if);
2474 cil_pcd_start(core_if);
2476 dwc_otg_initiate_srp(core_if);
/* RmtWkUpSig drives resume signalling on the bus. */
2479 dctl.b.rmtwkupsig = 1;
2480 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
2482 DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
2485 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
2487 DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
2490 DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
2494 #ifdef CONFIG_USB_DWC_OTG_LPM
2496 * This function initiates remote wakeup of the host from L1 sleep state.
/* Initiate remote wakeup from LPM L1 sleep: bail out unless the port is in
 * L1, the host enabled remote wakeup, and the core reports resume-OK; then
 * disable UTMI sleep/L1 clock gating and pulse RmtWkUpSig when `set`.
 * NOTE(review): decimated — the early-return statements after each guard
 * and some closing braces are on missing lines. */
2498 void dwc_otg_pcd_rem_wkup_from_sleep(dwc_otg_pcd_t * pcd, int set)
2500 glpmcfg_data_t lpmcfg;
2501 pcgcctl_data_t pcgcctl = {.d32 = 0 };
2503 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2505 lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
2507 /* Check if we are in L1 state */
2508 if (!lpmcfg.b.prt_sleep_sts) {
2509 DWC_DEBUGPL(DBG_PCD, "Device is not in sleep state\n");
2513 /* Check if host allows remote wakeup */
2514 if (!lpmcfg.b.rem_wkup_en) {
2515 DWC_DEBUGPL(DBG_PCD, "Host does not allow remote wakeup\n");
2519 /* Check if Resume OK */
2520 if (!lpmcfg.b.sleep_state_resumeok) {
2521 DWC_DEBUGPL(DBG_PCD, "Sleep state resume is not OK\n");
/* Re-read GLPMCFG, turn off UTMI sleep and the HIRD threshold bit that
 * gates deep sleep, and disable L1 clock gating before signalling. */
2525 lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
2526 lpmcfg.b.en_utmi_sleep = 0;
2527 lpmcfg.b.hird_thres &= (~(1 << 4));
2529 /* Clear Enbl_L1Gating bit. */
2530 pcgcctl.b.enbl_sleep_gating = 1;
2531 DWC_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32,0);
2533 DWC_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
2536 dctl_data_t dctl = {.d32 = 0 };
2537 dctl.b.rmtwkupsig = 1;
2538 /* Set RmtWkUpSig bit to start remote wakup signaling.
2539 * Hardware will automatically clear this bit.
2541 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
2543 DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
2550 * Performs remote wakeup.
/* Dispatch a remote-wakeup request under the PCD lock: from L1 it goes via
 * the LPM sleep path (when CONFIG_USB_DWC_OTG_LPM is set), otherwise via
 * the suspend path. No-op in host mode. */
2552 void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t * pcd, int set)
2554 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2555 dwc_irqflags_t flags;
2556 if (dwc_otg_is_device_mode(core_if)) {
2557 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2558 #ifdef CONFIG_USB_DWC_OTG_LPM
2559 if (core_if->lx_state == DWC_OTG_L1) {
2560 dwc_otg_pcd_rem_wkup_from_sleep(pcd, set);
2563 dwc_otg_pcd_rem_wkup_from_suspend(pcd, set);
2564 #ifdef CONFIG_USB_DWC_OTG_LPM
2567 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
/* Pulse a soft disconnect: assert DCTL.SftDiscon, busy-wait `no_of_usecs`
 * microseconds, then deassert it so the host re-enumerates the device.
 * Only valid in device mode. */
2572 void dwc_otg_pcd_disconnect_us(dwc_otg_pcd_t * pcd, int no_of_usecs)
2574 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2575 dctl_data_t dctl = { 0 };
2577 if (dwc_otg_is_device_mode(core_if)) {
2578 dctl.b.sftdiscon = 1;
2579 DWC_PRINTF("Soft disconnect for %d useconds\n",no_of_usecs);
2580 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
2581 dwc_udelay(no_of_usecs);
2582 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32,0);
2585 DWC_PRINTF("NOT SUPPORTED IN HOST MODE\n");
/* Wake the link: with a valid B-session and the bus suspended, signal
 * remote wakeup; without a session, initiate SRP to start one.
 * NOTE(review): the dsts declaration/read target and the return value are
 * on lines missing from this extract. */
2591 int dwc_otg_pcd_wakeup(dwc_otg_pcd_t * pcd)
2594 gotgctl_data_t gotgctl;
2597 * This function starts the Protocol if no session is in progress. If
2598 * a session is already in progress, but the device is suspended,
2599 * remote wakeup signaling is started.
2602 /* Check if valid session */
2604 DWC_READ_REG32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
2605 if (gotgctl.b.bsesvld) {
2606 /* Check if suspend state */
2609 (GET_CORE_IF(pcd)->dev_if->
2610 dev_global_regs->dsts));
2611 if (dsts.b.suspsts) {
2612 dwc_otg_pcd_remote_wakeup(pcd, 1);
2615 dwc_otg_pcd_initiate_srp(pcd);
2624 * Implement Soft-Connect and Soft-Disconnect function
/* Soft-connect: clear DCTL.SftDiscon (bit 1) so the D+ pull-up engages and
 * the host sees the device. NOTE(review): two interior lines are missing
 * from this extract. */
2627 void dwc_otg_pcd_pullup_enable(dwc_otg_pcd_t * pcd)
2630 DWC_MODIFY_REG32( &(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl),2,0 );
/* Soft-disconnect: set DCTL.SftDiscon (bit 1) to drop the D+ pull-up and
 * appear disconnected from the host. NOTE(review): two interior lines are
 * missing from this extract. */
2633 void dwc_otg_pcd_pullup_disable(dwc_otg_pcd_t * pcd)
2636 DWC_MODIFY_REG32( &(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl),0,2 );
2639 void dwc_pcd_reset(dwc_otg_pcd_t *pcd)
2641 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2642 dwc_otg_disable_global_interrupts(core_if);
2643 dwc_otg_core_init(core_if);
2644 dwc_otg_pcd_reinit(pcd);
2645 dwc_otg_core_dev_init(core_if);
2646 dwc_otg_enable_global_interrupts(core_if);
2650 * Start the SRP timer to detect when the SRP does not complete within
2653 * @param pcd the pcd structure.
2655 void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t * pcd)
2657 dwc_irqflags_t flags;
2658 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2659 dwc_otg_initiate_srp(GET_CORE_IF(pcd));
2660 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2663 int dwc_otg_pcd_get_frame_number(dwc_otg_pcd_t * pcd)
2665 return dwc_otg_get_frame_number(GET_CORE_IF(pcd));
2668 int dwc_otg_pcd_is_lpm_enabled(dwc_otg_pcd_t * pcd)
2670 return GET_CORE_IF(pcd)->core_params->lpm_enable;
2673 int dwc_otg_pcd_is_besl_enabled(dwc_otg_pcd_t * pcd)
2675 return GET_CORE_IF(pcd)->core_params->besl_enable;
2678 int dwc_otg_pcd_get_param_baseline_besl(dwc_otg_pcd_t * pcd)
2680 return GET_CORE_IF(pcd)->core_params->baseline_besl;
2683 int dwc_otg_pcd_get_param_deep_besl(dwc_otg_pcd_t * pcd)
2685 return GET_CORE_IF(pcd)->core_params->deep_besl;
2688 uint32_t get_b_hnp_enable(dwc_otg_pcd_t * pcd)
2690 return pcd->b_hnp_enable;
2693 uint32_t get_a_hnp_support(dwc_otg_pcd_t * pcd)
2695 return pcd->a_hnp_support;
2698 uint32_t get_a_alt_hnp_support(dwc_otg_pcd_t * pcd)
2700 return pcd->a_alt_hnp_support;
2703 int dwc_otg_pcd_get_rmwkup_enable(dwc_otg_pcd_t * pcd)
2705 return pcd->remote_wakeup_enable;
2708 #endif /* DWC_HOST_ONLY */