1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 * ========================================================================== */
36 * This file implements PCD Core. All code in this file is portable and doesn't
37 * use any OS specific functions.
38 * PCD Core provides Interface, defined in <code><dwc_otg_pcd_if.h></code>
39 * header file, which can be used to implement OS specific PCD interface.
41 * An important function of the PCD is managing interrupts generated
42 * by the DWC_otg controller. The implementation of the DWC_otg device
43 * mode interrupt service routines is in dwc_otg_pcd_intr.c.
45 * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
46 * @todo Does it work when the request size is greater than DEPTSIZ
51 #include "dwc_otg_pcd.h"
54 #include "dwc_otg_cfi.h"
56 extern int init_cfi(cfiobject_t *cfiobj);
60 * Choose endpoint from ep arrays using usb_ep structure.
62 static dwc_otg_pcd_ep_t *get_ep_from_handle(dwc_otg_pcd_t *pcd, void *handle)
/* Map a gadget-layer endpoint handle back to the PCD endpoint that owns it.
 * EP0 is checked first, then the IN and OUT endpoint arrays are scanned for a
 * matching ->priv pointer.
 * NOTE(review): this listing is missing lines (the EP0 return, the loop close
 * and the not-found return are not visible) — consult the full file. */
65 if (pcd->ep0.priv == handle) {
68 for (i = 0; i < MAX_EPS_CHANNELS - 1; i++) {
/* A handle may belong to either direction; check IN before OUT. */
69 if (pcd->in_ep[i].priv == handle)
70 return &pcd->in_ep[i];
71 if (pcd->out_ep[i].priv == handle)
72 return &pcd->out_ep[i];
79 * This function completes a request. It calls the request's callback.
81 void dwc_otg_request_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req,
/* Complete a single request: unlink it from the EP queue, invoke the gadget's
 * completion callback with the given status, and decrement the PCD's
 * pending-request counter. ep->stopped is saved before the callback and
 * restored afterwards so the callback cannot permanently alter it. */
84 unsigned stopped = ep->stopped;
86 DWC_DEBUGPL(DBG_PCDV, "%s(ep %p req %p)\n", __func__, ep, req);
/* Remove before the callback so the callback sees a consistent queue. */
87 DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
89 /* don't modify queue heads during completion callback */
91 /* spin_unlock/spin_lock now done in fops->complete() */
92 ep->pcd->fops->complete(ep->pcd, ep->priv, req->priv, status,
/* Guard against underflow of the pending-request counter. */
95 if (ep->pcd->request_pending > 0) {
96 --ep->pcd->request_pending;
99 ep->stopped = stopped;
104 * This function terminates all the requests in the EP request queue.
106 void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *ep)
/* Abort every queued request on the EP, completing each with
 * -DWC_E_SHUTDOWN. dwc_otg_request_done() unlinks the head entry, so the
 * loop terminates once the circular queue is empty. */
108 dwc_otg_pcd_request_t *req;
112 /* called with irqs blocked?? */
113 while (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
114 req = DWC_CIRCLEQ_FIRST(&ep->queue);
115 dwc_otg_request_done(ep, req, -DWC_E_SHUTDOWN);
/* Bind the OS-specific gadget function ops to the PCD.
 * NOTE(review): the function body is not visible in this listing — only the
 * signature survives; see the full file for the implementation. */
119 void dwc_otg_pcd_start(dwc_otg_pcd_t *pcd,
120 const struct dwc_otg_pcd_function_ops *fops)
126 * PCD Callback function for initializing the PCD when switching to
129 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
131 static int32_t dwc_otg_pcd_start_cb(void *p)
/* CIL "start" callback, invoked when the controller switches to device mode.
 * @param p void pointer to the dwc_otg_pcd_t. When in device mode, points
 * core_if->lock at the PCD's lock so CIL and PCD serialize on the same lock. */
133 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
134 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
137 * Initialized the Core for Device mode.
139 if (dwc_otg_is_device_mode(core_if)) {
140 /* dwc_otg_core_dev_init(core_if); */
141 /* Set core_if's lock pointer to the pcd->lock */
142 core_if->lock = pcd->lock;
147 /** CFI-specific buffer allocation function for EP */
/* CFI wrapper: resolve the EP handle and delegate buffer allocation to the
 * CFI object's ep_alloc_buf op. Presumably compiled only under DWC_UTE_CFI,
 * with the bare prototype below as the non-CFI alternative — the #ifdef/#else
 * lines are not visible in this listing; TODO confirm. */
149 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t *pcd, void *pep, dwc_dma_t *addr,
150 size_t buflen, int flags)
152 dwc_otg_pcd_ep_t *ep;
153 ep = get_ep_from_handle(pcd, pep);
155 DWC_WARN("bad ep\n");
/* NOTE(review): returning a negative error code from a uint8_t*-returning
 * function converts an integer to a pointer; callers cannot reliably
 * distinguish it from a valid buffer. Should presumably return NULL. */
156 return -DWC_E_INVALID;
159 return pcd->cfi->ops.ep_alloc_buf(pcd->cfi, pcd, ep, addr, buflen,
163 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t *pcd, void *pep, dwc_dma_t *addr,
164 size_t buflen, int flags);
168 * PCD Callback function for notifying the PCD when resuming from
171 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
173 static int32_t dwc_otg_pcd_resume_cb(void *p)
/* CIL "resume_wakeup" callback: forward the resume to the gadget (if it
 * registered a resume handler) and cancel a running SRP timeout timer.
 * @param p void pointer to the dwc_otg_pcd_t. */
175 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
177 if (pcd->fops->resume) {
178 pcd->fops->resume(pcd);
181 /* Stop the SRP timeout timer. */
/* The timer is only cancelled when the PHY is not an FS PHY with I2C —
 * i.e. whenever the FS-PHY/I2C combination does not manage SRP itself. */
182 if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS)
183 || (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
184 if (GET_CORE_IF(pcd)->srp_timer_started) {
185 GET_CORE_IF(pcd)->srp_timer_started = 0;
186 DWC_TIMER_CANCEL(GET_CORE_IF(pcd)->srp_timer);
193 * PCD Callback function for notifying the PCD device is suspended.
195 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
197 static int32_t dwc_otg_pcd_suspend_cb(void *p)
/* CIL "suspend" callback: notify the gadget of the suspend. The PCD lock is
 * dropped around the gadget callback so the gadget may re-enter the PCD.
 * @param p void pointer to the dwc_otg_pcd_t. */
199 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
201 if (pcd->fops->suspend) {
202 DWC_SPINUNLOCK(pcd->lock);
203 pcd->fops->suspend(pcd);
204 DWC_SPINLOCK(pcd->lock);
211 * PCD Callback function for stopping the PCD when switching to Host
214 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
216 static int32_t dwc_otg_pcd_stop_cb(void *p)
/* CIL "stop" callback, invoked when switching to host mode: stop the PCD.
 * @param p void pointer to the dwc_otg_pcd_t. */
218 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
/* Forward declaration kept local; dwc_otg_pcd_stop lives elsewhere. */
219 extern void dwc_otg_pcd_stop(dwc_otg_pcd_t *_pcd);
221 dwc_otg_pcd_stop(pcd);
226 * PCD Callback structure for handling mode switching.
/* CIL callback table for device-mode switching; registered with the CIL in
 * dwc_otg_pcd_init() via dwc_otg_cil_register_pcd_callbacks(), which also
 * fills in .p with the pcd pointer. */
228 static dwc_otg_cil_callbacks_t pcd_callbacks = {
229 .start = dwc_otg_pcd_start_cb,
230 .stop = dwc_otg_pcd_stop_cb,
231 .suspend = dwc_otg_pcd_suspend_cb,
232 .resume_wakeup = dwc_otg_pcd_resume_cb,
233 .p = 0, /* Set at registration */
237 * This function allocates a DMA Descriptor chain for the Endpoint
238 * buffer to be used for a transfer to/from the specified endpoint.
/* Allocate `count` device DMA descriptors from DMA-coherent memory; the bus
 * address of the chain is returned through dma_desc_addr. */
240 dwc_otg_dev_dma_desc_t *dwc_otg_ep_alloc_desc_chain(dwc_dma_t *dma_desc_addr,
243 return DWC_DEV_DMA_ALLOC_ATOMIC(count * sizeof(dwc_otg_dev_dma_desc_t),
248 * This function frees a DMA Descriptor chain that was allocated by ep_alloc_desc.
/* Free a descriptor chain previously obtained from
 * dwc_otg_ep_alloc_desc_chain(); `count` must match the allocation. */
250 void dwc_otg_ep_free_desc_chain(dwc_otg_dev_dma_desc_t *desc_addr,
251 uint32_t dma_desc_addr, uint32_t count)
253 DWC_DEV_DMA_FREE(count * sizeof(dwc_otg_dev_dma_desc_t), desc_addr,
260 * This function initializes a descriptor chain for Isochronous transfer
262 * @param core_if Programming view of DWC_otg controller.
263 * @param dwc_ep The EP to start the transfer on.
/* Build the double-buffered DMA descriptor chains for an isochronous EP
 * (descriptor-DMA mode), program DOEPDMA/DIEPDMA with the chain's bus
 * address, and enable the endpoint. Two mirrored descriptor sets are built,
 * one per application buffer (dma_addr0 / dma_addr1); the last descriptor of
 * each set has IOC set so an interrupt fires when a buffer completes.
 * NOTE(review): many lines of this function are missing from this listing —
 * code byte-identical below, read against the full file. */
266 void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t *core_if,
270 dsts_data_t dsts = {.d32 = 0 };
271 depctl_data_t depctl = {.d32 = 0 };
272 volatile uint32_t *addr;
/* One descriptor per service interval slot per buffer. */
277 dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval;
280 dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
283 /** Allocate descriptors for double buffering */
284 dwc_ep->iso_desc_addr =
285 dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,
286 dwc_ep->desc_cnt * 2);
/* NOTE(review): the allocation result was stored in iso_desc_addr, but this
 * failure check tests desc_addr and is not negated — it looks like the wrong
 * field and/or inverted sense; presumably should be
 * `if (!dwc_ep->iso_desc_addr)`. TODO confirm against the full driver. */
287 if (dwc_ep->desc_addr) {
288 DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__);
292 dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
/* ---- OUT (receive) endpoint path ---- */
295 if (dwc_ep->is_in == 0) {
296 dev_dma_desc_sts_t sts = {.d32 = 0 };
297 dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
299 uint32_t data_per_desc;
300 dwc_otg_dev_out_ep_regs_t *out_regs =
301 core_if->dev_if->out_ep_regs[dwc_ep->num];
304 addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
305 dma_ad = (dma_addr_t) DWC_READ_REG32(&(out_regs->doepdma));
307 /** Buffer 0 descriptors setup */
308 dma_ad = dwc_ep->dma_addr0;
/* Descriptor status template: host-ready, clean status fields. */
310 sts.b_iso_out.bs = BS_HOST_READY;
311 sts.b_iso_out.rxsts = 0;
313 sts.b_iso_out.sp = 0;
314 sts.b_iso_out.ioc = 0;
315 sts.b_iso_out.pid = 0;
316 sts.b_iso_out.framenum = 0;
/* All frames except the last: pkt_per_frm descriptors per frame, with the
 * per-descriptor byte count rounded up to a 4-byte (DWORD) multiple. */
319 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
320 i += dwc_ep->pkt_per_frm) {
322 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
323 uint32_t len = (j + 1)*dwc_ep->maxpacket;
324 if (len > dwc_ep->data_per_frame)
326 dwc_ep->data_per_frame -
329 data_per_desc = dwc_ep->maxpacket;
330 len = data_per_desc % 4;
332 data_per_desc += 4 - len;
334 sts.b_iso_out.rxbytes = data_per_desc;
335 dma_desc->buf = dma_ad;
336 dma_desc->status.d32 = sts.d32;
338 offset += data_per_desc;
340 dma_ad += data_per_desc;
/* Last frame of buffer 0: final descriptor gets IOC=1. */
344 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
345 uint32_t len = (j + 1)*dwc_ep->maxpacket;
346 if (len > dwc_ep->data_per_frame)
348 dwc_ep->data_per_frame -
351 data_per_desc = dwc_ep->maxpacket;
352 len = data_per_desc % 4;
354 data_per_desc += 4 - len;
355 sts.b_iso_out.rxbytes = data_per_desc;
356 dma_desc->buf = dma_ad;
357 dma_desc->status.d32 = sts.d32;
359 offset += data_per_desc;
361 dma_ad += data_per_desc;
364 sts.b_iso_out.ioc = 1;
365 len = (j + 1)*dwc_ep->maxpacket;
366 if (len > dwc_ep->data_per_frame)
368 dwc_ep->data_per_frame - j*dwc_ep->maxpacket;
370 data_per_desc = dwc_ep->maxpacket;
371 len = data_per_desc % 4;
373 data_per_desc += 4 - len;
374 sts.b_iso_out.rxbytes = data_per_desc;
376 dma_desc->buf = dma_ad;
377 dma_desc->status.d32 = sts.d32;
380 /** Buffer 1 descriptors setup */
/* Mirror of the buffer-0 setup against dma_addr1. */
381 sts.b_iso_out.ioc = 0;
382 dma_ad = dwc_ep->dma_addr1;
385 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
386 i += dwc_ep->pkt_per_frm) {
387 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
388 uint32_t len = (j + 1)*dwc_ep->maxpacket;
389 if (len > dwc_ep->data_per_frame)
391 dwc_ep->data_per_frame -
394 data_per_desc = dwc_ep->maxpacket;
395 len = data_per_desc % 4;
397 data_per_desc += 4 - len;
400 sts.b_iso_out.rxbytes = data_per_desc;
401 dma_desc->buf = dma_ad;
402 dma_desc->status.d32 = sts.d32;
404 offset += data_per_desc;
406 dma_ad += data_per_desc;
409 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
411 ((j + 1)*dwc_ep->maxpacket >
412 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
413 j*dwc_ep->maxpacket : dwc_ep->maxpacket;
415 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
416 sts.b_iso_out.rxbytes = data_per_desc;
417 dma_desc->buf = dma_ad;
418 dma_desc->status.d32 = sts.d32;
420 offset += data_per_desc;
422 dma_ad += data_per_desc;
/* Final descriptor of buffer 1 gets IOC=1 as well. */
425 sts.b_iso_out.ioc = 1;
428 ((j + 1)*dwc_ep->maxpacket >
429 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
430 j*dwc_ep->maxpacket : dwc_ep->maxpacket;
432 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
433 sts.b_iso_out.rxbytes = data_per_desc;
435 dma_desc->buf = dma_ad;
436 dma_desc->status.d32 = sts.d32;
438 dwc_ep->next_frame = 0;
440 /** Write dma_ad into DOEPDMA register */
441 DWC_WRITE_REG32(&(out_regs->doepdma),
442 (uint32_t) dwc_ep->iso_dma_desc_addr);
/* ---- IN (transmit) endpoint path ---- */
447 dev_dma_desc_sts_t sts = {.d32 = 0 };
448 dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
450 dwc_otg_dev_in_ep_regs_t *in_regs =
451 core_if->dev_if->in_ep_regs[dwc_ep->num];
452 unsigned int frmnumber;
453 fifosize_data_t txfifosize, rxfifosize;
456 DWC_READ_REG32(&core_if->dev_if->
457 in_ep_regs[dwc_ep->num]->dtxfsts);
459 DWC_READ_REG32(&core_if->core_global_regs->grxfsiz);
461 addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
463 dma_ad = dwc_ep->dma_addr0;
466 DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
/* IN template: one descriptor per frame; sp set when the frame payload is
 * not a multiple of maxpacket, pid carries packets-per-frame. */
468 sts.b_iso_in.bs = BS_HOST_READY;
469 sts.b_iso_in.txsts = 0;
471 (dwc_ep->data_per_frame % dwc_ep->maxpacket) ? 1 : 0;
472 sts.b_iso_in.ioc = 0;
473 sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
475 frmnumber = dwc_ep->next_frame;
477 sts.b_iso_in.framenum = frmnumber;
478 sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
481 /** Buffer 0 descriptors setup */
/* Advance buffer address and target frame number per descriptor. */
482 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
483 dma_desc->buf = dma_ad;
484 dma_desc->status.d32 = sts.d32;
487 dma_ad += dwc_ep->data_per_frame;
488 sts.b_iso_in.framenum += dwc_ep->bInterval;
491 sts.b_iso_in.ioc = 1;
492 dma_desc->buf = dma_ad;
493 dma_desc->status.d32 = sts.d32;
496 /** Buffer 1 descriptors setup */
497 sts.b_iso_in.ioc = 0;
498 dma_ad = dwc_ep->dma_addr1;
/* NOTE(review): this loop steps by pkt_per_frm while buffer 0 stepped by 1;
 * lines are missing here, so confirm against the full file. */
500 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
501 i += dwc_ep->pkt_per_frm) {
502 dma_desc->buf = dma_ad;
503 dma_desc->status.d32 = sts.d32;
506 dma_ad += dwc_ep->data_per_frame;
507 sts.b_iso_in.framenum += dwc_ep->bInterval;
509 sts.b_iso_in.ioc = 0;
511 sts.b_iso_in.ioc = 1;
514 dma_desc->buf = dma_ad;
515 dma_desc->status.d32 = sts.d32;
/* Remember where the next chain refill should start. */
517 dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval;
519 /** Write dma_ad into diepdma register */
520 DWC_WRITE_REG32(&(in_regs->diepdma),
521 (uint32_t) dwc_ep->iso_dma_desc_addr);
523 /** Enable endpoint, clear nak */
526 depctl.b.usbactep = 1;
529 DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
530 depctl.d32 = DWC_READ_REG32(addr);
534 * This function initializes a descriptor chain for Isochronous transfer
536 * @param core_if Programming view of DWC_otg controller.
537 * @param ep The EP to start the transfer on.
/* Start an isochronous transfer in buffer-DMA mode: program DIEPTSIZ/DOEPTSIZ
 * with the transfer size and packet count, point DIEPDMA/DOEPDMA at the
 * active buffer (selected by proc_buf_num), and enable the endpoint.
 * NOTE(review): lines are missing in this listing (xfer_buff/dma_addr
 * assignments and brace structure are partial). */
540 void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if,
543 depctl_data_t depctl = {.d32 = 0 };
544 volatile uint32_t *addr;
/* Select the direction-specific control register. */
547 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
549 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
/* Buffer-DMA path only applies when DMA is on and descriptor DMA is off. */
552 if (core_if->dma_enable == 0 || core_if->dma_desc_enable != 0) {
555 deptsiz_data_t deptsiz = {.d32 = 0 };
/* Total bytes per processing interval. */
558 ep->data_per_frame*ep->buf_proc_intrvl / ep->bInterval;
560 (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
/* Double-buffering: proc_buf_num selects buffer 1 or buffer 0. */
563 (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
565 (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
568 /* Program the transfer size and packet count
569 * as follows: xfersize = N * maxpacket +
570 * short_packet pktcnt = N + (short_packet
573 deptsiz.b.mc = ep->pkt_per_frm;
574 deptsiz.b.xfersize = ep->xfer_len;
576 (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
577 DWC_WRITE_REG32(&core_if->dev_if->
578 in_ep_regs[ep->num]->dieptsiz,
581 /* Write the DMA register */
584 in_ep_regs[ep->num]->diepdma),
585 (uint32_t) ep->dma_addr);
/* OUT: pktcnt rounded up, xfersize padded to a whole number of packets. */
589 (ep->xfer_len + (ep->maxpacket - 1)) /
591 deptsiz.b.xfersize = deptsiz.b.pktcnt*ep->maxpacket;
593 DWC_WRITE_REG32(&core_if->dev_if->
594 out_ep_regs[ep->num]->doeptsiz,
597 /* Write the DMA register */
600 out_ep_regs[ep->num]->doepdma),
601 (uint32_t) ep->dma_addr);
604 /** Enable endpoint, clear nak */
609 DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
614 * This function does the setup for a data transfer for an EP and
615 * starts the transfer. For an IN transfer, the packets will be
616 * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
617 * the packets are unloaded from the Rx FIFO in the ISR.
619 * @param core_if Programming view of DWC_otg controller.
620 * @param ep The EP to start the transfer on.
623 static void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t *core_if,
/* Dispatch an isochronous transfer start to the appropriate engine:
 * descriptor DMA, buffer DMA (PTI enhancement), or frame-by-frame
 * (slave / plain DMA) mode. */
626 if (core_if->dma_enable) {
627 if (core_if->dma_desc_enable) {
/* Descriptor DMA: one descriptor per frame's worth of packets. */
629 ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm;
631 ep->desc_cnt = ep->pkt_cnt;
633 dwc_otg_iso_ep_start_ddma_transfer(core_if, ep);
635 if (core_if->pti_enh_enable) {
636 dwc_otg_iso_ep_start_buf_transfer(core_if, ep);
/* Plain DMA: select the active double buffer, then go frame by frame. */
639 (ep->proc_buf_num) ? ep->
640 xfer_buff1 : ep->xfer_buff0;
641 ep->cur_pkt_dma_addr =
642 (ep->proc_buf_num) ? ep->
643 dma_addr1 : ep->dma_addr0;
644 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
/* Slave mode: same buffer selection, frame-by-frame transfer. */
649 (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
650 ep->cur_pkt_dma_addr =
651 (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
652 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
657 * This function stops transfer for an EP and
658 * resets the ep's variables.
660 * @param core_if Programming view of DWC_otg controller.
661 * @param ep The EP to start the transfer on.
/* Stop an isochronous transfer: disable the endpoint via its DEPCTL
 * register, free the descriptor-DMA chain if one was allocated, and reset
 * the EP's iso bookkeeping fields. */
664 void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
666 depctl_data_t depctl = {.d32 = 0 };
667 volatile uint32_t *addr;
669 if (ep->is_in == 1) {
670 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
672 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
676 depctl.d32 = DWC_READ_REG32(addr);
681 DWC_WRITE_REG32(addr, depctl.d32);
/* Free the descriptor chain only when both CPU and bus addresses exist. */
683 if (core_if->dma_desc_enable &&
684 ep->iso_desc_addr && ep->iso_dma_desc_addr) {
685 dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,
686 ep->iso_dma_desc_addr,
690 /* reset variables */
695 ep->data_per_frame = 0;
696 ep->data_pattern_frame = 0;
698 ep->buf_proc_intrvl = 0;
700 ep->proc_buf_num = 0;
/* Clear the chain pointers so a later stop cannot double-free. */
704 ep->iso_desc_addr = 0;
705 ep->iso_dma_desc_addr = 0;
/* Start double-buffered isochronous streaming on an endpoint.
 * Validates the handle, records the two buffers (CPU + DMA addresses),
 * derives packets-per-frame and packet count from data_per_frame, computes
 * the first target frame from DSTS.SOFFN (when start_frame == -1), allocates
 * the per-packet info array, and kicks the transfer engine.
 * Returns 0 on success, -DWC_E_INVALID for a bad EP, -DWC_E_NO_MEMORY if
 * the packet-info allocation fails. */
708 int dwc_otg_pcd_iso_ep_start(dwc_otg_pcd_t *pcd, void *ep_handle,
709 uint8_t *buf0, uint8_t *buf1, dwc_dma_t dma0,
710 dwc_dma_t dma1, int sync_frame, int dp_frame,
711 int data_per_frame, int start_frame,
712 int buf_proc_intrvl, void *req_handle,
715 dwc_otg_pcd_ep_t *ep;
716 dwc_irqflags_t flags = 0;
720 dwc_otg_core_if_t *core_if;
722 ep = get_ep_from_handle(pcd, ep_handle);
/* EP0 cannot stream isochronous data. */
724 if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
725 DWC_WARN("bad ep\n");
726 return -DWC_E_INVALID;
729 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
730 core_if = GET_CORE_IF(pcd);
731 dwc_ep = &ep->dwc_ep;
/* Only one ISO request may be active per endpoint. */
733 if (ep->iso_req_handle) {
734 DWC_WARN("ISO request in progress\n");
737 dwc_ep->dma_addr0 = dma0;
738 dwc_ep->dma_addr1 = dma1;
740 dwc_ep->xfer_buff0 = buf0;
741 dwc_ep->xfer_buff1 = buf1;
743 dwc_ep->data_per_frame = data_per_frame;
745 /** @todo - pattern data support is to be implemented in the future */
746 dwc_ep->data_pattern_frame = dp_frame;
747 dwc_ep->sync_frame = sync_frame;
749 dwc_ep->buf_proc_intrvl = buf_proc_intrvl;
/* USB interval encoding: descriptor bInterval is an exponent. */
751 dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1);
753 dwc_ep->proc_buf_num = 0;
/* pkt_per_frm = ceil(data_per_frame / maxpacket), computed by subtraction. */
755 dwc_ep->pkt_per_frm = 0;
756 frm_data = ep->dwc_ep.data_per_frame;
757 while (frm_data > 0) {
758 dwc_ep->pkt_per_frm++;
759 frm_data -= ep->dwc_ep.maxpacket;
762 dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
/* start_frame == -1 means "schedule relative to the current SOF". */
764 if (start_frame == -1) {
765 dwc_ep->next_frame = dsts.b.soffn + 1;
766 if (dwc_ep->bInterval != 1) {
768 dwc_ep->next_frame + (dwc_ep->bInterval - 1 -
773 dwc_ep->next_frame = start_frame;
776 if (!core_if->pti_enh_enable) {
778 dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
/* PTI mode: pkt_cnt = ceil(total interval bytes / maxpacket). */
782 (dwc_ep->data_per_frame *
783 (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval)
784 - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket;
787 if (core_if->dma_desc_enable) {
789 dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
/* Per-packet status array; atomic variant presumably used on the
 * interrupt-context path — confirm against the missing #if lines. */
795 DWC_ALLOC_ATOMIC(sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
798 DWC_ALLOC(sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
800 if (!dwc_ep->pkt_info) {
801 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
802 return -DWC_E_NO_MEMORY;
804 if (core_if->pti_enh_enable) {
805 dwc_memset(dwc_ep->pkt_info, 0,
806 sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
810 ep->iso_req_handle = req_handle;
/* Drop the lock before touching hardware to start the transfer. */
812 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
813 dwc_otg_iso_ep_start_transfer(core_if, dwc_ep);
/* Stop isochronous streaming on an endpoint: halt the transfer, free the
 * per-packet info array, and clear the active request handle.
 * Returns 0 on success, -DWC_E_INVALID for a bad EP or handle mismatch. */
817 int dwc_otg_pcd_iso_ep_stop(dwc_otg_pcd_t *pcd, void *ep_handle,
820 dwc_irqflags_t flags = 0;
821 dwc_otg_pcd_ep_t *ep;
824 ep = get_ep_from_handle(pcd, ep_handle);
825 if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
826 DWC_WARN("bad ep\n");
827 return -DWC_E_INVALID;
829 dwc_ep = &ep->dwc_ep;
831 dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep);
/* NOTE(review): pkt_info is freed before req_handle is validated and is not
 * reset to NULL, so a mismatched handle leaves a dangling pointer behind —
 * consider validating first and clearing pkt_info after the free. */
833 DWC_FREE(dwc_ep->pkt_info);
834 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
835 if (ep->iso_req_handle != req_handle) {
836 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
837 return -DWC_E_INVALID;
840 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
842 ep->iso_req_handle = 0;
847 * This function is used for periodic data exchange between the PCD and gadget drivers
848 * for Isochronous EPs
850 * - Every time a sync period completes this function is called to
851 * perform data exchange between PCD and gadget
853 void dwc_otg_iso_buffer_done(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep,
/* Notify the gadget that one ISO buffer completed (proc_buf_num ^ 1 names
 * the buffer just finished), then clear the per-packet bookkeeping for the
 * next interval. The PCD lock is dropped around the gadget callback. */
859 dwc_ep = &ep->dwc_ep;
861 DWC_SPINUNLOCK(ep->pcd->lock);
862 pcd->fops->isoc_complete(pcd, ep->priv, ep->iso_req_handle,
863 dwc_ep->proc_buf_num ^ 0x1);
864 DWC_SPINLOCK(ep->pcd->lock);
/* Reset packet records so the next buffer starts from a clean slate. */
866 for (i = 0; i < dwc_ep->pkt_cnt; ++i) {
867 dwc_ep->pkt_info[i].status = 0;
868 dwc_ep->pkt_info[i].offset = 0;
869 dwc_ep->pkt_info[i].length = 0;
/* Return the number of ISO packets tracked for the endpoint, or
 * -DWC_E_INVALID for a bad endpoint. */
873 int dwc_otg_pcd_get_iso_packet_count(dwc_otg_pcd_t *pcd, void *ep_handle,
874 void *iso_req_handle)
876 dwc_otg_pcd_ep_t *ep;
879 ep = get_ep_from_handle(pcd, ep_handle);
/* NOTE(review): unlike dwc_otg_pcd_iso_ep_start(), there is no `!ep` guard
 * here, so a handle that resolves to no EP dereferences NULL — presumably
 * this should be `if (!ep || !ep->desc || ...)`. */
880 if (!ep->desc || ep->dwc_ep.num == 0) {
881 DWC_WARN("bad ep\n");
882 return -DWC_E_INVALID;
884 dwc_ep = &ep->dwc_ep;
886 return dwc_ep->pkt_cnt;
/* Copy the status/length/offset of one recorded ISO packet into the
 * caller-supplied out-parameters. `packet` indexes pkt_info; no bounds
 * check is visible in this listing — assumes 0 <= packet < pkt_cnt, TODO
 * confirm the caller guarantees it. */
889 void dwc_otg_pcd_get_iso_packet_params(dwc_otg_pcd_t *pcd, void *ep_handle,
890 void *iso_req_handle, int packet,
891 int *status, int *actual, int *offset)
893 dwc_otg_pcd_ep_t *ep;
896 ep = get_ep_from_handle(pcd, ep_handle);
898 DWC_WARN("bad ep\n");
900 dwc_ep = &ep->dwc_ep;
902 *status = dwc_ep->pkt_info[packet].status;
903 *actual = dwc_ep->pkt_info[packet].length;
904 *offset = dwc_ep->pkt_info[packet].offset;
907 #endif /* DWC_EN_ISOC */
/* Reset one PCD endpoint structure to its idle defaults: direction and
 * number as given, inactive, control-type until activation, and an empty
 * request queue. */
909 static void dwc_otg_pcd_init_ep(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *pcd_ep,
910 uint32_t is_in, uint32_t ep_num)
912 /* Init EP structure */
916 pcd_ep->queue_sof = 0;
918 /* Init DWC ep structure */
919 pcd_ep->dwc_ep.is_in = is_in;
920 pcd_ep->dwc_ep.num = ep_num;
921 pcd_ep->dwc_ep.active = 0;
922 pcd_ep->dwc_ep.tx_fifo_num = 0;
923 /* Control until ep is activated */
924 pcd_ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
925 pcd_ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
926 pcd_ep->dwc_ep.dma_addr = 0;
927 pcd_ep->dwc_ep.start_xfer_buff = 0;
928 pcd_ep->dwc_ep.xfer_buff = 0;
929 pcd_ep->dwc_ep.xfer_len = 0;
930 pcd_ep->dwc_ep.xfer_count = 0;
931 pcd_ep->dwc_ep.sent_zlp = 0;
932 pcd_ep->dwc_ep.total_len = 0;
933 pcd_ep->dwc_ep.desc_addr = 0;
934 pcd_ep->dwc_ep.dma_desc_addr = 0;
935 DWC_CIRCLEQ_INIT(&pcd_ep->queue);
941 static void dwc_otg_pcd_reinit(dwc_otg_pcd_t *pcd)
/* Re-initialize EP0 and every hardware-present IN/OUT endpoint.
 * HWCFG1 encodes endpoint availability two bits per EP; the shifts (>> 3 for
 * IN, >> 2 for OUT) position the first data EP's direction bit for the scan.
 * NOTE(review): the loop bodies that advance in_ep_cntr/out_ep_cntr and
 * shift hwcfg1 are not visible in this listing. */
945 dwc_otg_pcd_ep_t *ep;
946 int in_ep_cntr, out_ep_cntr;
947 uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
948 uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
950 * Initialize the EP0 structure.
953 dwc_otg_pcd_init_ep(pcd, ep, 0, 0);
/* Scan for available IN endpoints. */
956 hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3;
957 for (i = 1; in_ep_cntr < num_in_eps; i++) {
958 if ((hwcfg1 & 0x1) == 0) {
959 dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr];
962 * @todo NGS: Add direction to EP, based on contents
963 * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
966 dwc_otg_pcd_init_ep(pcd, ep, 1 /* IN */ , i);
968 DWC_CIRCLEQ_INIT(&ep->queue);
/* Scan for available OUT endpoints. */
974 hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2;
975 for (i = 1; out_ep_cntr < num_out_eps; i++) {
976 if ((hwcfg1 & 0x1) == 0) {
977 dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr];
980 * @todo NGS: Add direction to EP, based on contents
981 * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
984 dwc_otg_pcd_init_ep(pcd, ep, 0 /* OUT */ , i);
985 DWC_CIRCLEQ_INIT(&ep->queue);
/* EP0 starts disconnected with control-endpoint defaults. */
990 pcd->ep0state = EP0_DISCONNECT;
991 pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE;
992 pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
996 * This function is called when the SRP timer expires. The SRP should
997 * complete within 6 seconds.
999 static void srp_timeout(void *ptr)
/* SRP timeout timer handler. Reads GOTGCTL to decide the outcome:
 *  - ADP enabled: if no B-session is valid, power the core down (or arm the
 *    PMU) and restart ADP probing; if a session became valid, re-init the
 *    core as B-peripheral and start the PCD.
 *  - FS PHY with I2C: on success (srp_success && bsesvld) forward a
 *    resume_wakeup to the PCD and clear the session request; otherwise
 *    report the device as not responding.
 *  - Otherwise, a still-pending sesreq means the SRP failed. */
1001 gotgctl_data_t gotgctl;
1002 dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *) ptr;
1003 volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;
1005 gotgctl.d32 = DWC_READ_REG32(addr);
1007 core_if->srp_timer_started = 0;
1009 if (core_if->adp_enable) {
1010 if (gotgctl.b.bsesvld == 0) {
1011 gpwrdn_data_t gpwrdn = {.d32 = 0 };
1012 DWC_PRINTF("SRP Timeout BSESSVLD = 0\n");
1013 /* Power off the core */
1014 if (core_if->power_down == 2) {
1015 gpwrdn.b.pwrdnswtch = 1;
1016 DWC_MODIFY_REG32(&core_if->core_global_regs->
1017 gpwrdn, gpwrdn.d32, 0);
/* Arm the power-management unit before restarting ADP probing. */
1021 gpwrdn.b.pmuintsel = 1;
1022 gpwrdn.b.pmuactv = 1;
1023 DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
1025 dwc_otg_adp_probe_start(core_if);
1027 DWC_PRINTF("SRP Timeout BSESSVLD = 1\n");
1028 core_if->op_state = B_PERIPHERAL;
1029 dwc_otg_core_init(core_if);
1030 dwc_otg_enable_global_interrupts(core_if);
1031 cil_pcd_start(core_if);
1035 if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
1036 (core_if->core_params->i2c_enable)) {
1037 DWC_PRINTF("SRP Timeout\n");
1039 if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
1040 if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
1041 core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->
1045 /* Clear Session Request */
1047 gotgctl.b.sesreq = 1;
1048 DWC_MODIFY_REG32(&core_if->core_global_regs->gotgctl,
1051 core_if->srp_success = 0;
1053 __DWC_ERROR("Device not connected/responding\n");
1054 gotgctl.b.sesreq = 0;
1055 DWC_WRITE_REG32(addr, gotgctl.d32);
1057 } else if (gotgctl.b.sesreq) {
1058 DWC_PRINTF("SRP Timeout\n");
1060 __DWC_ERROR("Device not connected/responding\n");
1061 gotgctl.b.sesreq = 0;
1062 DWC_WRITE_REG32(addr, gotgctl.d32);
1064 DWC_PRINTF(" SRP GOTGCTL=%0x\n", gotgctl.d32);
1072 extern void start_next_request(dwc_otg_pcd_ep_t *ep);
1074 static void start_xfer_tasklet_func(void *data)
/* Deferred-start tasklet: for EP0 and each IN endpoint whose queue_sof flag
 * is set, clear the flag and start the next queued request. */
1076 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) data;
1077 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1080 depctl_data_t diepctl;
1082 DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
/* NOTE(review): this outer diepctl read is never used and is shadowed by
 * the per-EP diepctl declared inside the loop below — dead read. */
1084 diepctl.d32 = DWC_READ_REG32(&core_if->dev_if->in_ep_regs[0]->diepctl);
1086 if (pcd->ep0.queue_sof) {
1087 pcd->ep0.queue_sof = 0;
1088 start_next_request(&pcd->ep0);
1092 for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
1093 depctl_data_t diepctl;
1095 DWC_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl);
1097 if (pcd->in_ep[i].queue_sof) {
1098 pcd->in_ep[i].queue_sof = 0;
1099 start_next_request(&pcd->in_ep[i]);
1108 * This function initializes the PCD portion of the driver.
1111 dwc_otg_pcd_t *dwc_otg_pcd_init(dwc_otg_core_if_t *core_if)
/* Allocate and initialize the PCD: its lock, CIL callback registration,
 * DMA (or plain) buffers for SETUP packets and the status word, optional
 * descriptor-DMA chains, endpoint structures, CFI object, tasklets, and
 * timers. Returns the new pcd, or (per the visible error paths) unwinds
 * partial allocations on failure — several cleanup/return lines are missing
 * from this listing. */
1113 dwc_otg_pcd_t *pcd = NULL;
1114 dwc_otg_dev_if_t *dev_if;
1118 * Allocate PCD structure
1120 pcd = DWC_ALLOC(sizeof(dwc_otg_pcd_t));
1126 pcd->lock = DWC_SPINLOCK_ALLOC();
1128 DWC_ERROR("Could not allocate lock for pcd");
1132 /* Set core_if's lock pointer to hcd->lock */
1133 core_if->lock = pcd->lock;
1134 pcd->core_if = core_if;
1136 dev_if = core_if->dev_if;
1137 dev_if->isoc_ep = NULL;
1139 if (core_if->hwcfg4.b.ded_fifo_en) {
1140 DWC_PRINTF("Dedicated Tx FIFOs mode\n");
1142 DWC_PRINTF("Shared Tx FIFO mode\n");
1146 * Initialize the Core for Device mode here if there is no ADP support.
1147 * Otherwise it will be done later in dwc_otg_adp_start routine.
1149 /* if (dwc_otg_is_device_mode(core_if) ) { */
1150 /* dwc_otg_core_dev_init(core_if); */
1154 * Register the PCD Callbacks.
1156 dwc_otg_cil_register_pcd_callbacks(core_if, &pcd_callbacks, pcd);
1159 * Initialize the DMA buffer for SETUP packets
/* DMA mode: SETUP packets and the status word live in coherent memory. */
1161 if (GET_CORE_IF(pcd)->dma_enable) {
1163 DWC_DEV_DMA_ALLOC_ATOMIC(sizeof(*pcd->setup_pkt) * 5,
1164 &pcd->setup_pkt_dma_handle);
1165 if (pcd->setup_pkt == NULL) {
1171 DWC_DEV_DMA_ALLOC_ATOMIC(sizeof(uint16_t),
1172 &pcd->status_buf_dma_handle);
1173 if (pcd->status_buf == NULL) {
1174 DWC_DEV_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
1176 pcd->setup_pkt_dma_handle);
/* Descriptor DMA additionally needs the two SETUP descriptors plus one
 * IN and one OUT data descriptor. */
1181 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1182 dev_if->setup_desc_addr[0] =
1183 dwc_otg_ep_alloc_desc_chain
1184 (&dev_if->dma_setup_desc_addr[0], 1);
1185 dev_if->setup_desc_addr[1] =
1186 dwc_otg_ep_alloc_desc_chain
1187 (&dev_if->dma_setup_desc_addr[1], 1);
1188 dev_if->in_desc_addr =
1189 dwc_otg_ep_alloc_desc_chain
1190 (&dev_if->dma_in_desc_addr, 1);
1191 dev_if->out_desc_addr =
1192 dwc_otg_ep_alloc_desc_chain
1193 (&dev_if->dma_out_desc_addr, 1);
1194 pcd->data_terminated = 0;
/* If any descriptor allocation failed, free whatever succeeded in
 * reverse order, then release the SETUP/status buffers. */
1196 if (dev_if->setup_desc_addr[0] == 0
1197 || dev_if->setup_desc_addr[1] == 0
1198 || dev_if->in_desc_addr == 0
1199 || dev_if->out_desc_addr == 0) {
1201 if (dev_if->out_desc_addr)
1202 dwc_otg_ep_free_desc_chain
1203 (dev_if->out_desc_addr,
1204 dev_if->dma_out_desc_addr, 1);
1205 if (dev_if->in_desc_addr)
1206 dwc_otg_ep_free_desc_chain
1207 (dev_if->in_desc_addr,
1208 dev_if->dma_in_desc_addr, 1);
1209 if (dev_if->setup_desc_addr[1])
1210 dwc_otg_ep_free_desc_chain
1211 (dev_if->setup_desc_addr[1],
1212 dev_if->dma_setup_desc_addr[1], 1);
1213 if (dev_if->setup_desc_addr[0])
1214 dwc_otg_ep_free_desc_chain
1215 (dev_if->setup_desc_addr[0],
1216 dev_if->dma_setup_desc_addr[0], 1);
1218 DWC_DEV_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
1220 pcd->setup_pkt_dma_handle);
1221 DWC_DEV_DMA_FREE(sizeof(*pcd->status_buf),
1223 pcd->status_buf_dma_handle);
/* Non-DMA mode: plain heap allocations suffice. */
1231 pcd->setup_pkt = DWC_ALLOC(sizeof(*pcd->setup_pkt) * 5);
1232 if (pcd->setup_pkt == NULL) {
1237 pcd->status_buf = DWC_ALLOC(sizeof(uint16_t));
1238 if (pcd->status_buf == NULL) {
1239 DWC_FREE(pcd->setup_pkt);
1245 dwc_otg_pcd_reinit(pcd);
1247 /* Allocate the cfi object for the PCD */
1249 pcd->cfi = DWC_ALLOC(sizeof(cfiobject_t));
1250 if (NULL == pcd->cfi)
1252 if (init_cfi(pcd->cfi)) {
1253 CFI_INFO("%s: Failed to init the CFI object\n", __func__);
1258 /* Initialize tasklets */
1259 pcd->start_xfer_tasklet = DWC_TASK_ALLOC("xfer_tasklet",
1260 start_xfer_tasklet_func, pcd);
1261 pcd->test_mode_tasklet = DWC_TASK_ALLOC("test_mode_tasklet",
1264 /* Initialize SRP timer */
1265 core_if->srp_timer = DWC_TIMER_ALLOC("SRP TIMER", srp_timeout, core_if);
1267 if (core_if->core_params->dev_out_nak) {
1269 * Initialize xfer timeout timer. Implemented for
1270 * 2.93a feature "Device DDMA OUT NAK Enhancement"
1272 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1273 pcd->core_if->ep_xfer_timer[i] =
1274 DWC_TIMER_ALLOC("ep timer", ep_xfer_timeout,
1275 &pcd->core_if->ep_xfer_info[i]);
/* Error-unwind tail (labels not visible in this listing). */
1284 DWC_FREE(pcd->setup_pkt);
1285 if (pcd->status_buf)
1286 DWC_FREE(pcd->status_buf);
1298 * Remove PCD specific data
1300 void dwc_otg_pcd_remove(dwc_otg_pcd_t *pcd)
/* Tear down the PCD: cancel and free the per-EP transfer timers, free the
 * SETUP/status buffers (DMA or plain, mirroring dwc_otg_pcd_init), free the
 * descriptor-DMA chains, the lock, the tasklets, and release the CFI
 * object's resources. */
1302 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
1304 if (pcd->core_if->core_params->dev_out_nak) {
1305 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1306 DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[i]);
1307 pcd->core_if->ep_xfer_info[i].state = 0;
1311 if (GET_CORE_IF(pcd)->dma_enable) {
1312 DWC_DEV_DMA_FREE(sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt,
1313 pcd->setup_pkt_dma_handle);
1314 DWC_DEV_DMA_FREE(sizeof(uint16_t), pcd->status_buf,
1315 pcd->status_buf_dma_handle);
1316 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1317 dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0],
1318 dev_if->dma_setup_desc_addr
1320 dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1],
1321 dev_if->dma_setup_desc_addr
1323 dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr,
1324 dev_if->dma_in_desc_addr, 1);
1325 dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr,
1326 dev_if->dma_out_desc_addr,
/* Non-DMA mode used plain allocations. */
1330 DWC_FREE(pcd->setup_pkt);
1331 DWC_FREE(pcd->status_buf);
1333 DWC_SPINLOCK_FREE(pcd->lock);
1334 /* Set core_if's lock pointer to NULL */
1335 pcd->core_if->lock = NULL;
1337 DWC_TASK_FREE(pcd->start_xfer_tasklet);
1338 DWC_TASK_FREE(pcd->test_mode_tasklet);
1339 if (pcd->core_if->core_params->dev_out_nak) {
1340 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1341 if (pcd->core_if->ep_xfer_timer[i]) {
1342 DWC_TIMER_FREE(pcd->core_if->ep_xfer_timer[i]);
1347 /* Release the CFI object's dynamic memory */
1349 if (pcd->cfi->ops.release) {
1350 pcd->cfi->ops.release(pcd->cfi);
1358 * Returns whether registered pcd is dual speed or not
1360 uint32_t dwc_otg_pcd_is_dualspeed(dwc_otg_pcd_t *pcd)
/* Report whether the PCD is limited to a single (full) speed: true when the
 * driver was configured for full speed only, or the hardware is a dedicated
 * FS PHY behind ULPI FS/LS. The return statements are not visible in this
 * listing; per the name, the condition below marks the NOT-dualspeed case —
 * TODO confirm the returned values in the full file. */
1362 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1364 if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) ||
1365 ((core_if->hwcfg2.b.hs_phy_type == 2) &&
1366 (core_if->hwcfg2.b.fs_phy_type == 1) &&
1367 (core_if->core_params->ulpi_fs_ls))) {
1375 * Returns whether registered pcd is OTG capable or not
/*
 * Decides OTG capability from the programmed gusbcfg capabilities bits.
 * Pre-3.6 kernels require BOTH SRP and HNP capability; newer kernels only
 * require SRP, with HNP and ADP affecting the (elided) retval/otg-version
 * logic. The return paths are elided in this listing — confirm against
 * the full source.
 */
1377 uint32_t dwc_otg_pcd_is_otg(dwc_otg_pcd_t *pcd)
1379 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1380 gusbcfg_data_t usbcfg = {.d32 = 0 };
1381 uint32_t retval = 0;
1383 usbcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->gusbcfg);
1384 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
/* OTG 1.3: both SRP and HNP must be advertised. */
1385 if (!usbcfg.b.srpcap || !usbcfg.b.hnpcap)
/* OTG 2.0: SRP capability alone gates OTG support. */
1390 if (!usbcfg.b.srpcap)
1395 if (usbcfg.b.hnpcap)
1398 if (core_if->adp_enable)
1406 * This function assigns periodic Tx FIFO to an periodic EP
1407 * in shared Tx FIFO mode
/*
 * Despite the (upstream) comment, this is the DEDICATED-FIFO variant:
 * it scans the in-EP Tx FIFO allocation bitmask for the first free FIFO,
 * marks it used, and returns its number (1-based; 0 means none free —
 * the mask initialization and return statements are elided here).
 */
1409 static uint32_t assign_tx_fifo(dwc_otg_core_if_t *core_if)
1414 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
/* First zero bit in tx_msk == first unclaimed FIFO. */
1415 if ((TxMsk & core_if->tx_msk) == 0) {
1416 core_if->tx_msk |= TxMsk;
1425 * This function assigns periodic Tx FIFO to an periodic EP
1426 * in shared Tx FIFO mode
/*
 * Shared-FIFO variant of assign_tx_fifo(): claims the first free
 * periodic Tx FIFO (tracked in p_tx_msk) for an ISOC IN endpoint and
 * returns its number; returns elided in this listing.
 */
1428 static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t *core_if)
1430 uint32_t PerTxMsk = 1;
1432 for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
1433 if ((PerTxMsk & core_if->p_tx_msk) == 0) {
1434 core_if->p_tx_msk |= PerTxMsk;
1443 * This function releases periodic Tx FIFO
1444 * in shared Tx FIFO mode
/*
 * Clears fifo_num's bit in the periodic-FIFO allocation mask.
 * (mask & bit) ^ mask leaves every bit except fifo_num-1 unchanged and
 * forces that one to zero; a no-op when fifo_num is 0/unassigned.
 */
1446 static void release_perio_tx_fifo(dwc_otg_core_if_t *core_if,
1450 (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
1454 * This function releases a (non-periodic/dedicated) Tx FIFO
1455 * in dedicated Tx FIFO mode
/*
 * Counterpart of assign_tx_fifo(): clears fifo_num's bit in tx_msk using
 * the same AND-then-XOR mask trick as release_perio_tx_fifo(). The
 * upstream comment said "periodic" — a copy-paste of the previous
 * function's header; this one operates on tx_msk, not p_tx_msk.
 */
1457 static void release_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num)
1460 (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
1464 * This function is being called from gadget
1465 * to enable PCD endpoint.
/*
 * Validates the endpoint descriptor, locates the matching dwc_otg_pcd_ep_t
 * by number/direction, assigns a Tx FIFO for IN endpoints, allocates the
 * DMA descriptor chain (descriptor-DMA mode), and activates the endpoint
 * in hardware. Returns 0 on success or a negative -DWC_E_* code.
 * NOTE(review): the listing elides the lock acquisition around parts of
 * the setup and several early-exit gotos — verify against full source.
 */
1467 int dwc_otg_pcd_ep_enable(dwc_otg_pcd_t *pcd,
1468 const uint8_t *ep_desc, void *usb_ep)
1471 dwc_otg_pcd_ep_t *ep = NULL;
1472 const usb_endpoint_descriptor_t *desc;
1473 dwc_irqflags_t flags;
1474 /* fifosize_data_t dptxfsiz = {.d32 = 0 }; */
1475 /* gdfifocfg_data_t gdfifocfg = {.d32 = 0 }; */
1476 /* gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 }; */
1480 desc = (const usb_endpoint_descriptor_t *)ep_desc;
/* EP0 is special-cased: only its gadget-layer handle is recorded. */
1483 pcd->ep0.priv = usb_ep;
1485 retval = -DWC_E_INVALID;
1489 num = UE_GET_ADDR(desc->bEndpointAddress);
1490 dir = UE_GET_DIR(desc->bEndpointAddress);
/* A zero wMaxPacketSize descriptor is malformed — reject it. */
1492 if (!desc->wMaxPacketSize) {
1493 DWC_WARN("bad maxpacketsize\n");
1494 retval = -DWC_E_INVALID;
/* Resolve the descriptor's EP number to the driver's per-direction table. */
1498 if (dir == UE_DIR_IN) {
1499 epcount = pcd->core_if->dev_if->num_in_eps;
1500 for (i = 0; i < epcount; i++) {
1501 if (num == pcd->in_ep[i].dwc_ep.num) {
1502 ep = &pcd->in_ep[i];
1507 epcount = pcd->core_if->dev_if->num_out_eps;
1508 for (i = 0; i < epcount; i++) {
1509 if (num == pcd->out_ep[i].dwc_ep.num) {
1510 ep = &pcd->out_ep[i];
/* No table entry matched the descriptor's address. */
1517 DWC_WARN("bad address\n");
1518 retval = -DWC_E_INVALID;
1522 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
1532 ep->dwc_ep.is_in = (dir == UE_DIR_IN);
1533 ep->dwc_ep.maxpacket = UGETW(desc->wMaxPacketSize);
1535 ep->dwc_ep.type = desc->bmAttributes & UE_XFERTYPE;
/* IN endpoints need a Tx FIFO: shared mode uses FIFO 0 for non-periodic
 * EPs and a claimed periodic FIFO for ISOC; dedicated-FIFO mode claims
 * one FIFO per IN endpoint. */
1537 if (ep->dwc_ep.is_in) {
1538 if (!GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1539 ep->dwc_ep.tx_fifo_num = 0;
1541 if (ep->dwc_ep.type == UE_ISOCHRONOUS) {
1543 * if ISOC EP then assign a Periodic Tx FIFO.
1545 ep->dwc_ep.tx_fifo_num =
1546 assign_perio_tx_fifo(GET_CORE_IF(pcd));
1550 * if Dedicated FIFOs mode is on then assign a Tx FIFO.
1552 ep->dwc_ep.tx_fifo_num =
1553 assign_tx_fifo(GET_CORE_IF(pcd));
1556 /* Calculating EP info controller base address */
/* Grow gdfifocfg.epinfobase by the newly claimed FIFO's depth; only
 * cores up to rev 2.94a require software to program this. */
1558 if (ep->dwc_ep.tx_fifo_num
1559 && GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1561 DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->
1563 gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
1566 (&GET_CORE_IF(pcd)->
1567 core_global_regs->dtxfsiz[ep->dwc_ep.tx_fifo_num -
1569 gdfifocfg.b.epinfobase =
1570 gdfifocfgbase.d32 + dptxfsiz.d32;
1571 if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
1572 DWC_WRITE_REG32(&GET_CORE_IF
1573 (pcd)->core_global_regs->
1574 gdfifocfg, gdfifocfg.d32);
1579 /* Set initial data PID. */
1580 if (ep->dwc_ep.type == UE_BULK) {
1581 ep->dwc_ep.data_pid_start = 0;
1584 /* Alloc DMA Descriptors */
/* Non-ISOC descriptor chains are allocated here; ISOC chains are handled
 * elsewhere (and per-IO mode builds them at queue time). */
1585 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1586 #ifndef DWC_UTE_PER_IO
1587 if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
1589 ep->dwc_ep.desc_addr =
1590 dwc_otg_ep_alloc_desc_chain(&ep->dwc_ep.
1593 if (!ep->dwc_ep.desc_addr) {
1594 DWC_WARN("%s, can't allocate DMA descriptor\n",
1596 retval = -DWC_E_SHUTDOWN;
1597 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1600 #ifndef DWC_UTE_PER_IO
1605 DWC_DEBUGPL(DBG_PCD, "Activate %s: type=%d, mps=%d desc=%p\n",
1606 (ep->dwc_ep.is_in ? "IN" : "OUT"),
1607 ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);
1608 #ifdef DWC_UTE_PER_IO
/* bInterval is log2-encoded per USB spec; cache the linear value. */
1609 ep->dwc_ep.xiso_bInterval = 1 << (ep->desc->bInterval - 1);
1611 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
1612 ep->dwc_ep.bInterval = 1 << (ep->desc->bInterval - 1);
/* Sentinel: "no frame scheduled yet". */
1613 ep->dwc_ep.frame_num = 0xFFFFFFFF;
1616 dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
/* Give the CFI layer a chance to hook the newly enabled endpoint. */
1619 if (pcd->cfi->ops.ep_enable) {
1620 pcd->cfi->ops.ep_enable(pcd->cfi, pcd, ep);
1624 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1631 * This function is being called from gadget
1632 * to disable PCD endpoint.
/*
 * Reverse of dwc_otg_pcd_ep_enable(): fails all queued requests,
 * deactivates the endpoint in hardware, releases its Tx FIFO (IN only),
 * shrinks the gdfifocfg EP-info base on <= 2.94a cores, and frees the
 * DMA descriptor chain. Returns 0 or -DWC_E_INVALID for a bad handle.
 */
1634 int dwc_otg_pcd_ep_disable(dwc_otg_pcd_t *pcd, void *ep_handle)
1636 dwc_otg_pcd_ep_t *ep;
1637 dwc_irqflags_t flags;
1638 dwc_otg_dev_dma_desc_t *desc_addr;
1639 dwc_dma_t dma_desc_addr;
1640 gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
1641 gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
1642 /* fifosize_data_t dptxfsiz = {.d32 = 0 }; */
1644 ep = get_ep_from_handle(pcd, ep_handle);
/* An EP without a descriptor was never enabled — nothing to disable. */
1646 if (!ep || !ep->desc) {
1647 DWC_DEBUGPL(DBG_PCD, "bad ep address\n");
1648 return -DWC_E_INVALID;
1651 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
/* Complete every pending request with an error before hardware teardown. */
1653 dwc_otg_request_nuke(ep);
1655 dwc_otg_ep_deactivate(GET_CORE_IF(pcd), &ep->dwc_ep);
1656 if (pcd->core_if->core_params->dev_out_nak) {
1657 DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[ep->dwc_ep.num]);
1658 pcd->core_if->ep_xfer_info[ep->dwc_ep.num].state = 0;
1664 DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg);
1665 gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
1667 if (ep->dwc_ep.is_in) {
1668 if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1669 /* Flush the Tx FIFO */
1670 dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd),
1671 ep->dwc_ep.tx_fifo_num);
/* Return the FIFO to whichever pool it came from; each release is a
 * no-op for the other pool's mask. */
1673 release_perio_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
1674 release_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
1676 if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1677 /* Decreasing EPinfo Base Addr */
1680 (&GET_CORE_IF(pcd)->core_global_regs->
1681 dtxfsiz[ep->dwc_ep.tx_fifo_num - 1]) >> 16);
1682 gdfifocfg.b.epinfobase =
1683 gdfifocfgbase.d32 - dptxfsiz.d32;
/* Only cores up to rev 2.94a need software-managed epinfobase. */
1684 if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
1685 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
1686 core_global_regs->gdfifocfg,
1693 /* Free DMA Descriptors */
1694 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1695 if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
/* Snapshot the chain pointers so the free can happen after unlock. */
1696 desc_addr = ep->dwc_ep.desc_addr;
1697 dma_desc_addr = ep->dwc_ep.dma_desc_addr;
1699 /* Cannot call dma_free_coherent() with IRQs disabled */
1700 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1701 dwc_otg_ep_free_desc_chain(desc_addr, dma_desc_addr,
1707 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1710 DWC_DEBUGPL(DBG_PCD, "%d %s disabled\n", ep->dwc_ep.num,
1711 ep->dwc_ep.is_in ? "IN" : "OUT");
/*
 * Polls *reg until any bit in `bit` is set or `timeout` iterations
 * elapse. Returns 0 on success; the failure return and the per-iteration
 * delay are elided in this listing (presumably dwc_udelay + a nonzero/
 * timeout return — confirm against full source).
 */
1716 static int dwc_otg_wait_bit_set(volatile uint32_t *reg,
1717 uint32_t bit, uint32_t timeout)
1721 for (i = 0; i < timeout; i++) {
1722 if (DWC_READ_REG32(reg) & bit)
/*
 * Cleanly halts an in-progress transfer on one endpoint, following the
 * programming-guide sequence:
 *   IN EP:  set DIEPCTL.snak, wait for DIEPINT.inepnakeff, then set
 *           epdis and wait for DIEPINT.epdisabled, acking each.
 *   OUT EP: set DCTL.sgoutnak (global OUT NAK), wait for
 *           GINTSTS.goutnakeff, disable the EP via DOEPCTL, wait for
 *           DOEPINT.epdisabled, then clear the global NAK (cgoutnak).
 * The statement order is hardware-mandated; do not reorder.
 */
1731 static void dwc_otg_pcd_ep_stop_transfer(dwc_otg_core_if_t
1732 *core_if, dwc_ep_t *ep)
1734 depctl_data_t depctl = {.d32 = 0 };
1736 /* Read DEPCTLn register */
1738 depctl.d32 = DWC_READ_REG32(&core_if->dev_if->
1739 in_ep_regs[ep->num]->
1742 depctl.d32 = DWC_READ_REG32(&core_if->dev_if->
1743 out_ep_regs[ep->num]->
1746 if (ep->is_in == 1) {
1747 diepint_data_t diepint = {.d32 = 0 };
/* Step 1: request local NAK on the IN endpoint. */
1750 DWC_WRITE_REG32(&core_if->dev_if->
1751 in_ep_regs[ep->num]->diepctl,
1754 diepint.b.inepnakeff = 1;
1755 /* Wait for Nak effect */
1756 if (dwc_otg_wait_bit_set(&core_if->dev_if->
1761 DWC_WARN("%s: timeout diepctl.snak\n",
/* Ack the NAK-effective interrupt by writing it back. */
1764 DWC_WRITE_REG32(&core_if->dev_if->
1765 in_ep_regs[ep->num]->
1766 diepint, diepint.d32);
/* Step 2: disable the endpoint and wait for confirmation. */
1771 DWC_WRITE_REG32(&core_if->dev_if->
1772 in_ep_regs[ep->num]->diepctl,
1776 diepint.b.epdisabled = 1;
1777 if (dwc_otg_wait_bit_set(&core_if->dev_if->
1782 DWC_WARN("%s: timeout diepctl.epdis\n",
1785 DWC_WRITE_REG32(&core_if->dev_if->
1786 in_ep_regs[ep->num]->
1787 diepint, diepint.d32);
1790 dctl_data_t dctl = {.d32 = 0 };
1791 gintmsk_data_t gintsts = {.d32 = 0 };
1792 doepint_data_t doepint = {.d32 = 0 };
/* OUT path: NAK is global — set sgoutnak and wait for goutnakeff. */
1794 dctl.b.sgoutnak = 1;
1795 DWC_MODIFY_REG32(&core_if->dev_if->
1796 dev_global_regs->dctl, 0, dctl.d32);
1798 /* Wait for global nak to take effect */
1800 gintsts.b.goutnakeff = 1;
1801 if (dwc_otg_wait_bit_set(&core_if->core_global_regs->
1802 gintsts, gintsts.d32,
1804 DWC_WARN("%s: timeout dctl.sgoutnak\n",
1807 DWC_WRITE_REG32(&core_if->core_global_regs
1808 ->gintsts, gintsts.d32);
1814 DWC_WRITE_REG32(&core_if->dev_if->out_ep_regs[ep->num]->
1815 doepctl, depctl.d32);
1817 doepint.b.epdisabled = 1;
1818 if (dwc_otg_wait_bit_set(&core_if->dev_if
1819 ->out_ep_regs[ep->num]
1820 ->doepint, doepint.d32,
1822 DWC_WARN("%s: timeout doepctl.epdis\n",
1825 DWC_WRITE_REG32(&core_if->dev_if->
1826 out_ep_regs[ep->num]->
1827 doepint, doepint.d32);
/* Restore normal OUT traffic by clearing the global NAK. */
1831 dctl.b.cgoutnak = 1;
1832 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
1837 /******************************************************************************/
1838 #ifdef DWC_UTE_PER_IO
1841 * Free the request and its extended parts
/* Releases the per-frame descriptor array allocated by
 * dwc_otg_pcd_xiso_create_pkt_descs(); the free of the request object
 * itself is elided in this listing. The `ep` argument is unused here. */
1844 void dwc_pcd_xiso_ereq_free(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req)
1846 DWC_FREE(req->ext_req.per_io_frame_descs);
1851 * Start the next request in the endpoint's queue.
/*
 * Per-IO ISOC engine (DWC_UTE_PER_IO): takes the head request of the EP
 * queue, fills one hardware DMA descriptor per packet (frame number,
 * byte count, short-packet flag, IOC/last flags on the final entry),
 * programs the descriptor chain's DMA address, and enables the endpoint.
 * Frame numbers advance by xiso_bInterval modulo 0x3FFF per packet.
 * Returns 0; refuses to start while a transfer is already active.
 */
1854 int dwc_otg_pcd_xiso_start_next_request(dwc_otg_pcd_t *pcd,
1855 dwc_otg_pcd_ep_t *ep)
1858 dwc_otg_pcd_request_t *req = NULL;
1859 dwc_ep_t *dwcep = NULL;
1860 struct dwc_iso_xreq_port *ereq = NULL;
1861 struct dwc_iso_pkt_desc_port *ddesc_iso;
1863 depctl_data_t diepctl;
1865 dwcep = &ep->dwc_ep;
/* Only one chained transfer may be in flight per endpoint. */
1867 if (dwcep->xiso_active_xfers > 0) {
1869 /* Disable this to decrease s/w overhead
1870 * that is crucial for Isoc transfers */
1871 DWC_WARN("There are currently active transfers for EP%d \
1872 (active=%d; queued=%d)", dwcep->num,
1873 dwcep->xiso_active_xfers, dwcep->xiso_queued_xfers);
/* nat = additional transactions per microframe, wMaxPacketSize bits 12:11. */
1878 nat = UGETW(ep->desc->wMaxPacketSize);
1879 nat = (nat >> 11) & 0x03;
1881 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1882 req = DWC_CIRCLEQ_FIRST(&ep->queue);
1883 ereq = &req->ext_req;
1886 /* Get the frame number */
1887 dwcep->xiso_frame_num =
1888 dwc_otg_get_frame_number(GET_CORE_IF(pcd));
1889 DWC_DEBUG("FRM_NUM=%d", dwcep->xiso_frame_num);
1891 ddesc_iso = ereq->per_io_frame_descs;
1894 /* Setup DMA Descriptor chain for IN Isoc request */
1895 for (i = 0; i < ereq->pio_pkt_count; i++) {
1896 /* if ((i % (nat + 1)) == 0) */
/* Schedule each packet one interval after the previous, 14-bit wrap. */
1898 dwcep->xiso_frame_num =
1899 (dwcep->xiso_bInterval +
1900 dwcep->xiso_frame_num) & 0x3FFF;
1901 dwcep->desc_addr[i].buf =
1902 req->dma + ddesc_iso[i].offset;
1903 dwcep->desc_addr[i].status.b_iso_in.txbytes =
1904 ddesc_iso[i].length;
1905 dwcep->desc_addr[i].status.b_iso_in.framenum =
1906 dwcep->xiso_frame_num;
1907 dwcep->desc_addr[i].status.b_iso_in.bs =
1909 dwcep->desc_addr[i].status.b_iso_in.txsts = 0;
/* Short packet when the payload is not a maxpacket multiple. */
1910 dwcep->desc_addr[i].status.b_iso_in.sp =
1911 (ddesc_iso[i].length %
1912 dwcep->maxpacket) ? 1 : 0;
1913 dwcep->desc_addr[i].status.b_iso_in.ioc = 0;
1914 dwcep->desc_addr[i].status.b_iso_in.pid =
1916 dwcep->desc_addr[i].status.b_iso_in.l = 0;
1918 /* Process the last descriptor */
/* Only the final descriptor interrupts (ioc) and terminates (l). */
1919 if (i == ereq->pio_pkt_count - 1) {
1920 dwcep->desc_addr[i].status.b_iso_in.
1922 dwcep->desc_addr[i].status.b_iso_in.l =
1927 /* Setup and start the transfer for this endpoint */
1928 dwcep->xiso_active_xfers++;
1929 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
1930 dev_if->in_ep_regs[dwcep->num]->diepdma,
1931 dwcep->dma_desc_addr);
1933 diepctl.b.epena = 1;
1935 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
1936 dev_if->in_ep_regs[dwcep->num]->
1937 diepctl, 0, diepctl.d32);
1939 /* Setup DMA Descriptor chain for OUT Isoc request */
/* OUT path mirrors the IN path, writing b_iso_out fields instead. */
1940 for (i = 0; i < ereq->pio_pkt_count; i++) {
1941 /* if ((i % (nat + 1)) == 0) */
1942 dwcep->xiso_frame_num = (dwcep->xiso_bInterval +
1946 dwcep->desc_addr[i].buf =
1947 req->dma + ddesc_iso[i].offset;
1948 dwcep->desc_addr[i].status.b_iso_out.rxbytes =
1949 ddesc_iso[i].length;
1950 dwcep->desc_addr[i].status.b_iso_out.framenum =
1951 dwcep->xiso_frame_num;
1952 dwcep->desc_addr[i].status.b_iso_out.bs =
1954 dwcep->desc_addr[i].status.b_iso_out.rxsts = 0;
1955 dwcep->desc_addr[i].status.b_iso_out.sp =
1956 (ddesc_iso[i].length %
1957 dwcep->maxpacket) ? 1 : 0;
1958 dwcep->desc_addr[i].status.b_iso_out.ioc = 0;
1959 dwcep->desc_addr[i].status.b_iso_out.pid =
1961 dwcep->desc_addr[i].status.b_iso_out.l = 0;
1963 /* Process the last descriptor */
1964 if (i == ereq->pio_pkt_count - 1) {
1965 dwcep->desc_addr[i].status.b_iso_out.
1967 dwcep->desc_addr[i].status.b_iso_out.l =
1972 /* Setup and start the transfer for this endpoint */
1973 dwcep->xiso_active_xfers++;
1974 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->dev_if->
1975 out_ep_regs[dwcep->num]->doepdma,
1976 dwcep->dma_desc_addr);
1978 diepctl.b.epena = 1;
1980 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
1981 out_ep_regs[dwcep->num]->doepctl, 0,
1993 * - Remove the request from the queue
/*
 * Completes the head extended-ISOC request on an endpoint: dequeues it,
 * copies per-packet actual lengths/statuses back from the hardware DMA
 * descriptors, invokes the gadget's xisoc completion callback with the
 * PCD lock dropped, frees the extended request, and kicks off the next
 * queued transfer. Caller must hold ep->pcd->lock.
 */
1995 void complete_xiso_ep(dwc_otg_pcd_ep_t *ep)
1997 dwc_otg_pcd_request_t *req = NULL;
1998 struct dwc_iso_xreq_port *ereq = NULL;
1999 struct dwc_iso_pkt_desc_port *ddesc_iso = NULL;
2000 dwc_ep_t *dwcep = NULL;
2004 dwcep = &ep->dwc_ep;
2006 /* Get the first pending request from the queue */
2007 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
2008 req = DWC_CIRCLEQ_FIRST(&ep->queue);
2010 DWC_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
2013 dwcep->xiso_active_xfers--;
2014 dwcep->xiso_queued_xfers--;
2015 /* Remove this request from the queue */
2016 DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
2018 DWC_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
2023 ereq = &req->ext_req;
2024 ddesc_iso = ereq->per_io_frame_descs;
/* Going negative means completions outran starts — flag it loudly. */
2026 if (dwcep->xiso_active_xfers < 0) {
2027 DWC_WARN("EP#%d (xiso_active_xfers=%d)", dwcep->num,
2028 dwcep->xiso_active_xfers);
2031 /* Fill the Isoc descs of portable extended req from dma descriptors */
/* actual_length = requested - residual bytes reported by hardware. */
2032 for (i = 0; i < ereq->pio_pkt_count; i++) {
2033 if (dwcep->is_in) { /* IN endpoints */
2034 ddesc_iso[i].actual_length = ddesc_iso[i].length -
2035 dwcep->desc_addr[i].status.b_iso_in.txbytes;
2036 ddesc_iso[i].status =
2037 dwcep->desc_addr[i].status.b_iso_in.txsts;
2038 } else { /* OUT endpoints */
2039 ddesc_iso[i].actual_length = ddesc_iso[i].length -
2040 dwcep->desc_addr[i].status.b_iso_out.rxbytes;
2041 ddesc_iso[i].status =
2042 dwcep->desc_addr[i].status.b_iso_out.rxsts;
/* Drop the lock across the gadget callback — it may requeue/sleep. */
2046 DWC_SPINUNLOCK(ep->pcd->lock);
2048 /* Call the completion function in the non-portable logic */
2049 ep->pcd->fops->xisoc_complete(ep->pcd, ep->priv, req->priv, 0,
2052 DWC_SPINLOCK(ep->pcd->lock);
2054 /* Free the request - specific freeing needed for extended request object */
2055 dwc_pcd_xiso_ereq_free(ep, req);
2057 /* Start the next request */
2058 dwc_otg_pcd_xiso_start_next_request(ep->pcd, ep);
2064 * Create and initialize the Isoc pkt descriptors of the extended request.
/*
 * Deep-copies the gadget-supplied (non-portable) extended ISOC request
 * into the portable req->ext_req: allocates pio_pkt_count descriptor
 * slots (atomically when requested) and copies length/offset/status per
 * packet. Returns 0 or -DWC_E_NO_MEMORY. The allocation is paired with
 * DWC_FREE in dwc_pcd_xiso_ereq_free().
 */
2067 static int dwc_otg_pcd_xiso_create_pkt_descs(dwc_otg_pcd_request_t *req,
2071 struct dwc_iso_xreq_port *ereq = NULL;
2072 struct dwc_iso_xreq_port *req_mapped = NULL;
2073 struct dwc_iso_pkt_desc_port *ipds = NULL; /* To be created in this function */
2077 ereq = &req->ext_req;
2078 req_mapped = (struct dwc_iso_xreq_port *)ereq_nonport;
2079 pkt_count = req_mapped->pio_pkt_count;
2081 /* Create the isoc descs */
/* Atomic vs. sleeping allocation chosen by the (elided) atomic_alloc flag. */
2083 ipds = DWC_ALLOC_ATOMIC(sizeof(*ipds) * pkt_count);
2085 ipds = DWC_ALLOC(sizeof(*ipds) * pkt_count);
2089 DWC_ERROR("Failed to allocate isoc descriptors");
2090 return -DWC_E_NO_MEMORY;
2093 /* Initialize the extended request fields */
2094 ereq->per_io_frame_descs = ipds;
2095 ereq->error_count = 0;
2096 ereq->pio_alloc_pkt_count = pkt_count;
2097 ereq->pio_pkt_count = pkt_count;
2098 ereq->tr_sub_flags = req_mapped->tr_sub_flags;
2100 /* Init the Isoc descriptors */
2101 for (i = 0; i < pkt_count; i++) {
2102 ipds[i].length = req_mapped->per_io_frame_descs[i].length;
2103 ipds[i].offset = req_mapped->per_io_frame_descs[i].offset;
2104 ipds[i].status = req_mapped->per_io_frame_descs[i].status; /* 0 */
2105 ipds[i].actual_length =
2106 req_mapped->per_io_frame_descs[i].actual_length;
2112 static void prn_ext_request(struct dwc_iso_xreq_port *ereq)
2114 struct dwc_iso_pkt_desc_port *xfd = NULL;
2117 DWC_DEBUG("per_io_frame_descs=%p", ereq->per_io_frame_descs);
2118 DWC_DEBUG("tr_sub_flags=%d", ereq->tr_sub_flags);
2119 DWC_DEBUG("error_count=%d", ereq->error_count);
2120 DWC_DEBUG("pio_alloc_pkt_count=%d", ereq->pio_alloc_pkt_count);
2121 DWC_DEBUG("pio_pkt_count=%d", ereq->pio_pkt_count);
2122 DWC_DEBUG("res=%d", ereq->res);
2124 for (i = 0; i < ereq->pio_pkt_count; i++) {
2125 xfd = &ereq->per_io_frame_descs[0];
2126 DWC_DEBUG("FD #%d", i);
2128 DWC_DEBUG("xfd->actual_length=%d", xfd->actual_length);
2129 DWC_DEBUG("xfd->length=%d", xfd->length);
2130 DWC_DEBUG("xfd->offset=%d", xfd->offset);
2131 DWC_DEBUG("xfd->status=%d", xfd->status);
/*
 * Queues an extended (per-IO) ISOC request: allocates the portable
 * request object, deep-copies the non-portable frame descriptors via
 * dwc_otg_pcd_xiso_create_pkt_descs(), records the buffer/DMA addresses
 * on the endpoint, appends the request to the EP queue, and — for
 * DWC_EREQ_TF_ASAP requests — immediately starts the transfer if the
 * endpoint is idle. Only supported in descriptor-DMA mode.
 * Returns 0 or a negative -DWC_E_* code.
 */
2138 int dwc_otg_pcd_xiso_ep_queue(dwc_otg_pcd_t *pcd, void *ep_handle,
2139 uint8_t *buf, dwc_dma_t dma_buf, uint32_t buflen,
2140 int zero, void *req_handle, int atomic_alloc,
2143 dwc_otg_pcd_request_t *req = NULL;
2144 dwc_otg_pcd_ep_t *ep;
2145 dwc_irqflags_t flags;
2148 ep = get_ep_from_handle(pcd, ep_handle);
2150 DWC_WARN("bad ep\n");
2151 return -DWC_E_INVALID;
2154 /* We support this extension only for DDMA mode */
2155 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC)
2156 if (!GET_CORE_IF(pcd)->dma_desc_enable)
2157 return -DWC_E_INVALID;
2159 /* Create a dwc_otg_pcd_request_t object */
/* Atomic vs. sleeping allocation selected by atomic_alloc (guard elided). */
2161 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2163 req = DWC_ALLOC(sizeof(*req));
2167 return -DWC_E_NO_MEMORY;
2170 /* Create the Isoc descs for this request which shall be the exact match
2171 * of the structure sent to us from the non-portable logic */
2173 dwc_otg_pcd_xiso_create_pkt_descs(req, ereq_nonport, atomic_alloc);
2175 DWC_WARN("Failed to init the Isoc descriptors");
2180 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2182 DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2185 req->length = buflen;
2186 req->sent_zlp = zero;
2187 req->priv = req_handle;
2189 /* DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags); */
/* Per-IO mode tracks transfer progress via the frame descriptors, so the
 * scalar xfer_len starts at 0 while total_len records the full buffer. */
2190 ep->dwc_ep.dma_addr = dma_buf;
2191 ep->dwc_ep.start_xfer_buff = buf;
2192 ep->dwc_ep.xfer_buff = buf;
2193 ep->dwc_ep.xfer_len = 0;
2194 ep->dwc_ep.xfer_count = 0;
2195 ep->dwc_ep.sent_zlp = 0;
2196 ep->dwc_ep.total_len = buflen;
2198 /* Add this request to the tail */
2199 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2200 ep->dwc_ep.xiso_queued_xfers++;
2202 /* DWC_DEBUG("CP_0"); */
2203 /* DWC_DEBUG("req->ext_req.tr_sub_flags=%d", req->ext_req.tr_sub_flags); */
2204 /* prn_ext_request((struct dwc_iso_xreq_port *) ereq_nonport); */
2205 /* prn_ext_request(&req->ext_req); */
2207 /* DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags); */
2209 /* If the req->status == ASAP then check if there is any active transfer
2210 * for this endpoint. If no active transfers, then get the first entry
2211 * from the queue and start that transfer
2213 if (req->ext_req.tr_sub_flags == DWC_EREQ_TF_ASAP) {
2214 res = dwc_otg_pcd_xiso_start_next_request(pcd, ep);
2216 DWC_WARN("Failed to start the next Isoc transfer");
2217 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2223 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2228 /* END ifdef DWC_UTE_PER_IO ***************************************************/
/*
 * Queues a transfer request on an endpoint (the PCD's usb_ep_queue()).
 * Allocates a request object, bounce-buffers non-word-aligned DMA
 * buffers in buffer-DMA mode, and either starts the transfer immediately
 * (queue empty, EP not stopped — with EP0 state-machine handling) or
 * just appends it. ISOC endpoints defer the actual start to the
 * token-received interrupt to learn the starting (micro)frame parity.
 * Returns 0 or a negative -DWC_E_* code.
 */
2229 int dwc_otg_pcd_ep_queue(dwc_otg_pcd_t *pcd, void *ep_handle,
2230 uint8_t *buf, dwc_dma_t dma_buf, uint32_t buflen,
2231 int zero, void *req_handle, int atomic_alloc)
2233 dwc_irqflags_t flags;
2234 dwc_otg_pcd_request_t *req;
2235 dwc_otg_pcd_ep_t *ep;
2236 uint32_t max_transfer;
2238 ep = get_ep_from_handle(pcd, ep_handle);
/* EP0 never has a descriptor; all other EPs must be enabled first. */
2239 if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2240 DWC_WARN("bad ep\n");
2241 return -DWC_E_INVALID;
2245 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2247 req = DWC_ALLOC(sizeof(*req));
2251 return -DWC_E_NO_MEMORY;
2253 DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2254 if (!GET_CORE_IF(pcd)->core_params->opt) {
2255 if (ep->dwc_ep.num != 0) {
2256 DWC_ERROR("queue req %p, len %d buf %p\n",
2257 req_handle, buflen, buf);
2263 req->length = buflen;
2264 req->sent_zlp = zero;
2265 req->priv = req_handle;
2266 req->dw_align_buf = NULL;
/* Buffer-DMA needs 4-byte alignment; allocate a coherent bounce buffer
 * for misaligned requests (descriptor DMA handles alignment itself).
 * NOTE(review): the allocation-failure check is not visible in this
 * listing — verify dw_align_buf is NULL-checked before use. */
2267 if ((dma_buf & 0x3) && GET_CORE_IF(pcd)->dma_enable
2268 && !GET_CORE_IF(pcd)->dma_desc_enable)
2269 req->dw_align_buf = DWC_DEV_DMA_ALLOC_ATOMIC(buflen,
2272 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2275 * After adding request to the queue for IN ISOC wait for In Token Received
2276 * when TX FIFO is empty interrupt and for OUT ISOC wait for OUT Token
2277 * Received when EP is disabled interrupt to obtain starting microframe
2278 * (odd/even) start transfer
2280 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
2282 depctl_data_t depctl = {.d32 =
2283 DWC_READ_REG32(&pcd->core_if->
2284 dev_if->in_ep_regs[ep->
2288 ++pcd->request_pending;
2290 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2291 if (ep->dwc_ep.is_in) {
2293 DWC_WRITE_REG32(&pcd->core_if->
2294 dev_if->in_ep_regs[ep->dwc_ep.
2299 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2305 * For EP0 IN without premature status, zlp is required?
2307 if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
2308 DWC_DEBUGPL(DBG_PCDV, "%d-OUT ZLP\n", ep->dwc_ep.num);
2309 /* _req->zero = 1; */
2312 /* Start the transfer */
/* Idle EP: start immediately instead of waiting for a completion. */
2313 if (DWC_CIRCLEQ_EMPTY(&ep->queue) && !ep->stopped) {
2315 if (ep->dwc_ep.num == 0) {
2316 switch (pcd->ep0state) {
2317 case EP0_IN_DATA_PHASE:
2318 DWC_DEBUGPL(DBG_PCD,
2319 "%s ep0: EP0_IN_DATA_PHASE\n",
2323 case EP0_OUT_DATA_PHASE:
2324 DWC_DEBUGPL(DBG_PCD,
2325 "%s ep0: EP0_OUT_DATA_PHASE\n",
/* SET_CONFIGURATION/SET_INTERFACE: jump straight to status. */
2327 if (pcd->request_config) {
2328 /* Complete STATUS PHASE */
2329 ep->dwc_ep.is_in = 1;
2330 pcd->ep0state = EP0_IN_STATUS_PHASE;
2334 case EP0_IN_STATUS_PHASE:
2335 DWC_DEBUGPL(DBG_PCD,
2336 "%s ep0: EP0_IN_STATUS_PHASE\n",
2341 DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
2343 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2344 return -DWC_E_SHUTDOWN;
2347 ep->dwc_ep.dma_addr = dma_buf;
2348 ep->dwc_ep.start_xfer_buff = buf;
2349 ep->dwc_ep.xfer_buff = buf;
2350 ep->dwc_ep.xfer_len = buflen;
2351 ep->dwc_ep.xfer_count = 0;
2352 ep->dwc_ep.sent_zlp = 0;
2353 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
/* Caller asked for a trailing ZLP on an exact-multiple transfer. */
2356 if ((ep->dwc_ep.xfer_len %
2357 ep->dwc_ep.maxpacket == 0)
2358 && (ep->dwc_ep.xfer_len != 0)) {
2359 ep->dwc_ep.sent_zlp = 1;
2364 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
2366 } /* non-ep0 endpoints */
/* CFI-managed buffer modes build their own descriptors. */
2369 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2370 /* store the request length */
2371 ep->dwc_ep.cfi_req_len = buflen;
2372 pcd->cfi->ops.build_descriptors(pcd->cfi, pcd,
2377 GET_CORE_IF(ep->pcd)->
2378 core_params->max_transfer_size;
2380 /* Setup and start the Transfer */
/* Misaligned + buffer DMA: run the transfer out of the bounce buffer,
 * pre-copying the payload for IN transfers. */
2381 if (req->dw_align_buf) {
2382 if (ep->dwc_ep.is_in)
2383 dwc_memcpy(req->dw_align_buf,
2385 ep->dwc_ep.dma_addr =
2386 req->dw_align_buf_dma;
2387 ep->dwc_ep.start_xfer_buff =
2389 ep->dwc_ep.xfer_buff =
2392 ep->dwc_ep.dma_addr = dma_buf;
2393 ep->dwc_ep.start_xfer_buff = buf;
2394 ep->dwc_ep.xfer_buff = buf;
2396 ep->dwc_ep.xfer_len = 0;
2397 ep->dwc_ep.xfer_count = 0;
2398 ep->dwc_ep.sent_zlp = 0;
2399 ep->dwc_ep.total_len = buflen;
2401 ep->dwc_ep.maxxfer = max_transfer;
/* Descriptor DMA caps a single chain; OUT additionally rounds the
 * cap down to a word multiple. */
2402 if (GET_CORE_IF(pcd)->dma_desc_enable) {
2403 uint32_t out_max_xfer =
2404 DDMA_MAX_TRANSFER_SIZE -
2405 (DDMA_MAX_TRANSFER_SIZE % 4);
2406 if (ep->dwc_ep.is_in) {
2407 if (ep->dwc_ep.maxxfer >
2408 DDMA_MAX_TRANSFER_SIZE) {
2409 ep->dwc_ep.maxxfer =
2410 DDMA_MAX_TRANSFER_SIZE;
2413 if (ep->dwc_ep.maxxfer >
2415 ep->dwc_ep.maxxfer =
/* Partial transfers must end on a packet boundary. */
2420 if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
2421 ep->dwc_ep.maxxfer -=
2422 (ep->dwc_ep.maxxfer %
2423 ep->dwc_ep.maxpacket);
2427 if ((ep->dwc_ep.total_len %
2428 ep->dwc_ep.maxpacket == 0)
2429 && (ep->dwc_ep.total_len != 0)) {
2430 ep->dwc_ep.sent_zlp = 1;
2436 dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
2442 ++pcd->request_pending;
2443 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
/* Slave-mode IN on a stopped EP: unmask the "IN token received with
 * TxFIFO empty" interrupt so the transfer resumes on the next token. */
2444 if (ep->dwc_ep.is_in && ep->stopped
2445 && !(GET_CORE_IF(pcd)->dma_enable)) {
2446 /** @todo NGS Create a function for this. */
2447 diepmsk_data_t diepmsk = {.d32 = 0 };
2448 diepmsk.b.intktxfemp = 1;
2449 if (GET_CORE_IF(pcd)->multiproc_int_enable) {
2450 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
2452 diepeachintmsk[ep->dwc_ep.num],
2455 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
2456 dev_global_regs->diepmsk, 0,
2462 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
/*
 * Cancels a previously queued request (the PCD's usb_ep_dequeue()).
 * Verifies the request is actually on this endpoint's queue, stops any
 * in-flight transfer, flushes/releases the IN Tx FIFO, and completes the
 * request with -DWC_E_RESTART. Returns 0, -DWC_E_INVALID (bad EP or
 * request not queued), or -DWC_E_SHUTDOWN.
 */
2467 int dwc_otg_pcd_ep_dequeue(dwc_otg_pcd_t *pcd, void *ep_handle,
2470 dwc_irqflags_t flags;
2471 dwc_otg_pcd_request_t *req;
2472 dwc_otg_pcd_ep_t *ep;
2474 ep = get_ep_from_handle(pcd, ep_handle);
2475 if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2476 DWC_WARN("bad argument\n");
2477 return -DWC_E_INVALID;
2480 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2482 /* make sure it's actually queued on this endpoint */
2483 DWC_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
2484 if (req->priv == (void *)req_handle) {
/* NOTE(review): if the queue is empty or exhausted, req here is the
 * CIRCLEQ head sentinel, and req->priv reads past a real request —
 * verify DWC_CIRCLEQ_FOREACH's termination value before relying on
 * this check. */
2489 if (req->priv != (void *)req_handle) {
2490 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2491 return -DWC_E_INVALID;
/* Request found and still linked: halt hardware before completing it. */
2494 if (!DWC_CIRCLEQ_EMPTY_ENTRY(req, queue_entry)) {
2495 dwc_otg_pcd_ep_stop_transfer(GET_CORE_IF(pcd),
2497 /* Flush the Tx FIFO */
2498 if (ep->dwc_ep.is_in) {
2499 dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd),
2500 ep->dwc_ep.tx_fifo_num);
2501 release_perio_tx_fifo(GET_CORE_IF(pcd),
2502 ep->dwc_ep.tx_fifo_num);
2503 release_tx_fifo(GET_CORE_IF(pcd),
2504 ep->dwc_ep.tx_fifo_num);
/* Complete with -DWC_E_RESTART so the gadget knows it was cancelled. */
2507 dwc_otg_request_done(ep, req, -DWC_E_RESTART);
2512 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2514 return req ? 0 : -DWC_E_SHUTDOWN;
/*
 * Sets or clears the halt (STALL) condition on an endpoint.
 *   value 0: clear stall;  value 1: set stall (EP0 enters EP0_STALL);
 *   value 2/3: clear/set only the sticky stall_clear_flag.
 * Refuses ISOC endpoints and endpoints with queued transfers
 * (-DWC_E_AGAIN). For IN EPs in descriptor-DMA mode, stalling is also
 * refused while data remains in the Tx FIFO.
 */
2518 int dwc_otg_pcd_ep_halt(dwc_otg_pcd_t *pcd, void *ep_handle, int value)
2520 dwc_otg_pcd_ep_t *ep;
2521 dwc_irqflags_t flags;
2524 ep = get_ep_from_handle(pcd, ep_handle);
/* ISOC endpoints cannot halt per USB spec; EP0 is allowed sans desc. */
2526 if (!ep || (!ep->desc && ep != &pcd->ep0) ||
2527 (ep->desc && (ep->desc->bmAttributes == UE_ISOCHRONOUS))) {
2528 DWC_WARN("%s, bad ep\n", __func__);
2529 return -DWC_E_INVALID;
2532 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2533 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
2534 DWC_WARN("%d %s XFer In process\n", ep->dwc_ep.num,
2535 ep->dwc_ep.is_in ? "IN" : "OUT");
2536 retval = -DWC_E_AGAIN;
2537 } else if (value == 0) {
2538 ep->dwc_ep.stall_clear_flag = 0;
2539 dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
2540 } else if (value == 1) {
2542 if (ep->dwc_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
2543 dtxfsts_data_t txstatus;
2544 fifosize_data_t txfifosize;
/* NOTE(review): dtxfsiz is indexed with tx_fifo_num here, while
 * ep_enable/ep_disable use tx_fifo_num - 1 — confirm whether this
 * off-by-one is intentional. */
2547 DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->
2548 dtxfsiz[ep->dwc_ep.tx_fifo_num]);
2550 DWC_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
2551 in_ep_regs[ep->dwc_ep.num]->dtxfsts);
/* Free space below full depth means data is still in the FIFO. */
2553 if (txstatus.b.txfspcavail < txfifosize.b.depth) {
2554 DWC_WARN("%s() Data In Tx Fifo\n", __func__);
2555 retval = -DWC_E_AGAIN;
2557 if (ep->dwc_ep.num == 0) {
2558 pcd->ep0state = EP0_STALL;
2562 dwc_otg_ep_set_stall(GET_CORE_IF(pcd),
2566 if (ep->dwc_ep.num == 0) {
2567 pcd->ep0state = EP0_STALL;
2571 dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
2573 } else if (value == 2) {
2574 ep->dwc_ep.stall_clear_flag = 0;
2575 } else if (value == 3) {
2576 ep->dwc_ep.stall_clear_flag = 1;
2580 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2586 * This function initiates remote wakeup of the host from suspend state.
/*
 * set != 0 asserts DCTL.rmtwkupsig, set == 0 deasserts it. Only acts
 * while DSTS reports suspend and the host enabled DEVICE_REMOTE_WAKEUP.
 * With ADP active, first tears down the power-down logic and
 * re-initializes the core for device mode before signalling/SRP.
 */
2588 void dwc_otg_pcd_rem_wkup_from_suspend(dwc_otg_pcd_t *pcd, int set)
2590 dctl_data_t dctl = { 0 };
2591 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2594 dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
/* Remote wakeup is only meaningful from the suspended state. */
2595 if (!dsts.b.suspsts) {
2596 DWC_WARN("Remote wakeup while is not in suspend state\n");
2598 /* Check if DEVICE_REMOTE_WAKEUP feature enabled */
2599 if (pcd->remote_wakeup_enable) {
2602 if (core_if->adp_enable) {
2603 gpwrdn_data_t gpwrdn;
2605 dwc_otg_adp_probe_stop(core_if);
2607 /* Mask SRP detected interrupt from Power Down Logic */
2609 gpwrdn.b.srp_det_msk = 1;
2610 DWC_MODIFY_REG32(&core_if->core_global_regs->
2611 gpwrdn, gpwrdn.d32, 0);
2613 /* Disable Power Down Logic */
2615 gpwrdn.b.pmuactv = 1;
2616 DWC_MODIFY_REG32(&core_if->core_global_regs->
2617 gpwrdn, gpwrdn.d32, 0);
2620 * Initialize the Core for Device mode.
2622 core_if->op_state = B_PERIPHERAL;
2623 dwc_otg_core_init(core_if);
2624 dwc_otg_enable_global_interrupts(core_if);
2625 cil_pcd_start(core_if);
2627 dwc_otg_initiate_srp(core_if);
/* Assert (set) or deassert (clear) the remote-wakeup signal bit. */
2630 dctl.b.rmtwkupsig = 1;
2631 DWC_MODIFY_REG32(&core_if->dev_if->
2632 dev_global_regs->dctl, 0, dctl.d32);
2633 DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
2636 DWC_MODIFY_REG32(&core_if->dev_if->
2637 dev_global_regs->dctl, dctl.d32, 0);
2638 DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
2641 DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
2645 #ifdef CONFIG_USB_DWC_OTG_LPM
2647 * This function initiates remote wakeup of the host from L1 sleep state.
/*
 * LPM path: wakes the host from L1. Bails unless the port is actually in
 * L1 sleep, the host permitted remote wakeup, and resume is flagged OK.
 * Disables UTMI sleep/clock gating first so the core is clocked, then
 * (set != 0) pulses DCTL.rmtwkupsig, which hardware self-clears.
 */
2649 void dwc_otg_pcd_rem_wkup_from_sleep(dwc_otg_pcd_t *pcd, int set)
2651 glpmcfg_data_t lpmcfg;
2652 pcgcctl_data_t pcgcctl = {.d32 = 0 };
2654 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2656 lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
2658 /* Check if we are in L1 state */
2659 if (!lpmcfg.b.prt_sleep_sts) {
2660 DWC_DEBUGPL(DBG_PCD, "Device is not in sleep state\n");
2664 /* Check if host allows remote wakeup */
2665 if (!lpmcfg.b.rem_wkup_en) {
2666 DWC_DEBUGPL(DBG_PCD, "Host does not allow remote wakeup\n");
2670 /* Check if Resume OK */
2671 if (!lpmcfg.b.sleep_state_resumeok) {
2672 DWC_DEBUGPL(DBG_PCD, "Sleep state resume is not OK\n");
/* Re-read and disable UTMI sleep plus the HIRD-threshold gating bit. */
2676 lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
2677 lpmcfg.b.en_utmi_sleep = 0;
2678 lpmcfg.b.hird_thres &= (~(1 << 4));
2680 /* Clear Enbl_L1Gating bit. */
2681 pcgcctl.b.enbl_sleep_gating = 1;
2682 DWC_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
2684 DWC_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
2687 dctl_data_t dctl = {.d32 = 0 };
2688 dctl.b.rmtwkupsig = 1;
2689 /* Set RmtWkUpSig bit to start remote wakup signaling.
2690 * Hardware will automatically clear this bit.
2692 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
2694 DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
2701 * Performs remote wakeup.
/*
 * Public entry point: dispatches to the L1-sleep (LPM) or bus-suspend
 * wakeup routine based on lx_state, under the PCD lock. A no-op in host
 * mode.
 */
2703 void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set)
2705 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2706 dwc_irqflags_t flags;
2707 if (dwc_otg_is_device_mode(core_if)) {
2708 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2709 #ifdef CONFIG_USB_DWC_OTG_LPM
2710 if (core_if->lx_state == DWC_OTG_L1) {
2711 dwc_otg_pcd_rem_wkup_from_sleep(pcd, set);
2714 dwc_otg_pcd_rem_wkup_from_suspend(pcd, set);
2715 #ifdef CONFIG_USB_DWC_OTG_LPM
2718 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
/*
 * Pulses a soft disconnect: sets DCTL.sftdiscon, busy-waits no_of_usecs
 * microseconds, then clears it — forcing the host to re-enumerate the
 * device. Device mode only.
 */
2723 void dwc_otg_pcd_disconnect_us(dwc_otg_pcd_t *pcd, int no_of_usecs)
2725 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2726 dctl_data_t dctl = { 0 };
2728 if (dwc_otg_is_device_mode(core_if)) {
2729 dctl.b.sftdiscon = 1;
2730 DWC_PRINTF("Soft disconnect for %d useconds\n", no_of_usecs);
2731 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0,
/* Busy-wait, not sleep: callers may hold spinlocks. */
2733 dwc_udelay(no_of_usecs);
2734 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
2738 DWC_PRINTF("NOT SUPPORTED IN HOST MODE\n");
/*
 * Gadget wakeup entry point: with a valid B-session, signals remote
 * wakeup if the bus is suspended; without a session, initiates SRP to
 * start one. Return value elided in this listing (presumably 0).
 */
2744 int dwc_otg_pcd_wakeup(dwc_otg_pcd_t *pcd)
2747 gotgctl_data_t gotgctl;
2750 * This function starts the Protocol if no session is in progress. If
2751 * a session is already in progress, but the device is suspended,
2752 * remote wakeup signaling is started.
2755 /* Check if valid session */
2757 DWC_READ_REG32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
2758 if (gotgctl.b.bsesvld) {
2759 /* Check if suspend state */
2762 (GET_CORE_IF(pcd)->dev_if->dev_global_regs->
2764 if (dsts.b.suspsts) {
2765 dwc_otg_pcd_remote_wakeup(pcd, 1);
2768 dwc_otg_pcd_initiate_srp(pcd);
2776 * Implement Soft-Connect and Soft-Disconnect function
/* Soft-connect: clears DCTL.sftdiscon (bit 1, hence the literal 2) so
 * the D+ pullup engages; trailing argument elided in this listing. */
2779 void dwc_otg_pcd_pullup_enable(dwc_otg_pcd_t *pcd)
2782 DWC_MODIFY_REG32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl), 2,
/* Soft-disconnect: sets DCTL.sftdiscon (set-mask elided in this listing,
 * presumably 2), detaching the device from the bus. */
2786 void dwc_otg_pcd_pullup_disable(dwc_otg_pcd_t *pcd)
2789 DWC_MODIFY_REG32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl), 0,
/*
 * Full software reset of the PCD: with interrupts masked, re-runs core
 * initialization, reinitializes PCD bookkeeping, reprograms the device
 * registers, then re-enables global interrupts.
 */
2793 void dwc_pcd_reset(dwc_otg_pcd_t *pcd)
2795 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2796 dwc_otg_disable_global_interrupts(core_if);
2797 dwc_otg_core_init(core_if);
2798 dwc_otg_pcd_reinit(pcd);
2799 dwc_otg_core_dev_init(core_if);
2800 dwc_otg_enable_global_interrupts(core_if);
2804 * Start the SRP timer to detect when the SRP does not complete within
2807 * @param pcd the pcd structure.
/* Thin locked wrapper around the CIL's dwc_otg_initiate_srp(). */
2809 void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd)
2811 dwc_irqflags_t flags;
2812 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2813 dwc_otg_initiate_srp(GET_CORE_IF(pcd));
2814 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
/* Returns the current USB (micro)frame number from the CIL. */
2817 int dwc_otg_pcd_get_frame_number(dwc_otg_pcd_t *pcd)
2819 return dwc_otg_get_frame_number(GET_CORE_IF(pcd));
/* Returns nonzero when the LPM core parameter is enabled. */
2822 int dwc_otg_pcd_is_lpm_enabled(dwc_otg_pcd_t *pcd)
2824 return GET_CORE_IF(pcd)->core_params->lpm_enable;
/* Returns nonzero when the BESL (LPM errata) core parameter is enabled. */
2827 int dwc_otg_pcd_is_besl_enabled(dwc_otg_pcd_t *pcd)
2829 return GET_CORE_IF(pcd)->core_params->besl_enable;
/* Returns the configured baseline BESL value. */
2832 int dwc_otg_pcd_get_param_baseline_besl(dwc_otg_pcd_t *pcd)
2834 return GET_CORE_IF(pcd)->core_params->baseline_besl;
/* Returns the configured deep BESL value. */
2837 int dwc_otg_pcd_get_param_deep_besl(dwc_otg_pcd_t *pcd)
2839 return GET_CORE_IF(pcd)->core_params->deep_besl;
/* Returns whether the host enabled the b_hnp_enable OTG feature. */
2842 uint32_t get_b_hnp_enable(dwc_otg_pcd_t *pcd)
2844 return pcd->b_hnp_enable;
/* Returns whether the host advertised a_hnp_support. */
2847 uint32_t get_a_hnp_support(dwc_otg_pcd_t *pcd)
2849 return pcd->a_hnp_support;
/* Returns whether the host advertised a_alt_hnp_support. */
2852 uint32_t get_a_alt_hnp_support(dwc_otg_pcd_t *pcd)
2854 return pcd->a_alt_hnp_support;
/* Returns whether the host enabled DEVICE_REMOTE_WAKEUP for this device. */
2857 int dwc_otg_pcd_get_rmwkup_enable(dwc_otg_pcd_t *pcd)
2859 return pcd->remote_wakeup_enable;
2862 #endif /* DWC_HOST_ONLY */