1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd_intr.c $
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 * ========================================================================== */
35 #include "dwc_otg_pcd.h"
36 #include "dwc_otg_driver.h"
39 #include "dwc_otg_cfi.h"
43 extern void complete_xiso_ep(dwc_otg_pcd_ep_t *ep);
45 /* #define PRINT_CFI_DMA_DESCS */
50 * This function updates OTG.
/**
 * Resets the PCD's cached OTG/HNP negotiation state and notifies the
 * gadget function driver (if it registered an hnp_changed callback).
 *
 * @param pcd   Programming view of the PCD.
 * @param reset Nonzero for a reset-triggered update (unused in the
 *              visible portion of this body).
 */
static void dwc_otg_pcd_update_otg(dwc_otg_pcd_t *pcd, const unsigned reset)
	/* Clear all cached HNP capability/enable flags. */
	pcd->b_hnp_enable = 0;
	pcd->a_hnp_support = 0;
	pcd->a_alt_hnp_support = 0;
	/* Tell the function driver the HNP state changed, if it cares. */
	if (pcd->fops->hnp_changed) {
		pcd->fops->hnp_changed(pcd);
67 * This file contains the implementation of the PCD Interrupt handlers.
69 * The PCD handles the device interrupts. Many conditions can cause a
70 * device interrupt. When an interrupt occurs, the device interrupt
71 * service routine determines the cause of the interrupt and
72 * dispatches handling to the appropriate function. These interrupt
73 * handling functions are described below.
74 * All interrupt registers are processed from LSB to MSB.
78 * This function prints the ep0 state for debug purposes.
/**
 * Debug helper: formats the current EP0 control-transfer state into a
 * string and prints it (with the raw enum value) at DBG_ANY level.
 * No functional effect; purely diagnostic.
 */
static inline void print_ep0_state(dwc_otg_pcd_t *pcd)
	/* Map the ep0state enum to a human-readable name. */
	switch (pcd->ep0state) {
		dwc_strcpy(str, "EP0_DISCONNECT");
		dwc_strcpy(str, "EP0_IDLE");
	case EP0_IN_DATA_PHASE:
		dwc_strcpy(str, "EP0_IN_DATA_PHASE");
	case EP0_OUT_DATA_PHASE:
		dwc_strcpy(str, "EP0_OUT_DATA_PHASE");
	case EP0_IN_STATUS_PHASE:
		dwc_strcpy(str, "EP0_IN_STATUS_PHASE");
	case EP0_OUT_STATUS_PHASE:
		dwc_strcpy(str, "EP0_OUT_STATUS_PHASE");
		dwc_strcpy(str, "EP0_STALL");
		/* default: unknown/invalid state value */
		dwc_strcpy(str, "EP0_INVALID");
	DWC_DEBUGPL(DBG_ANY, "%s(%d)\n", str, pcd->ep0state);
 * This function calculates the size of the payload in the memory
117 * for out endpoints and prints size for debug purposes(used in
118 * 2.93a DevOutNak feature).
/**
 * Debug helper: computes how many bytes/packets of an OUT endpoint's
 * transfer actually reached external memory, by diffing the initial
 * DOEPTSIZ programming against the current register value
 * (used with the 2.93a DevOutNak feature).
 *
 * @param pcd Programming view of the PCD.
 * @param ep  The OUT endpoint being inspected.
 */
static inline void print_memory_payload(dwc_otg_pcd_t *pcd, dwc_ep_t *ep)
	deptsiz_data_t deptsiz_init = {.d32 = 0 };
	deptsiz_data_t deptsiz_updt = {.d32 = 0 };
	/* Value DOEPTSIZ was programmed with when the transfer started. */
	deptsiz_init.d32 = pcd->core_if->start_doeptsiz_val[ep->num];
	    DWC_READ_REG32(&pcd->core_if->dev_if->
			   out_ep_regs[ep->num]->doeptsiz);
	/* Payload will be */
	payload = deptsiz_init.b.xfersize - deptsiz_updt.b.xfersize;
	/* Packet count is decremented every time a packet
	 * is written to the RxFIFO not in to the external memory
	 * So, if payload == 0, then it means no packet was sent to ext memory*/
	    (!payload) ? 0 : (deptsiz_init.b.pktcnt - deptsiz_updt.b.pktcnt);
	DWC_DEBUGPL(DBG_PCDV, "Payload for EP%d-%s\n", ep->num,
		    (ep->is_in ? "IN" : "OUT"));
	DWC_DEBUGPL(DBG_PCDV, "Number of transfered bytes = 0x%08x\n", payload);
	DWC_DEBUGPL(DBG_PCDV, "Number of transfered packets = %d\n", pack_num);
/**
 * Debug helper: dumps one DMA descriptor's buffer address and status
 * fields (bytes, sp, l, sts, bs) for the named endpoint.
 *
 * @param ddesc   Descriptor to dump.
 * @param epname  Endpoint name string for the log line.
 * @param descnum Index of the descriptor within its chain.
 */
static inline void print_desc(struct dwc_otg_dma_desc *ddesc,
			      const uint8_t *epname, int descnum)
	    ("%s DMA_DESC(%d) buf=0x%08x bytes=0x%04x; sp=0x%x; l=0x%x; sts=0x%02x; bs=0x%02x\n",
	     epname, descnum, ddesc->buf, ddesc->status.b.bytes,
	     ddesc->status.b.sp, ddesc->status.b.l, ddesc->status.b.sts,
159 * This function returns pointer to in ep struct with number ep_num
/**
 * Returns the IN endpoint structure whose hardware EP number matches
 * ep_num by linearly scanning pcd->in_ep[] (EP numbers are not
 * guaranteed to equal array indices).
 *
 * @param pcd    Programming view of the PCD.
 * @param ep_num Hardware IN endpoint number to look up.
 */
static inline dwc_otg_pcd_ep_t *get_in_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num)
	int num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
	for (i = 0; i < num_in_eps; ++i) {
		if (pcd->in_ep[i].dwc_ep.num == ep_num)
			return &pcd->in_ep[i];
177 * This function returns pointer to out ep struct with number ep_num
/**
 * Returns the OUT endpoint structure whose hardware EP number matches
 * ep_num by linearly scanning pcd->out_ep[] (mirror of get_in_ep()).
 *
 * @param pcd    Programming view of the PCD.
 * @param ep_num Hardware OUT endpoint number to look up.
 */
static inline dwc_otg_pcd_ep_t *get_out_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num)
	int num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
	for (i = 0; i < num_out_eps; ++i) {
		if (pcd->out_ep[i].dwc_ep.num == ep_num)
			return &pcd->out_ep[i];
195 * This functions gets a pointer to an EP from the wIndex address
196 * value of the control request.
/**
 * Resolves a control-request wIndex (endpoint address: number plus
 * direction bit) to the matching PCD endpoint structure.
 *
 * @param pcd    Programming view of the PCD.
 * @param wIndex wIndex field of the SETUP request, interpreted as an
 *               endpoint address.
 */
dwc_otg_pcd_ep_t *get_ep_by_addr(dwc_otg_pcd_t *pcd, u16 wIndex)
	dwc_otg_pcd_ep_t *ep;
	uint32_t ep_num = UE_GET_ADDR(wIndex);
	} else if (UE_GET_DIR(wIndex) == UE_DIR_IN) {	/* in ep */
		/* in_ep[]/out_ep[] exclude EP0, hence the -1 indexing. */
		ep = &pcd->in_ep[ep_num - 1];
		ep = &pcd->out_ep[ep_num - 1];
215 * This function checks the EP request queue, if the queue is not
216 * empty the next request is started.
/**
 * Starts the next queued request on an endpoint, if any.
 *
 * Pops the head of ep->queue, programs the dwc_ep transfer fields
 * (DMA address, buffer pointers, lengths, maxxfer clamping for
 * descriptor-DMA, short-packet/ZLP flag) and kicks the hardware via
 * dwc_otg_ep_start_transfer(). If the queue is empty and the endpoint
 * is isochronous, the frame number is invalidated instead.
 *
 * @param ep The endpoint whose request queue is serviced.
 */
void start_next_request(dwc_otg_pcd_ep_t *ep)
	dwc_otg_pcd_request_t *req = 0;
	uint32_t max_transfer =
	    GET_CORE_IF(ep->pcd)->core_params->max_transfer_size;
	struct dwc_otg_pcd *pcd;
	if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
		req = DWC_CIRCLEQ_FIRST(&ep->queue);
		/* CFI (non-standard buffer mode): let the CFI layer build
		 * the DMA descriptor chain for this request. */
		if (ep->dwc_ep.buff_mode != BM_STANDARD) {
			ep->dwc_ep.cfi_req_len = req->length;
			pcd->cfi->ops.build_descriptors(pcd->cfi, pcd, ep, req);
		/* Setup and start the Transfer */
		if (req->dw_align_buf) {
			/* Use the driver's DWORD-aligned bounce buffer when
			 * the caller's buffer is not suitably aligned. */
			ep->dwc_ep.dma_addr = req->dw_align_buf_dma;
			ep->dwc_ep.start_xfer_buff = req->dw_align_buf;
			ep->dwc_ep.xfer_buff = req->dw_align_buf;
			ep->dwc_ep.dma_addr = req->dma;
			ep->dwc_ep.start_xfer_buff = req->buf;
			ep->dwc_ep.xfer_buff = req->buf;
		ep->dwc_ep.sent_zlp = 0;
		ep->dwc_ep.total_len = req->length;
		ep->dwc_ep.xfer_len = 0;
		ep->dwc_ep.xfer_count = 0;
		ep->dwc_ep.maxxfer = max_transfer;
		if (GET_CORE_IF(ep->pcd)->dma_desc_enable) {
			/* Descriptor DMA: clamp per-transfer size; OUT side
			 * must also be a multiple of 4 bytes. */
			uint32_t out_max_xfer = DDMA_MAX_TRANSFER_SIZE
			    - (DDMA_MAX_TRANSFER_SIZE % 4);
			if (ep->dwc_ep.is_in) {
				if (ep->dwc_ep.maxxfer >
				    DDMA_MAX_TRANSFER_SIZE) {
					    DDMA_MAX_TRANSFER_SIZE;
				if (ep->dwc_ep.maxxfer > out_max_xfer) {
		if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
			/* Split transfers on a packet boundary so the
			 * remainder starts a fresh packet. */
			ep->dwc_ep.maxxfer -=
			    (ep->dwc_ep.maxxfer % ep->dwc_ep.maxpacket);
			if ((ep->dwc_ep.total_len %
			     ep->dwc_ep.maxpacket == 0)
			    && (ep->dwc_ep.total_len != 0)) {
				/* Exact multiple of maxpacket: remember to
				 * send a zero-length packet terminator. */
				ep->dwc_ep.sent_zlp = 1;
		dwc_otg_ep_start_transfer(GET_CORE_IF(ep->pcd), &ep->dwc_ep);
	} else if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
		DWC_PRINTF("There are no more ISOC requests \n");
		/* Invalidate frame tracking; no ISOC data pending. */
		ep->dwc_ep.frame_num = 0xFFFFFFFF;
293 * This function handles the SOF Interrupts. At this time the SOF
294 * Interrupt is disabled.
/**
 * Handles the Start-of-Frame interrupt. Currently only logs and acks
 * the interrupt (the SOF interrupt is normally kept disabled).
 *
 * @param pcd Programming view of the PCD.
 */
int32_t dwc_otg_pcd_handle_sof_intr(dwc_otg_pcd_t *pcd)
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
	gintsts_data_t gintsts;
	DWC_DEBUGPL(DBG_PCD, "SOF\n");
	/* Clear interrupt */
	gintsts.b.sofintr = 1;
	DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
313 * This function handles the Rx Status Queue Level Interrupt, which
314 * indicates that there is a least one packet in the Rx FIFO. The
315 * packets are moved from the FIFO to memory, where they will be
316 * processed when the Endpoint Interrupt Register indicates Transfer
317 * Complete or SETUP Phase Done.
319 * Repeat the following until the Rx Status Queue is empty:
320 * -# Read the Receive Status Pop Register (GRXSTSP) to get Packet
322 * -# If Receive FIFO is empty then skip to step Clear the interrupt
324 * -# If SETUP Packet call dwc_otg_read_setup_packet to copy the
325 * SETUP data to the buffer
326 * -# If OUT Data Packet call dwc_otg_read_packet to copy the data
327 * to the destination buffer
/**
 * Handles the Rx Status Queue Level interrupt (slave mode RxFIFO
 * servicing). Pops one status entry from GRXSTSP, then dispatches on
 * the packet status: copies OUT data out of the FIFO, reads a SETUP
 * packet into pcd->setup_pkt, or just logs completion/NAK events.
 * The rxstsqlvl interrupt is masked while the entry is processed and
 * re-enabled (and acked) before returning.
 *
 * @param pcd Programming view of the PCD.
 */
int32_t dwc_otg_pcd_handle_rx_status_q_level_intr(dwc_otg_pcd_t *pcd)
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
	dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
	gintmsk_data_t gintmask = {.d32 = 0 };
	device_grxsts_data_t status;
	dwc_otg_pcd_ep_t *ep;
	gintsts_data_t gintsts;
	static char *dpid_str[] = { "D0", "D2", "D1", "MDATA" };
	/* DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd); */
	/* Disable the Rx Status Queue Level interrupt */
	gintmask.b.rxstsqlvl = 1;
	DWC_MODIFY_REG32(&global_regs->gintmsk, gintmask.d32, 0);
	/* Get the Status from the top of the FIFO */
	status.d32 = DWC_READ_REG32(&global_regs->grxstsp);
	DWC_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s "
		    "pktsts:%x Frame:%d(0x%0x)\n",
		    status.b.epnum, status.b.bcnt,
		    dpid_str[status.b.dpid],
		    status.b.pktsts, status.b.fn, status.b.fn);
	/* Get pointer to EP structure */
	ep = get_out_ep(pcd, status.b.epnum);
	switch (status.b.pktsts) {
	case DWC_DSTS_GOUT_NAK:
		DWC_DEBUGPL(DBG_PCDV, "Global OUT NAK\n");
	case DWC_STS_DATA_UPDT:
		DWC_DEBUGPL(DBG_PCDV, "OUT Data Packet\n");
		if (status.b.bcnt && ep->dwc_ep.xfer_buff) {
			/** @todo NGS Check for buffer overflow? */
			dwc_otg_read_packet(core_if,
					    ep->dwc_ep.xfer_buff,
			/* Advance the transfer bookkeeping past the bytes
			 * just copied out of the RxFIFO. */
			ep->dwc_ep.xfer_count += status.b.bcnt;
			ep->dwc_ep.xfer_buff += status.b.bcnt;
	case DWC_STS_XFER_COMP:
		DWC_DEBUGPL(DBG_PCDV, "OUT Complete\n");
	case DWC_DSTS_SETUP_COMP:
		DWC_DEBUGPL(DBG_PCDV, "Setup Complete\n");
	case DWC_DSTS_SETUP_UPDT:
		/* Copy the 8-byte SETUP packet into the PCD buffer. */
		dwc_otg_read_setup_packet(core_if, pcd->setup_pkt->d32);
			    "SETUP PKT: %02x.%02x v%04x i%04x l%04x\n",
			    pcd->setup_pkt->req.bmRequestType,
			    pcd->setup_pkt->req.bRequest,
			    UGETW(pcd->setup_pkt->req.wValue),
			    UGETW(pcd->setup_pkt->req.wIndex),
			    UGETW(pcd->setup_pkt->req.wLength));
		ep->dwc_ep.xfer_count += status.b.bcnt;
		DWC_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n",
	/* Enable the Rx Status Queue Level interrupt */
	DWC_MODIFY_REG32(&global_regs->gintmsk, 0, gintmask.d32);
	/* Clear interrupt */
	gintsts.b.rxstsqlvl = 1;
	DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
	/* DWC_DEBUGPL(DBG_PCDV, "EXIT: %s\n", __func__); */
411 * This function examines the Device IN Token Learning Queue to
412 * determine the EP number of the last IN token received. This
413 * implementation is for the Mass Storage device where there are only
414 * 2 IN EPs (Control-IN and BULK-IN).
416 * The EP numbers for the first six IN Tokens are in DTKNQR1 and there
417 * are 8 EP Numbers in each of the other possible DTKNQ Registers.
419 * @param core_if Programming view of DWC_otg controller.
/**
 * Returns the EP number of the most recently received IN token by
 * decoding the Device IN Token Learning Queue (DTKNQ registers).
 *
 * DTKNQR1 holds the write pointer plus the first six 4-bit EP numbers;
 * each further register holds eight more. The write pointer (minus
 * one) indexes the newest entry; the arithmetic below selects which
 * register and which nibble within it to extract.
 *
 * @param core_if Programming view of DWC_otg controller.
 */
static inline int get_ep_of_last_in_token(dwc_otg_core_if_t *core_if)
	dwc_otg_device_global_regs_t *dev_global_regs =
	    core_if->dev_if->dev_global_regs;
	const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth;
	/* Number of Token Queue Registers */
	const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8;
	dtknq1_data_t dtknqr1;
	uint32_t in_tkn_epnums[4];
	volatile uint32_t *addr = &dev_global_regs->dtknqr1;
	/* DWC_DEBUGPL(DBG_PCD,"dev_token_q_depth=%d\n",TOKEN_Q_DEPTH); */
	/* Read the DTKNQ Registers */
	for (i = 0; i < DTKNQ_REG_CNT; i++) {
		in_tkn_epnums[i] = DWC_READ_REG32(addr);
		DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i + 1,
		/* DTKNQR2 is not adjacent to DTKNQR3 in the register map;
		 * skip over the dvbusdis register. */
		if (addr == &dev_global_regs->dvbusdis) {
			addr = &dev_global_regs->dtknqr3_dthrctl;
	/* Copy the DTKNQR1 data to the bit field. */
	dtknqr1.d32 = in_tkn_epnums[0];
	/* Get the EP numbers */
	in_tkn_epnums[0] = dtknqr1.b.epnums0_5;
	ndx = dtknqr1.b.intknwptr - 1;
	/* DWC_DEBUGPL(DBG_PCDV,"ndx=%d\n",ndx); */
		/** @todo Find a simpler way to calculate the max
		int cnt = TOKEN_Q_DEPTH;
		if (TOKEN_Q_DEPTH <= 6) {
			cnt = TOKEN_Q_DEPTH - 1;
		} else if (TOKEN_Q_DEPTH <= 14) {
			cnt = TOKEN_Q_DEPTH - 7;
		} else if (TOKEN_Q_DEPTH <= 22) {
			cnt = TOKEN_Q_DEPTH - 15;
			cnt = TOKEN_Q_DEPTH - 23;
		/* Wrapped: newest entry is the last nibble of the last
		 * register that the queue depth reaches into. */
		epnum = (in_tkn_epnums[DTKNQ_REG_CNT - 1] >> (cnt * 4)) & 0xF;
		epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF;
	} else if (ndx <= 13) {
		epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF;
	} else if (ndx <= 21) {
		epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF;
	} else if (ndx <= 29) {
		epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF;
	/* DWC_DEBUGPL(DBG_PCD,"epnum=%d\n",epnum); */
491 * This interrupt occurs when the non-periodic Tx FIFO is half-empty.
492 * The active request is checked for the next packet to be loaded into
493 * the non-periodic Tx FIFO.
/**
 * Handles the non-periodic TxFIFO half-empty interrupt (slave mode).
 * Identifies the endpoint of the last IN token via the learning queue
 * and writes packets into the FIFO while both request-queue slots and
 * FIFO space remain and the transfer is unfinished; finally acks the
 * interrupt.
 *
 * @param pcd Programming view of the PCD.
 */
int32_t dwc_otg_pcd_handle_np_tx_fifo_empty_intr(dwc_otg_pcd_t *pcd)
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
	dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
	dwc_otg_dev_in_ep_regs_t *ep_regs;
	gnptxsts_data_t txstatus = {.d32 = 0 };
	gintsts_data_t gintsts;
	dwc_otg_pcd_ep_t *ep = 0;
	/* Get the epnum from the IN Token Learning Queue. */
	epnum = get_ep_of_last_in_token(core_if);
	ep = get_in_ep(pcd, epnum);
	DWC_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %d \n", epnum);
	ep_regs = core_if->dev_if->in_ep_regs[epnum];
	/* Size of the next packet: remaining bytes, capped at maxpacket. */
	len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
	if (len > ep->dwc_ep.maxpacket) {
		len = ep->dwc_ep.maxpacket;
	dwords = (len + 3) / 4;
	/* While there is space in the queue and space in the FIFO and
	 * More data to transfer, Write packets to the Tx FIFO */
	txstatus.d32 = DWC_READ_REG32(&global_regs->gnptxsts);
	DWC_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n", txstatus.d32);
	while (txstatus.b.nptxqspcavail > 0 &&
	       txstatus.b.nptxfspcavail > dwords &&
	       ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len) {
		dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0);
		/* Recompute the next packet size and re-read FIFO status. */
		len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
		if (len > ep->dwc_ep.maxpacket) {
			len = ep->dwc_ep.maxpacket;
		dwords = (len + 3) / 4;
		txstatus.d32 = DWC_READ_REG32(&global_regs->gnptxsts);
		DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", txstatus.d32);
	DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n",
		    DWC_READ_REG32(&global_regs->gnptxsts));
	/* Clear interrupt */
	gintsts.b.nptxfempty = 1;
	DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
555 * This function is called when dedicated Tx FIFO Empty interrupt occurs.
556 * The active request is checked for the next packet to be loaded into
 * the appropriate Tx FIFO.
/**
 * Services a dedicated (per-EP) TxFIFO empty interrupt: writes packets
 * for IN endpoint @epnum into its TxFIFO while the FIFO has space for
 * the next packet and the transfer is not yet complete.
 *
 * @param pcd   Programming view of the PCD.
 * @param epnum IN endpoint number whose FIFO raised the interrupt.
 */
static int32_t write_empty_tx_fifo(dwc_otg_pcd_t *pcd, uint32_t epnum)
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
	dwc_otg_dev_if_t *dev_if = core_if->dev_if;
	dwc_otg_dev_in_ep_regs_t *ep_regs;
	dtxfsts_data_t txstatus = {.d32 = 0 };
	dwc_otg_pcd_ep_t *ep = 0;
	ep = get_in_ep(pcd, epnum);
	DWC_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %d \n", epnum);
	ep_regs = core_if->dev_if->in_ep_regs[epnum];
	/* Size of the next packet: remaining bytes, capped at maxpacket. */
	len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
	if (len > ep->dwc_ep.maxpacket) {
		len = ep->dwc_ep.maxpacket;
	dwords = (len + 3) / 4;
	/* While there is space in the queue and space in the FIFO and
	 * More data to transfer, Write packets to the Tx FIFO */
	txstatus.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts);
	DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32);
	while (txstatus.b.txfspcavail > dwords &&
	       ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len &&
	       ep->dwc_ep.xfer_len != 0) {
		dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0);
		/* Recompute the next packet size and re-read FIFO status. */
		len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
		if (len > ep->dwc_ep.maxpacket) {
			len = ep->dwc_ep.maxpacket;
		dwords = (len + 3) / 4;
		    DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts);
		DWC_DEBUGPL(DBG_PCDV, "dtxfsts[%d]=0x%08x\n", epnum,
	DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum,
		    DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts));
613 * This function is called when the Device is disconnected. It stops
614 * any active requests and informs the Gadget driver of the
/**
 * Stops the PCD on device disconnect: moves EP0 to the DISCONNECT
 * state (idempotently — a second call returns early), resets OTG/HNP
 * state, masks the NP TxFIFO empty interrupt, flushes the FIFOs,
 * nukes every outstanding request on every endpoint, and finally
 * notifies the gadget driver via fops->disconnect.
 *
 * Called with interrupts in an unknown state; takes pcd->lock and
 * drops it around the disconnect callback to avoid re-entrancy
 * deadlocks.
 *
 * @param pcd Programming view of the PCD.
 */
void dwc_otg_pcd_stop(dwc_otg_pcd_t *pcd)
	int i, num_in_eps, num_out_eps;
	dwc_otg_pcd_ep_t *ep;
	gintmsk_data_t intr_mask = {.d32 = 0 };
	DWC_SPINLOCK(pcd->lock);
	num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
	num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
	DWC_DEBUGPL(DBG_PCDV, "%s() \n", __func__);
	/* don't disconnect drivers more than once */
	if (pcd->ep0state == EP0_DISCONNECT) {
		DWC_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__);
		DWC_SPINUNLOCK(pcd->lock);
	pcd->ep0state = EP0_DISCONNECT;
	/* Reset the OTG state. */
	dwc_otg_pcd_update_otg(pcd, 1);
	/* Disable the NP Tx Fifo Empty Interrupt. */
	intr_mask.b.nptxfempty = 1;
	DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
	/* Flush the FIFOs */
	/**@todo NGS Flush Periodic FIFOs */
	dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd), 0x10);
	dwc_otg_flush_rx_fifo(GET_CORE_IF(pcd));
	/* prevent new request submissions, kill any outstanding requests */
	dwc_otg_request_nuke(ep);
	/* prevent new request submissions, kill any outstanding requests */
	for (i = 0; i < num_in_eps; i++) {
		dwc_otg_pcd_ep_t *ep = &pcd->in_ep[i];
		dwc_otg_request_nuke(ep);
	/* prevent new request submissions, kill any outstanding requests */
	for (i = 0; i < num_out_eps; i++) {
		dwc_otg_pcd_ep_t *ep = &pcd->out_ep[i];
		dwc_otg_request_nuke(ep);
	/* report disconnect; the driver is already quiesced */
	if (pcd->fops->disconnect) {
		/* Drop the lock across the callback: the gadget driver may
		 * call back into the PCD. */
		DWC_SPINUNLOCK(pcd->lock);
		pcd->fops->disconnect(pcd);
		DWC_SPINLOCK(pcd->lock);
	DWC_SPINUNLOCK(pcd->lock);
674 * This interrupt indicates that ...
/**
 * Stub handler for the I2C interrupt: logs that no handler is
 * implemented, masks the interrupt so it does not fire again, and
 * acks it.
 *
 * @param pcd Programming view of the PCD.
 */
int32_t dwc_otg_pcd_handle_i2c_intr(dwc_otg_pcd_t *pcd)
	gintmsk_data_t intr_mask = {.d32 = 0 };
	gintsts_data_t gintsts;
	DWC_PRINTF("INTERRUPT Handler not implemented for %s\n", "i2cintr");
	/* Mask the interrupt to prevent repeated triggering. */
	intr_mask.b.i2cintr = 1;
	DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
	/* Clear interrupt */
	gintsts.b.i2cintr = 1;
	DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
695 * This interrupt indicates that ...
/**
 * Handles the Early Suspend interrupt: logs the event and acks the
 * interrupt (no further action taken here).
 *
 * @param pcd Programming view of the PCD.
 */
int32_t dwc_otg_pcd_handle_early_suspend_intr(dwc_otg_pcd_t *pcd)
	gintsts_data_t gintsts;
	DWC_PRINTF("Early Suspend Detected\n");
	/* Clear interrupt */
	gintsts.b.erlysuspend = 1;
	DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
713 * This function configures EPO to receive SETUP packets.
715 * @todo NGS: Update the comments from the HW FS.
717 * -# Program the following fields in the endpoint specific registers
718 * for Control OUT EP 0, in order to receive a setup packet
719 * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
721 * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
722 * to back setup packets)
723 * - In DMA mode, DOEPDMA0 Register with a memory address to
724 * store any setup packets received
726 * @param core_if Programming view of DWC_otg controller.
727 * @param pcd Programming view of the PCD.
/**
 * Arms Control OUT endpoint 0 to receive SETUP packets.
 *
 * Programs DOEPTSIZ0 for up to 3 back-to-back SETUP packets
 * (supcnt=3, pktcnt=1, xfersize=24). In buffer-DMA mode, DOEPDMA0 is
 * pointed at pcd->setup_pkt_dma_handle; in descriptor-DMA mode, one of
 * the two ping-pong setup descriptors is initialized and DOEPDMA0 is
 * pointed at it. On cores >= 3.00a, the function bails out early if
 * EP0 OUT is already enabled. CNAK handling differs by core revision
 * (<= 2.94a writes DOEPCTL0 directly; newer cores use a modify).
 *
 * @param core_if Programming view of DWC_otg controller.
 * @param pcd Programming view of the PCD.
 */
static inline void ep0_out_start(dwc_otg_core_if_t *core_if,
	dwc_otg_dev_if_t *dev_if = core_if->dev_if;
	deptsiz0_data_t doeptsize0 = {.d32 = 0 };
	dwc_otg_dev_dma_desc_t *dma_desc;
	depctl_data_t doepctl = {.d32 = 0 };
	DWC_DEBUGPL(DBG_PCDV, "%s() doepctl0=%0x\n", __func__,
		    DWC_READ_REG32(&dev_if->out_ep_regs[0]->doepctl));
	if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
		doepctl.d32 = DWC_READ_REG32(&dev_if->out_ep_regs[0]->doepctl);
		/* EP0 OUT already armed — nothing to do. */
		if (doepctl.b.epena) {
	/* Up to 3 back-to-back SETUP packets: 3 * 8 bytes. */
	doeptsize0.b.supcnt = 3;
	doeptsize0.b.pktcnt = 1;
	doeptsize0.b.xfersize = 8 * 3;
	if (core_if->dma_enable) {
		if (!core_if->dma_desc_enable) {
			/** put here as for Hermes mode deptsiz register should not be written */
			DWC_WRITE_REG32(&dev_if->out_ep_regs[0]->doeptsiz,
			/** @todo dma needs to handle multiple setup packets (up to 3) */
			DWC_WRITE_REG32(&dev_if->out_ep_regs[0]->doepdma,
					pcd->setup_pkt_dma_handle);
			/* Descriptor DMA: alternate between the two setup
			 * descriptors (ping-pong). */
			dev_if->setup_desc_index =
			    (dev_if->setup_desc_index + 1) & 1;
			    dev_if->setup_desc_addr[dev_if->setup_desc_index];
			/** DMA Descriptor Setup */
			/* Mark busy while the fields are being filled in,
			 * then hand to hardware with BS_HOST_READY last. */
			dma_desc->status.b.bs = BS_HOST_BUSY;
			if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
				dma_desc->status.b.sr = 0;
				dma_desc->status.b.mtrf = 0;
			dma_desc->status.b.l = 1;
			dma_desc->status.b.ioc = 1;
			dma_desc->status.b.bytes = pcd->ep0.dwc_ep.maxpacket;
			dma_desc->buf = pcd->setup_pkt_dma_handle;
			dma_desc->status.b.sts = 0;
			dma_desc->status.b.bs = BS_HOST_READY;
			/** DOEPDMA0 Register write */
			DWC_WRITE_REG32(&dev_if->out_ep_regs[0]->doepdma,
					dev_if->dma_setup_desc_addr
					[dev_if->setup_desc_index]);
		/** put here as for Hermes mode deptsiz register should not be written */
		DWC_WRITE_REG32(&dev_if->out_ep_regs[0]->doeptsiz,
	/** DOEPCTL0 Register write cnak will be set after setup interrupt */
	if (core_if->snpsid <= OTG_CORE_REV_2_94a) {
		DWC_WRITE_REG32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
		DWC_MODIFY_REG32(&dev_if->out_ep_regs[0]->doepctl, 0,
	DWC_DEBUGPL(DBG_PCDV, "doepctl0=%0x\n",
		    DWC_READ_REG32(&dev_if->out_ep_regs[0]->doepctl));
	DWC_DEBUGPL(DBG_PCDV, "diepctl0=%0x\n",
		    DWC_READ_REG32(&dev_if->in_ep_regs[0]->diepctl));
812 * This interrupt occurs when a USB Reset is detected. When the USB
813 * Reset Interrupt occurs the device state is set to DEFAULT and the
814 * EP0 state is set to IDLE.
815 * -# Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1)
816 * -# Unmask the following interrupt bits
817 * - DAINTMSK.INEP0 = 1 (Control 0 IN endpoint)
818 * - DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint)
819 * - DOEPMSK.SETUP = 1
820 * - DOEPMSK.XferCompl = 1
821 * - DIEPMSK.XferCompl = 1
822 * - DIEPMSK.TimeOut = 1
823 * -# Program the following fields in the endpoint specific registers
824 * for Control OUT EP 0, in order to receive a setup packet
825 * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
827 * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
828 * to back setup packets)
829 * - In DMA mode, DOEPDMA0 Register with a memory address to
830 * store any setup packets received
831 * At this point, all the required initialization, except for enabling
832 * the control 0 OUT endpoint is done, for receiving SETUP packets.
/**
 * Handles the USB Reset interrupt.
 *
 * Sequence: restart PHY clocks if gated (PCGCCTL), reset lx_state and
 * OTG/HNP state, clear remote-wakeup signalling, SNAK all OUT EPs,
 * flush the NP TxFIFO and the IN-token learning queue, reinitialize
 * the next-EP sequencing for shared-FIFO DMA cores, unmask the EP0
 * IN/OUT and DOEPMSK/DIEPMSK interrupt bits (per-EP registers when
 * multiproc_int_enable, global ones otherwise), reset the device
 * address in DCFG, arm EP0 OUT for SETUP packets on cores <= 2.94a,
 * and ack the interrupt.
 *
 * @param pcd Programming view of the PCD.
 */
int32_t dwc_otg_pcd_handle_usb_reset_intr(dwc_otg_pcd_t *pcd)
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
	dwc_otg_dev_if_t *dev_if = core_if->dev_if;
	depctl_data_t doepctl = {.d32 = 0 };
	depctl_data_t diepctl = {.d32 = 0 };
	daint_data_t daintmsk = {.d32 = 0 };
	doepmsk_data_t doepmsk = {.d32 = 0 };
	diepmsk_data_t diepmsk = {.d32 = 0 };
	dcfg_data_t dcfg = {.d32 = 0 };
	grstctl_t resetctl = {.d32 = 0 };
	dctl_data_t dctl = {.d32 = 0 };
	gintsts_data_t gintsts;
	pcgcctl_data_t power = {.d32 = 0 };
	/* If the PHY clock is gated, ungate it before touching the core. */
	power.d32 = DWC_READ_REG32(core_if->pcgcctl);
	if (power.b.stoppclk) {
		power.b.stoppclk = 1;
		DWC_MODIFY_REG32(core_if->pcgcctl, power.d32, 0);
		DWC_MODIFY_REG32(core_if->pcgcctl, power.d32, 0);
		power.b.rstpdwnmodule = 1;
		DWC_MODIFY_REG32(core_if->pcgcctl, power.d32, 0);
	core_if->lx_state = DWC_OTG_L0;
	core_if->otg_sts = 0;
	DWC_PRINTF("USB RESET\n");
	/* Invalidate ISOC frame tracking on all IN EPs (DWC_EN_ISOC). */
	for (i = 1; i < 16; ++i) {
		dwc_otg_pcd_ep_t *ep;
		ep = get_in_ep(pcd, i);
		dwc_ep = &ep->dwc_ep;
		dwc_ep->next_frame = 0xffffffff;
#endif /* DWC_EN_ISOC */
	/* reset the HNP settings */
	dwc_otg_pcd_update_otg(pcd, 1);
	/* Clear the Remote Wakeup Signalling */
	dctl.b.rmtwkupsig = 1;
	DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0);
	/* Set NAK for all OUT EPs */
	for (i = 0; i <= dev_if->num_out_eps; i++) {
		DWC_WRITE_REG32(&dev_if->out_ep_regs[i]->doepctl, doepctl.d32);
	/* Flush the NP Tx FIFO */
	dwc_otg_flush_tx_fifo(core_if, 0x10);
	/* Flush the Learning Queue */
	resetctl.b.intknqflsh = 1;
	DWC_WRITE_REG32(&core_if->core_global_regs->grstctl, resetctl.d32);
	/* Shared-FIFO DMA cores: rebuild the IN next-endpoint sequence so
	 * token prediction starts from a clean state. */
	if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable) {
		core_if->start_predict = 0;
		for (i = 0; i <= core_if->dev_if->num_in_eps; ++i) {
			core_if->nextep_seq[i] = 0xff;	/* 0xff - EP not active */
		core_if->nextep_seq[0] = 0;
		core_if->first_in_nextep_seq = 0;
		diepctl.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[0]->diepctl);
		diepctl.b.nextep = 0;
		DWC_WRITE_REG32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
		/* Update IN Endpoint Mismatch Count by active IN NP EP count + 1 */
		dcfg.d32 = DWC_READ_REG32(&dev_if->dev_global_regs->dcfg);
		DWC_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
		DWC_DEBUGPL(DBG_PCDV,
			    "%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
			    __func__, core_if->first_in_nextep_seq);
		for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
			DWC_DEBUGPL(DBG_PCDV, "%2d\n", core_if->nextep_seq[i]);
	/* Per-endpoint interrupt masks when multiprocessor interrupt
	 * support is enabled; otherwise the global DAINT/DOEP/DIEP masks. */
	if (core_if->multiproc_int_enable) {
		daintmsk.b.inep0 = 1;
		daintmsk.b.outep0 = 1;
		DWC_WRITE_REG32(&dev_if->dev_global_regs->deachintmsk,
		doepmsk.b.xfercompl = 1;
		doepmsk.b.ahberr = 1;
		doepmsk.b.epdisabled = 1;
		if ((core_if->dma_desc_enable) ||
		     && core_if->snpsid >= OTG_CORE_REV_3_00a)) {
			doepmsk.b.stsphsercvd = 1;
		if (core_if->dma_desc_enable)
		doepmsk.b.babble = 1;
		if (core_if->dma_enable) {
		DWC_WRITE_REG32(&dev_if->dev_global_regs->doepeachintmsk[0],
		diepmsk.b.xfercompl = 1;
		diepmsk.b.timeout = 1;
		diepmsk.b.epdisabled = 1;
		diepmsk.b.ahberr = 1;
		diepmsk.b.intknepmis = 1;
		/* EP-mismatch is meaningless on shared-FIFO DMA cores. */
		if (!core_if->en_multiple_tx_fifo && core_if->dma_enable)
			diepmsk.b.intknepmis = 0;
		/* if (core_if->dma_desc_enable) {
		if (core_if->dma_enable) {
		DWC_WRITE_REG32(&dev_if->dev_global_regs->diepeachintmsk[0],
		daintmsk.b.inep0 = 1;
		daintmsk.b.outep0 = 1;
		DWC_WRITE_REG32(&dev_if->dev_global_regs->daintmsk,
		doepmsk.b.xfercompl = 1;
		doepmsk.b.ahberr = 1;
		doepmsk.b.epdisabled = 1;
		if ((core_if->dma_desc_enable) ||
		     && core_if->snpsid >= OTG_CORE_REV_3_00a)) {
			doepmsk.b.stsphsercvd = 1;
		if (core_if->dma_desc_enable)
		DWC_WRITE_REG32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32);
		diepmsk.b.xfercompl = 1;
		diepmsk.b.timeout = 1;
		diepmsk.b.epdisabled = 1;
		diepmsk.b.ahberr = 1;
		if (!core_if->en_multiple_tx_fifo && core_if->dma_enable)
			diepmsk.b.intknepmis = 0;
		if (core_if->dma_desc_enable) {
		DWC_WRITE_REG32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32);
	/* Reset Device Address */
	dcfg.d32 = DWC_READ_REG32(&dev_if->dev_global_regs->dcfg);
	DWC_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
	/* setup EP0 to receive SETUP packets */
	/* Newer cores arm EP0 OUT from the enum-done handler instead. */
	if (core_if->snpsid <= OTG_CORE_REV_2_94a)
		ep0_out_start(core_if, pcd);
	/* Clear interrupt */
	gintsts.b.usbreset = 1;
	DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
1023 * Get the device speed from the device status register and convert it
1024 * to USB speed constant.
1026 * @param core_if Programming view of DWC_otg controller.
/**
 * Reads DSTS.enumspd and converts it to a USB_SPEED_* constant.
 *
 * @param core_if Programming view of DWC_otg controller.
 */
static int get_device_speed(dwc_otg_core_if_t *core_if)
	dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
	switch (dsts.b.enumspd) {
	case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
		speed = USB_SPEED_HIGH;
	case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
	case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
		speed = USB_SPEED_FULL;
	case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
		speed = USB_SPEED_LOW;
1052 * Read the device status register and set the device speed in the
1054 * Set up EP0 to receive SETUP packets by calling dwc_ep0_activate.
/**
 * Handles the Enumeration Done interrupt (speed negotiation complete).
 *
 * Activates EP0 (and, on cores >= 3.00a, arms EP0 OUT for SETUP
 * packets), normalizes ep0state to EP0_IDLE, reads the negotiated
 * speed, reports the connection to the gadget driver, programs
 * GUSBCFG.usbtrdtim from the speed and PHY interface width
 * (ULPI vs. UTMI+ 8/16-bit), and acks the interrupt.
 *
 * @param pcd Programming view of the PCD.
 */
int32_t dwc_otg_pcd_handle_enum_done_intr(dwc_otg_pcd_t *pcd)
	dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
	gintsts_data_t gintsts;
	gusbcfg_data_t gusbcfg;
	dwc_otg_core_global_regs_t *global_regs =
	    GET_CORE_IF(pcd)->core_global_regs;
	uint8_t utmi16b, utmi8b;
	DWC_DEBUGPL(DBG_PCD, "SPEED ENUM\n");
	if (GET_CORE_IF(pcd)->snpsid >= OTG_CORE_REV_2_60a) {
		utmi16b = 5;	/* vahrama old value was 6; */
	dwc_otg_ep0_activate(GET_CORE_IF(pcd), &ep0->dwc_ep);
	if (GET_CORE_IF(pcd)->snpsid >= OTG_CORE_REV_3_00a) {
		ep0_out_start(GET_CORE_IF(pcd), pcd);
	print_ep0_state(pcd);
	/* Whatever the prior control state, a completed enumeration puts
	 * EP0 back to IDLE. */
	if (pcd->ep0state == EP0_DISCONNECT) {
		pcd->ep0state = EP0_IDLE;
	} else if (pcd->ep0state == EP0_STALL) {
		pcd->ep0state = EP0_IDLE;
	pcd->ep0state = EP0_IDLE;
	speed = get_device_speed(GET_CORE_IF(pcd));
	pcd->fops->connect(pcd, speed);
	/* Set USB turnaround time based on device speed and PHY interface. */
	gusbcfg.d32 = DWC_READ_REG32(&global_regs->gusbcfg);
	if (speed == USB_SPEED_HIGH) {
		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
		    DWC_HWCFG2_HS_PHY_TYPE_ULPI) {
			/* ULPI interface */
			gusbcfg.b.usbtrdtim = 9;
		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
		    DWC_HWCFG2_HS_PHY_TYPE_UTMI) {
			/* UTMI+ interface */
			if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 0) {
				gusbcfg.b.usbtrdtim = utmi8b;
			} else if (GET_CORE_IF(pcd)->hwcfg4.
				   b.utmi_phy_data_width == 1) {
				gusbcfg.b.usbtrdtim = utmi16b;
			} else if (GET_CORE_IF(pcd)->
				   core_params->phy_utmi_width == 8) {
				gusbcfg.b.usbtrdtim = utmi8b;
				gusbcfg.b.usbtrdtim = utmi16b;
		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
		    DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) {
			/* UTMI+ OR ULPI interface */
			if (gusbcfg.b.ulpi_utmi_sel == 1) {
				/* ULPI interface */
				gusbcfg.b.usbtrdtim = 9;
				/* UTMI+ interface */
				if (GET_CORE_IF(pcd)->
				    core_params->phy_utmi_width == 16) {
					gusbcfg.b.usbtrdtim = utmi16b;
					gusbcfg.b.usbtrdtim = utmi8b;
		/* Full or low speed */
		gusbcfg.b.usbtrdtim = 9;
	DWC_WRITE_REG32(&global_regs->gusbcfg, gusbcfg.d32);
	/* Clear interrupt */
	gintsts.b.enumdone = 1;
	DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
1150 * This interrupt indicates that the ISO OUT Packet was dropped due to
1151 * Rx FIFO full or Rx Status Queue Full. If this interrupt occurs
1152 * read all the data from the Rx FIFO.
/**
 * Stub handler for the ISOC OUT Packet Dropped interrupt (RxFIFO or
 * Rx Status Queue full): warns that no handler is implemented, masks
 * the interrupt, and acks it.
 *
 * @param pcd Programming view of the PCD.
 */
int32_t dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(dwc_otg_pcd_t *pcd)
	gintmsk_data_t intr_mask = {.d32 = 0 };
	gintsts_data_t gintsts;
	DWC_WARN("INTERRUPT Handler not implemented for %s\n",
		 "ISOC Out Dropped");
	/* Mask the interrupt to prevent repeated triggering. */
	intr_mask.b.isooutdrop = 1;
	DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
	/* Clear interrupt */
	gintsts.b.isooutdrop = 1;
	DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
1176 * This interrupt indicates the end of the portion of the micro-frame
1177 * for periodic transactions. If there is a periodic transaction for
1178 * the next frame, load the packets into the EP periodic Tx FIFO.
/**
 * Stub handler for the End of Periodic Frame interrupt: logs that no
 * handler is implemented, masks the interrupt, and acks it.
 *
 * @param pcd Programming view of the PCD.
 */
int32_t dwc_otg_pcd_handle_end_periodic_frame_intr(dwc_otg_pcd_t *pcd)
	gintmsk_data_t intr_mask = {.d32 = 0 };
	gintsts_data_t gintsts;
	DWC_PRINTF("INTERRUPT Handler not implemented for %s\n", "EOP");
	/* Mask the interrupt to prevent repeated triggering. */
	intr_mask.b.eopframe = 1;
	DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
	/* Clear interrupt */
	gintsts.b.eopframe = 1;
	DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
1200 * This interrupt indicates that EP of the packet on the top of the
1201 * non-periodic Tx FIFO does not match EP of the IN Token received.
1203 * The "Device IN Token Queue" Registers are read to determine the
1204 * order the IN Tokens have been received. The non-periodic Tx FIFO
1205 * is flushed, so it can be reloaded in the order seen in the IN Token
1208 int32_t dwc_otg_pcd_handle_ep_mismatch_intr(dwc_otg_pcd_t *pcd)
1210 gintsts_data_t gintsts;
1211 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1213 gintmsk_data_t intr_mask = {.d32 = 0 };
/* Only act in shared-FIFO (non-dedicated-TxFIFO) DMA mode; start_predict
 * arms the IN-token prediction logic used by the mismatch workaround. */
1215 if (!core_if->en_multiple_tx_fifo && core_if->dma_enable) {
1216 core_if->start_predict = 1;
1218 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if);
1221 DWC_READ_REG32(&core_if->core_global_regs->gintsts);
1222 if (!gintsts.b.ginnakeff) {
1223 /* Disable EP Mismatch interrupt */
1225 intr_mask.b.epmismatch = 1;
1226 DWC_MODIFY_REG32(&core_if->core_global_regs->gintmsk,
1228 /* Enable the Global IN NAK Effective Interrupt */
1230 intr_mask.b.ginnakeff = 1;
1231 DWC_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0,
1233 /* Set the global non-periodic IN NAK handshake */
1235 DWC_READ_REG32(&core_if->dev_if->
1236 dev_global_regs->dctl);
1237 dctl.b.sgnpinnak = 1;
1238 DWC_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl,
1242 ("gintsts.b.ginnakeff = 1! dctl.b.sgnpinnak not set\n");
1244 /* Disabling of all EP's will be done in dwc_otg_pcd_handle_in_nak_effective()
1245 * handler after Global IN NAK Effective interrupt will be asserted */
1247 /* Clear interrupt */
1249 gintsts.b.epmismatch = 1;
1250 DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
1256 * This interrupt is valid only in DMA mode. This interrupt indicates that the
1257 * core has stopped fetching data for IN endpoints due to the unavailability of
1258 * TxFIFO space or Request Queue space. This interrupt is used by the
1259 * application for an endpoint mismatch algorithm.
1261 * @param pcd The PCD
1263 int32_t dwc_otg_pcd_handle_ep_fetsusp_intr(dwc_otg_pcd_t *pcd)
1265 gintsts_data_t gintsts;
1266 gintmsk_data_t gintmsk_data;
1268 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1269 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if);
/* Release the core from the global non-periodic IN NAK state so data
 * fetching can resume (DCTL.CGNPInNak). */
1271 /* Clear the global non-periodic IN NAK handshake */
1273 dctl.b.cgnpinnak = 1;
1274 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32,
1277 /* Mask GINTSTS.FETSUSP interrupt */
1278 gintmsk_data.d32 = DWC_READ_REG32(&core_if->core_global_regs->gintmsk);
1279 gintmsk_data.b.fetsusp = 0;
1280 DWC_WRITE_REG32(&core_if->core_global_regs->gintmsk, gintmsk_data.d32);
1282 /* Clear interrupt */
1284 gintsts.b.fetsusp = 1;
1285 DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
1291 * This function stalls EP0.
1293 static inline void ep0_do_stall(dwc_otg_pcd_t *pcd, const int err_val)
1295 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1296 usb_device_request_t *ctrl = &pcd->setup_pkt->req;
1297 DWC_WARN("req %02x.%02x protocol STALL; err %d\n",
1298 ctrl->bmRequestType, ctrl->bRequest, err_val);
/* Stall both directions of EP0: set is_in for the IN stall, then flip it
 * for the OUT stall. */
1300 ep0->dwc_ep.is_in = 1;
1301 dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep0->dwc_ep);
1302 ep0->dwc_ep.is_in = 0;
1303 dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep0->dwc_ep);
1304 pcd->ep0.stopped = 1;
/* Return to IDLE and re-arm EP0 OUT so the next SETUP can be received. */
1305 pcd->ep0state = EP0_IDLE;
1306 ep0_out_start(GET_CORE_IF(pcd), pcd);
1310 * This functions delegates the setup command to the gadget driver.
1312 static inline void do_gadget_setup(dwc_otg_pcd_t *pcd,
1313 usb_device_request_t *ctrl)
/* Drop the PCD lock across the gadget callback: the gadget's setup()
 * may re-enter the PCD (e.g. to queue a request). */
1316 DWC_SPINUNLOCK(pcd->lock);
1317 ret = pcd->fops->setup(pcd, (uint8_t *) ctrl);
/* Sanity check: the gadget must not return with our lock still held. */
1318 if (spin_is_locked((spinlock_t *) pcd->lock))
1319 DWC_WARN("%s warning: pcd->lock locked without unlock\n",
1321 DWC_SPINLOCK(pcd->lock);
/* A negative return from the gadget means the request was rejected:
 * protocol-stall EP0 (an elided conditional presumably guards this —
 * confirm against the full source). */
1323 ep0_do_stall(pcd, ret);
1326 /** @todo This is a g_file_storage gadget driver specific
1327 * workaround: a DELAYED_STATUS result from the fsg_setup
1328 * routine will result in the gadget queueing a EP0 IN status
1329 * phase for a two-stage control transfer. Exactly the same as
1330 * a SET_CONFIGURATION/SET_INTERFACE except that this is a class
1331 * specific request. Need a generic way to know when the gadget
1332 * driver will queue the status phase. Can we assume when we
1333 * call the gadget driver setup() function that it will always
1334 * queue and require the following flag? Need to look into
/* 256 + 999 is the g_file_storage DELAYED_STATUS sentinel value. */
1338 if (ret == 256 + 999) {
1339 pcd->request_config = 1;
1345 * This functions delegates the CFI setup commands to the gadget driver.
1346 * This function will return a negative value to indicate a failure.
1348 static inline int cfi_gadget_setup(dwc_otg_pcd_t *pcd,
1349 struct cfi_usb_ctrlrequest *ctrl_req)
/* Only delegate when the function-ops table provides a CFI handler;
 * the lock is dropped across the callback, as in do_gadget_setup(). */
1353 if (pcd->fops && pcd->fops->cfi_setup) {
1354 DWC_SPINUNLOCK(pcd->lock);
1355 ret = pcd->fops->cfi_setup(pcd, ctrl_req);
1356 DWC_SPINLOCK(pcd->lock);
/* On gadget failure, protocol-stall EP0 (elided conditional presumably
 * checks ret < 0 — confirm against the full source). */
1358 ep0_do_stall(pcd, ret);
1368 * This function starts the Zero-Length Packet for the IN status phase
1369 * of a 2 stage control transfer.
1371 static inline void do_setup_in_status_phase(dwc_otg_pcd_t *pcd)
1373 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
/* Nothing to do if EP0 is stalled (early exit elided in this listing). */
1374 if (pcd->ep0state == EP0_STALL) {
1378 pcd->ep0state = EP0_IN_STATUS_PHASE;
1380 /* Prepare for more SETUP Packets */
1381 DWC_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n");
/* On 3.00a+ cores with descriptor DMA, a short/terminated OUT data stage
 * leaves residual data; remember the buffer so it can be recovered when
 * the next packet arrives. */
1382 if ((GET_CORE_IF(pcd)->snpsid >= OTG_CORE_REV_3_00a)
1383 && (pcd->core_if->dma_desc_enable)
1384 && (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len)) {
1385 DWC_DEBUGPL(DBG_PCDV,
1386 "Data terminated wait next packet in out_desc_addr\n");
1387 pcd->backup_buf = phys_to_virt(ep0->dwc_ep.dma_addr);
1388 pcd->data_terminated = 1;
/* Program EP0 for a zero-length IN (status) transfer. */
1390 ep0->dwc_ep.xfer_len = 0;
1391 ep0->dwc_ep.xfer_count = 0;
1392 ep0->dwc_ep.is_in = 1;
1393 ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle;
1394 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1396 /* Prepare for more SETUP Packets */
1397 /* ep0_out_start(GET_CORE_IF(pcd), pcd); */
1401 * This function starts the Zero-Length Packet for the OUT status phase
1402 * of a 2 stage control transfer.
1404 static inline void do_setup_out_status_phase(dwc_otg_pcd_t *pcd)
1406 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
/* Nothing to do if EP0 is stalled (early exit elided in this listing). */
1407 if (pcd->ep0state == EP0_STALL) {
1408 DWC_DEBUGPL(DBG_PCD, "EP0 STALLED\n");
1411 pcd->ep0state = EP0_OUT_STATUS_PHASE;
1413 DWC_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n");
/* Program EP0 for a zero-length OUT (status) transfer. */
1414 ep0->dwc_ep.xfer_len = 0;
1415 ep0->dwc_ep.xfer_count = 0;
1416 ep0->dwc_ep.is_in = 0;
1417 ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle;
1418 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1420 /* Prepare for more SETUP Packets */
/* In Slave mode, EP0 OUT must be re-armed by software for the next SETUP. */
1421 if (GET_CORE_IF(pcd)->dma_enable == 0) {
1422 ep0_out_start(GET_CORE_IF(pcd), pcd);
1427 * Clear the EP halt (STALL) and if pending requests start the
1430 static inline void pcd_clear_halt(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep)
/* stall_clear_flag set means the stall is cleared lazily elsewhere; only
 * the control status phase is needed here. */
1432 if (ep->dwc_ep.stall_clear_flag) {
1433 /* Start Control Status Phase */
1434 do_setup_in_status_phase(pcd);
1438 dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
1440 /* Reactive the EP */
1441 dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
1444 /* If there is a request in the EP queue start it */
1446 /** @todo FIXME: this causes an EP mismatch in DMA mode.
1447 * epmismatch not yet implemented. */
1450 * Above fixme is solved by implementing a tasklet to call the
1451 * start_next_request(), outside of interrupt context at some
1452 * time after the current time, after a clear-halt setup packet.
1453 * Still need to implement ep mismatch in the future if a gadget
1454 * ever uses more than one endpoint at once
/* Deferred restart: schedule the queued transfer outside IRQ context. */
1457 DWC_TASK_SCHEDULE(pcd->start_xfer_tasklet);
1459 /* Start Control Status Phase */
1460 do_setup_in_status_phase(pcd);
1464 * This function is called when the SET_FEATURE TEST_MODE Setup packet
1465 * is sent from the host. The Device Control register is written with
1466 * the Test Mode bits set to the specified Test Mode. This is done as
1467 * a tasklet so that the "Status" phase of the control transfer
1468 * completes before transmitting the TEST packets.
1470 * @todo This has not been tested since the tasklet struct was put
1471 * into the PCD struct!
1474 void do_test_mode(void *data)
1477 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) data;
1478 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1479 int test_mode = pcd->test_mode;
1481 /* DWC_WARN("%s() has not been tested since being rewritten!\n", __func__); */
/* Read-modify-write DCTL: dctl.b.tstctl assignments for each case are
 * elided in this listing. */
1483 dctl.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
1484 switch (test_mode) {
1485 case 1: /* TEST_J */
1489 case 2: /* TEST_K */
1493 case 3: /* TEST_SE0_NAK */
1497 case 4: /* TEST_PACKET */
1501 case 5: /* TEST_FORCE_ENABLE */
/* Non-standard selector path: request HNP (guard elided in listing). */
1505 dwc_otg_set_hnpreq(core_if, 1);
1507 DWC_PRINTF("test mode = %d\n", test_mode);
1508 core_if->test_mode = test_mode;
/* Commit the selected test mode to the hardware. */
1509 DWC_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
1513 * This function process the GET_STATUS Setup Commands.
1515 static inline void do_get_status(dwc_otg_pcd_t *pcd)
1517 usb_device_request_t ctrl = pcd->setup_pkt->req;
1518 dwc_otg_pcd_ep_t *ep;
1519 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1520 uint16_t *status = pcd->status_buf;
1521 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1524 DWC_DEBUGPL(DBG_PCD,
1525 "GET_STATUS %02x.%02x v%04x i%04x l%04x\n",
1526 ctrl.bmRequestType, ctrl.bRequest,
1527 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1528 UGETW(ctrl.wLength));
/* Dispatch on recipient (device / interface / endpoint); case labels are
 * elided in this listing. */
1531 switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
1533 if (UGETW(ctrl.wIndex) == 0xF000) { /* OTG Status selector */
1534 DWC_PRINTF("wIndex - %d\n", UGETW(ctrl.wIndex));
1535 DWC_PRINTF("OTG VERSION - %d\n", core_if->otg_ver);
1536 DWC_PRINTF("OTG CAP - %d, %d\n",
1537 core_if->core_params->otg_cap,
1538 DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE);
/* OTG status is only reportable on OTG 2.0-capable, HNP/SRP-capable
 * configurations; reply with a 1-byte hostRequest status. */
1539 if (core_if->otg_ver == 1
1540 && core_if->core_params->otg_cap ==
1541 DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1542 uint8_t *otgsts = (uint8_t *) pcd->status_buf;
1543 *otgsts = (core_if->otg_sts & 0x1);
1544 pcd->ep0_pending = 1;
1545 ep0->dwc_ep.start_xfer_buff =
1547 ep0->dwc_ep.xfer_buff = (uint8_t *) otgsts;
1548 ep0->dwc_ep.dma_addr =
1549 pcd->status_buf_dma_handle;
1550 ep0->dwc_ep.xfer_len = 1;
1551 ep0->dwc_ep.xfer_count = 0;
1552 ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len;
1553 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
1557 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
/* Standard device status: bit 0 = self-powered, bit 1 = remote wakeup. */
1562 *status = 0x1; /* Self powered */
1563 *status |= pcd->remote_wakeup_enable << 1;
/* Endpoint recipient: stall on unknown endpoint or oversize wLength. */
1571 ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
1572 if (ep == 0 || UGETW(ctrl.wLength) > 2) {
1573 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1576 /** @todo check for EP stall */
1577 *status = ep->stopped;
/* Send the 2-byte status over EP0 IN. */
1580 pcd->ep0_pending = 1;
1581 ep0->dwc_ep.start_xfer_buff = (uint8_t *) status;
1582 ep0->dwc_ep.xfer_buff = (uint8_t *) status;
1583 ep0->dwc_ep.dma_addr = pcd->status_buf_dma_handle;
1584 ep0->dwc_ep.xfer_len = 2;
1585 ep0->dwc_ep.xfer_count = 0;
1586 ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len;
1587 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1591 * This function process the SET_FEATURE Setup Commands.
1593 static inline void do_set_feature(dwc_otg_pcd_t *pcd)
1595 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1596 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
1597 usb_device_request_t ctrl = pcd->setup_pkt->req;
1598 dwc_otg_pcd_ep_t *ep = 0;
1599 int32_t otg_cap_param = core_if->core_params->otg_cap;
1600 gotgctl_data_t gotgctl = {.d32 = 0 };
1602 DWC_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
1603 ctrl.bmRequestType, ctrl.bRequest,
1604 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1605 UGETW(ctrl.wLength));
1606 DWC_DEBUGPL(DBG_PCD, "otg_cap=%d\n", otg_cap_param);
/* Dispatch on recipient; device-recipient features are handled inline,
 * interface features are passed to the gadget, endpoint handles HALT. */
1608 switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
1610 switch (UGETW(ctrl.wValue)) {
1611 case UF_DEVICE_REMOTE_WAKEUP:
1612 pcd->remote_wakeup_enable = 1;
1616 /* Setup the Test Mode tasklet to do the Test
1617 * Packet generation after the SETUP Status
1618 * phase has completed. */
1620 /** @todo This has not been tested since the
1621 * tasklet struct was put into the PCD
/* Test selector is in the high byte of wIndex (USB 2.0 ch. 9). */
1623 pcd->test_mode = UGETW(ctrl.wIndex) >> 8;
1624 DWC_TASK_SCHEDULE(pcd->test_mode_tasklet);
1627 case UF_DEVICE_B_HNP_ENABLE:
1628 DWC_DEBUGPL(DBG_PCDV,
1629 "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
1631 /* dev may initiate HNP */
1632 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1633 gotgctl.b.devhnpen = 1;
1634 if (core_if->otg_ver == 1)
1635 DWC_MODIFY_REG32(&global_regs->gotgctl,
1638 pcd->b_hnp_enable = 1;
1639 dwc_otg_pcd_update_otg(pcd, 0);
1640 DWC_DEBUGPL(DBG_PCD, "Request B HNP\n");
1641 /**@todo Is the gotgctl.devhnpen cleared
1642 * by a USB Reset? */
1643 gotgctl.b.hnpreq = 1;
1644 DWC_WRITE_REG32(&global_regs->gotgctl,
1648 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1653 case UF_DEVICE_A_HNP_SUPPORT:
1654 /* RH port supports HNP */
1655 DWC_DEBUGPL(DBG_PCDV,
1656 "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n");
1657 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1658 pcd->a_hnp_support = 1;
1659 dwc_otg_pcd_update_otg(pcd, 0);
1661 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1666 case UF_DEVICE_A_ALT_HNP_SUPPORT:
1667 /* other RH port does */
1668 DWC_DEBUGPL(DBG_PCDV,
1669 "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
1670 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1671 pcd->a_alt_hnp_support = 1;
1672 dwc_otg_pcd_update_otg(pcd, 0);
1674 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1680 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
/* Handled locally: acknowledge with the IN status phase. */
1684 do_setup_in_status_phase(pcd);
/* Interface recipient: delegate to the gadget driver. */
1688 do_gadget_setup(pcd, &ctrl);
/* Endpoint recipient: only ENDPOINT_HALT is supported. */
1692 if (UGETW(ctrl.wValue) == UF_ENDPOINT_HALT) {
1693 ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
1695 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1699 dwc_otg_ep_set_stall(core_if, &ep->dwc_ep);
1701 do_setup_in_status_phase(pcd);
1707 * This function process the CLEAR_FEATURE Setup Commands.
1709 static inline void do_clear_feature(dwc_otg_pcd_t *pcd)
1711 usb_device_request_t ctrl = pcd->setup_pkt->req;
1712 dwc_otg_pcd_ep_t *ep = 0;
1714 DWC_DEBUGPL(DBG_PCD,
1715 "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
1716 ctrl.bmRequestType, ctrl.bRequest,
1717 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1718 UGETW(ctrl.wLength));
/* Dispatch on recipient; case labels are elided in this listing. */
1720 switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
1722 switch (UGETW(ctrl.wValue)) {
1723 case UF_DEVICE_REMOTE_WAKEUP:
1724 pcd->remote_wakeup_enable = 0;
1728 /** @todo Add CLEAR_FEATURE for TEST modes. */
1732 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1735 do_setup_in_status_phase(pcd);
/* Endpoint recipient: clear the halt on the addressed endpoint;
 * stall if the endpoint cannot be resolved. */
1739 ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
1741 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1745 pcd_clear_halt(pcd, ep);
1752 * This function process the SET_ADDRESS Setup Commands.
1754 static inline void do_set_address(dwc_otg_pcd_t *pcd)
1756 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
1757 usb_device_request_t ctrl = pcd->setup_pkt->req;
/* SET_ADDRESS is only valid with a device recipient. */
1759 if (ctrl.bmRequestType == UT_DEVICE) {
1760 dcfg_data_t dcfg = {.d32 = 0 };
1763 /* DWC_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n", ctrl.wValue); */
/* Program the new device address into DCFG, then acknowledge with the
 * IN status phase (the address takes effect after status completes). */
1765 dcfg.b.devaddr = UGETW(ctrl.wValue);
1766 DWC_MODIFY_REG32(&dev_if->dev_global_regs->dcfg, 0, dcfg.d32);
1767 do_setup_in_status_phase(pcd);
1772 * This function processes SETUP commands. In Linux, the USB Command
1773 * processing is done in two places - the first being the PCD and the
1774 * second in the Gadget Driver (for example, the File-Backed Storage
1778 * <tr><td>Command </td><td>Driver </td><td>Description</td></tr>
1780 * <tr><td>GET_STATUS </td><td>PCD </td><td>Command is processed as
1781 * defined in chapter 9 of the USB 2.0 Specification chapter 9
1784 * <tr><td>CLEAR_FEATURE </td><td>PCD </td><td>For Device and Endpoint
1785 * requests the ENDPOINT_HALT feature is processed; all other
1786 * interface requests are ignored.</td></tr>
1788 * <tr><td>SET_FEATURE </td><td>PCD </td><td>The Device and Endpoint
1789 * requests are processed by the PCD. Interface requests are passed
1790 * to the Gadget Driver.</td></tr>
1792 * <tr><td>SET_ADDRESS </td><td>PCD </td><td>Program the DCFG reg,
1793 * with device address received </td></tr>
1795 * <tr><td>GET_DESCRIPTOR </td><td>Gadget Driver </td><td>Return the
1796 * requested descriptor</td></tr>
1798 * <tr><td>SET_DESCRIPTOR </td><td>Gadget Driver </td><td>Optional -
1799 * not implemented by any of the existing Gadget Drivers.</td></tr>
1801 * <tr><td>SET_CONFIGURATION </td><td>Gadget Driver </td><td>Disable
1802 * all EPs and enable EPs for new configuration.</td></tr>
1804 * <tr><td>GET_CONFIGURATION </td><td>Gadget Driver </td><td>Return
1805 * the current configuration</td></tr>
1807 * <tr><td>SET_INTERFACE </td><td>Gadget Driver </td><td>Disable all
1808 * EPs and enable EPs for new configuration.</td></tr>
1810 * <tr><td>GET_INTERFACE </td><td>Gadget Driver </td><td>Return the
1811 * current interface.</td></tr>
1813 * <tr><td>SYNC_FRAME </td><td>PCD </td><td>Display debug
1814 * message.</td></tr>
1817 * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are
1818 * processed by pcd_setup. Calling the Function Driver's setup function from
1819 * pcd_setup processes the gadget SETUP commands.
1821 static inline void pcd_setup(dwc_otg_pcd_t *pcd)
1823 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1824 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
1825 usb_device_request_t ctrl = pcd->setup_pkt->req;
1826 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1828 deptsiz0_data_t doeptsize0 = {.d32 = 0 };
1832 struct cfi_usb_ctrlrequest cfi_req;
1835 doeptsize0.d32 = DWC_READ_REG32(&dev_if->out_ep_regs[0]->doeptsiz);
1837 /** In BDMA more than 1 setup packet is not supported till 3.00a */
1838 if (core_if->dma_enable && core_if->dma_desc_enable == 0
1839 && (doeptsize0.b.supcnt < 2)
1840 && (core_if->snpsid < OTG_CORE_REV_2_94a)) {
1842 ("\n\n----------- CANNOT handle > 1 setup packet in DMA mode\n\n");
/* On 3.00a+ BDMA, SUPCnt counts down per received SETUP; reread the
 * correct back-to-back SETUP packet from the setup buffer ring. */
1844 if ((core_if->snpsid >= OTG_CORE_REV_3_00a)
1845 && (core_if->dma_enable == 1) && (core_if->dma_desc_enable == 0)) {
1848 (3 - doeptsize0.b.supcnt - 1 +
1849 ep0->dwc_ep.stp_rollover))->req;
1852 DWC_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1853 ctrl.bmRequestType, ctrl.bRequest,
1854 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1855 UGETW(ctrl.wLength));
1858 /* Clean up the request queue */
1859 dwc_otg_request_nuke(ep0);
/* Choose the data-phase direction from bmRequestType bit 7. */
1862 if (ctrl.bmRequestType & UE_DIR_IN) {
1863 ep0->dwc_ep.is_in = 1;
1864 pcd->ep0state = EP0_IN_DATA_PHASE;
1866 ep0->dwc_ep.is_in = 0;
1867 pcd->ep0state = EP0_OUT_DATA_PHASE;
/* No data stage: go straight to the IN status phase. */
1870 if (UGETW(ctrl.wLength) == 0) {
1871 ep0->dwc_ep.is_in = 1;
1872 pcd->ep0state = EP0_IN_STATUS_PHASE;
/* Non-standard (class/vendor) requests: CFI vendor range first, then
 * the gadget driver. */
1875 if (UT_GET_TYPE(ctrl.bmRequestType) != UT_STANDARD) {
1878 DWC_MEMCPY(&cfi_req, &ctrl, sizeof(usb_device_request_t));
1880 /* printk(KERN_ALERT "CFI: req_type=0x%02x; req=0x%02x\n",
1881 * ctrl.bRequestType, ctrl.bRequest); */
/* CFI claims vendor requests in the (0xB0, 0xBF) bRequest range. */
1882 if (UT_GET_TYPE(cfi_req.bRequestType) == UT_VENDOR) {
1883 if (cfi_req.bRequest > 0xB0 && cfi_req.bRequest < 0xBF) {
1884 retval = cfi_setup(pcd, &cfi_req);
1886 ep0_do_stall(pcd, retval);
1887 pcd->ep0_pending = 0;
1891 /* if need gadget setup then call it and check the retval */
1892 if (pcd->cfi->need_gadget_att) {
1894 cfi_gadget_setup(pcd,
1898 pcd->ep0_pending = 0;
1903 if (pcd->cfi->need_status_in_complete) {
1904 do_setup_in_status_phase(pcd);
1911 /* handle non-standard (class/vendor) requests in the gadget driver */
1912 do_gadget_setup(pcd, &ctrl);
1916 /** @todo NGS: Handle bad setup packet? */
1918 /* --- Standard Request handling --- */
1920 switch (ctrl.bRequest) {
1925 case UR_CLEAR_FEATURE:
1926 do_clear_feature(pcd);
1929 case UR_SET_FEATURE:
1930 do_set_feature(pcd);
1933 case UR_SET_ADDRESS:
1934 do_set_address(pcd);
1937 case UR_SET_INTERFACE:
1939 /* _pcd->request_config = 1; */ /* Configuration changed */
1940 do_gadget_setup(pcd, &ctrl);
1943 case UR_SYNCH_FRAME:
1944 do_gadget_setup(pcd, &ctrl);
1948 /* Call the Gadget Driver's setup functions */
1949 do_gadget_setup(pcd, &ctrl);
1955 * This function completes the ep0 control transfer.
1957 static int32_t ep0_complete_request(dwc_otg_pcd_ep_t *ep)
1959 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
1960 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
1961 dwc_otg_dev_in_ep_regs_t *in_ep_regs =
1962 dev_if->in_ep_regs[ep->dwc_ep.num];
1964 dwc_otg_dev_out_ep_regs_t *out_ep_regs =
1965 dev_if->out_ep_regs[ep->dwc_ep.num];
1967 deptsiz0_data_t deptsiz;
1968 dev_dma_desc_sts_t desc_sts;
1969 dwc_otg_pcd_request_t *req;
1971 dwc_otg_pcd_t *pcd = ep->pcd;
1974 struct cfi_usb_ctrlrequest *ctrlreq;
1975 int retval = -DWC_E_NOT_SUPPORTED;
/* Data stage finished with no queued request: start the matching status
 * phase (OUT status after an IN data stage, IN status after OUT). */
1978 if (pcd->ep0_pending && DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1979 if (ep->dwc_ep.is_in) {
1981 DWC_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n");
1983 do_setup_out_status_phase(pcd);
1986 DWC_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n");
/* CFI path: notify the PCD (and optionally the gadget) that the vendor
 * control-write data has arrived, then send the IN ZLP. */
1990 ctrlreq = &pcd->cfi->ctrl_req;
1992 if (UT_GET_TYPE(ctrlreq->bRequestType) == UT_VENDOR) {
1993 if (ctrlreq->bRequest > 0xB0
1994 && ctrlreq->bRequest < 0xBF) {
1996 /* Return if the PCD failed to handle the request */
1997 retval = pcd->cfi->ops.
1998 ctrl_write_complete(pcd->cfi, pcd);
2001 ("ERROR setting a new value in the PCD(%d)\n",
2003 ep0_do_stall(pcd, retval);
2004 pcd->ep0_pending = 0;
2008 /* If the gadget needs to be notified on the request */
2009 if (pcd->cfi->need_gadget_att == 1) {
2010 /* retval = do_gadget_setup(pcd, &pcd->cfi->ctrl_req); */
2012 cfi_gadget_setup(pcd,
2016 /* Return from the function if the gadget failed to process
2017 * the request properly - this should never happen !!!
2021 ("ERROR setting a new value in the gadget(%d)\n",
2023 pcd->ep0_pending = 0;
2028 CFI_INFO("%s: RETVAL=%d\n", __func__,
2030 /* If we hit here then the PCD and the gadget has properly
2031 * handled the request - so send the ZLP IN to the host.
2033 /* @todo: MAS - decide whether we need to start the setup
2034 * stage based on the need_setup value of the cfi object
2036 do_setup_in_status_phase(pcd);
2037 pcd->ep0_pending = 0;
2043 do_setup_in_status_phase(pcd);
2045 pcd->ep0_pending = 0;
/* Nothing queued and not ep0_pending: nothing to complete. */
2049 if (DWC_CIRCLEQ_EMPTY(&ep->queue)) {
2052 req = DWC_CIRCLEQ_FIRST(&ep->queue);
/* Status phase completion: fall through to request completion below. */
2054 if (pcd->ep0state == EP0_OUT_STATUS_PHASE
2055 || pcd->ep0state == EP0_IN_STATUS_PHASE) {
2057 } else if (ep->dwc_ep.is_in) {
2058 deptsiz.d32 = DWC_READ_REG32(&in_ep_regs->dieptsiz);
2059 if (core_if->dma_desc_enable != 0)
2060 desc_sts = dev_if->in_desc_addr->status;
2062 DWC_DEBUGPL(DBG_PCDV, "%d len=%d xfersize=%d pktcnt=%d\n",
2063 ep->dwc_ep.num, ep->dwc_ep.xfer_len,
2064 deptsiz.b.xfersize, deptsiz.b.pktcnt);
/* IN data stage done when the HW reports no residue (xfersize in BDMA,
 * descriptor byte count in DDMA). */
2067 if (((core_if->dma_desc_enable == 0)
2068 && (deptsiz.b.xfersize == 0))
2069 || ((core_if->dma_desc_enable != 0)
2070 && (desc_sts.b.bytes == 0))) {
2071 req->actual = ep->dwc_ep.xfer_count;
2072 /* Is a Zero Len Packet needed? */
2073 if (req->sent_zlp) {
2075 DWC_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n");
2079 do_setup_out_status_phase(pcd);
/* OUT data stage path. */
2084 deptsiz.d32 = DWC_READ_REG32(&out_ep_regs->doeptsiz);
2085 DWC_DEBUGPL(DBG_PCDV, "%d len=%d xsize=%d pktcnt=%d\n",
2086 ep->dwc_ep.num, ep->dwc_ep.xfer_len,
2087 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2089 req->actual = ep->dwc_ep.xfer_count;
2091 /* Is a Zero Len Packet needed? */
2092 if (req->sent_zlp) {
2094 DWC_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n");
2098 /* For older cores do setup in status phase in Slave/BDMA modes,
2099 * starting from 3.00 do that only in slave, and for DMA modes
2100 * just re-enable ep 0 OUT here*/
2101 if (core_if->dma_enable == 0
2102 || (core_if->dma_desc_enable == 0
2103 && core_if->snpsid <= OTG_CORE_REV_2_94a)) {
2104 do_setup_in_status_phase(pcd);
2105 } else if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
2106 DWC_DEBUGPL(DBG_PCDV,
2107 "Enable out ep before in status phase\n");
2108 ep0_out_start(core_if, pcd);
2112 /* Complete the request */
2114 dwc_otg_request_done(ep, req, 0);
2115 ep->dwc_ep.start_xfer_buff = 0;
2116 ep->dwc_ep.xfer_buff = 0;
2117 ep->dwc_ep.xfer_len = 0;
2125 * This function traverses all the CFI DMA descriptors
2126 * and accumulates the bytes that are left to be transferred.
2128 * @return The total bytes left to be transferred, or a negative value on failure
2130 static inline int cfi_calc_desc_residue(dwc_otg_pcd_ep_t *ep)
2134 struct dwc_otg_dma_desc *ddesc = NULL;
2135 struct cfi_ep *cfiep;
2137 /* See if the pcd_ep has its respective cfi_ep mapped */
2138 cfiep = get_cfi_ep_by_pcd_ep(ep->pcd->cfi, ep);
/* No mapping means this EP is not under CFI control; caller gets a
 * negative result (error return elided in this listing). */
2140 CFI_INFO("%s: Failed to find ep\n", __func__);
2144 ddesc = ep->dwc_ep.descs;
/* Sum the remaining-bytes field of every descriptor, bounded by both the
 * CFI descriptor count and the per-EP maximum. */
2146 for (i = 0; (i < cfiep->desc_count) && (i < MAX_DMA_DESCS_PER_EP); i++) {
2148 #if defined(PRINT_CFI_DMA_DESCS)
2149 print_desc(ddesc, ep->ep.name, i);
2151 ret += ddesc->status.b.bytes;
2156 CFI_INFO("!!!!!!!!!! WARNING (%s) - residue=%d\n", __func__,
2164 * This function completes the request for the EP. If there are
2165 * additional requests for the EP in the queue they will be started.
2167 static void complete_ep(dwc_otg_pcd_ep_t *ep)
2169 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
2170 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
2171 dwc_otg_dev_in_ep_regs_t *in_ep_regs =
2172 dev_if->in_ep_regs[ep->dwc_ep.num];
2173 deptsiz_data_t deptsiz;
2174 dev_dma_desc_sts_t desc_sts;
2175 dwc_otg_pcd_request_t *req = 0;
2176 dwc_otg_dev_dma_desc_t *dma_desc;
2177 uint32_t byte_count = 0;
2181 DWC_DEBUGPL(DBG_PCDV, "%s() %d-%s\n", __func__, ep->dwc_ep.num,
2182 (ep->dwc_ep.is_in ? "IN" : "OUT"));
2184 /* Get any pending requests */
2185 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
2186 req = DWC_CIRCLEQ_FIRST(&ep->queue);
2188 DWC_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
2192 DWC_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
2196 DWC_DEBUGPL(DBG_PCD, "Requests %d\n", ep->pcd->request_pending);
/* ---------------- IN endpoint completion ---------------- */
2198 if (ep->dwc_ep.is_in) {
2199 deptsiz.d32 = DWC_READ_REG32(&in_ep_regs->dieptsiz);
2201 if (core_if->dma_enable) {
/* Buffer DMA: transfer complete when both xfersize and pktcnt hit 0. */
2202 if (core_if->dma_desc_enable == 0) {
2203 if (deptsiz.b.xfersize == 0
2204 && deptsiz.b.pktcnt == 0) {
2206 ep->dwc_ep.xfer_len -
2207 ep->dwc_ep.xfer_count;
2209 ep->dwc_ep.xfer_buff += byte_count;
2210 ep->dwc_ep.dma_addr += byte_count;
2211 ep->dwc_ep.xfer_count += byte_count;
2213 DWC_DEBUGPL(DBG_PCDV,
2214 "%d-%s len=%d xfersize=%d pktcnt=%d\n",
2216 (ep->dwc_ep.is_in ? "IN" :
2218 ep->dwc_ep.xfer_len,
/* More of the request left: kick off the next portion. */
2222 if (ep->dwc_ep.xfer_len <
2223 ep->dwc_ep.total_len) {
2224 dwc_otg_ep_start_transfer
2225 (core_if, &ep->dwc_ep);
2226 } else if (ep->dwc_ep.sent_zlp) {
2228 * This fragment of code should initiate 0
2229 * length transfer in case if it is queued
2230 * a transfer with size divisible to EPs max
2231 * packet size and with usb_request zero field
2232 * is set, which means that after data is transferred,
2233 * it also should be transferred
2234 * a 0 length packet at the end. For Slave and
2235 * Buffer DMA modes in this case SW has
2236 * to initiate 2 transfers one with transfer size,
2237 * and the second with 0 size. For Descriptor
2238 * DMA mode SW is able to initiate a transfer,
2239 * which will handle all the packets including
2240 * the last 0 length.
2242 ep->dwc_ep.sent_zlp = 0;
2243 dwc_otg_ep_start_zl_transfer
2244 (core_if, &ep->dwc_ep);
2249 if (ep->dwc_ep.type ==
2250 DWC_OTG_EP_TYPE_ISOC) {
2252 dwc_otg_request_done(ep, req,
2255 ep->dwc_ep.start_xfer_buff = 0;
2256 ep->dwc_ep.xfer_buff = 0;
2257 ep->dwc_ep.xfer_len = 0;
2259 /* If there is a request in the queue start it. */
2260 start_next_request(ep);
2263 ("Incomplete transfer (%d - %s [siz=%d pkt=%d])\n",
2266 dwc_ep.is_in ? "IN" :
/* Descriptor DMA: walk the descriptor chain for the residue. */
2272 dma_desc = ep->dwc_ep.desc_addr;
2274 ep->dwc_ep.sent_zlp = 0;
2277 CFI_INFO("%s: BUFFER_MODE=%d\n", __func__,
2278 ep->dwc_ep.buff_mode);
/* Non-standard (CFI) buffer modes use their own residue calculation. */
2279 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2282 residue = cfi_calc_desc_residue(ep);
2286 byte_count = residue;
2289 for (i = 0; i < ep->dwc_ep.desc_cnt;
2291 desc_sts = dma_desc->status;
2292 byte_count += desc_sts.b.bytes;
/* Zero residue across all descriptors means the whole transfer went out. */
2298 if (byte_count == 0) {
2299 ep->dwc_ep.xfer_count =
2300 ep->dwc_ep.total_len;
2303 DWC_WARN("Incomplete transfer\n");
/* Slave mode IN completion. */
2307 if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) {
2308 DWC_DEBUGPL(DBG_PCDV,
2309 "%d-%s len=%d xfersize=%d pktcnt=%d\n",
2311 ep->dwc_ep.is_in ? "IN" : "OUT",
2312 ep->dwc_ep.xfer_len,
2316 /* Check if the whole transfer was completed,
2317 * if no, setup transfer for next portion of data
2319 if (ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
2320 dwc_otg_ep_start_transfer(core_if,
2322 } else if (ep->dwc_ep.sent_zlp) {
2324 * This fragment of code should initiate 0
2325 * length transfer in case if it is queued
2326 * a transfer with size divisible to EPs max
2327 * packet size and with usb_request zero field
2328 * is set, which means that after data is transferred,
2329 * it also should be transferred
2330 * a 0 length packet at the end. For Slave and
2331 * Buffer DMA modes in this case SW has
2332 * to initiate 2 transfers one with transfer size,
2333 * and the second with 0 size. For Descriptor
2334 * DMA mode SW is able to initiate a transfer,
2335 * which will handle all the packets including
2338 ep->dwc_ep.sent_zlp = 0;
2339 dwc_otg_ep_start_zl_transfer(core_if,
2346 ("Incomplete transfer (%d-%s [siz=%d pkt=%d])\n",
2348 (ep->dwc_ep.is_in ? "IN" : "OUT"),
2349 deptsiz.b.xfersize, deptsiz.b.pktcnt);
/* ---------------- OUT endpoint completion ---------------- */
2353 dwc_otg_dev_out_ep_regs_t *out_ep_regs =
2354 dev_if->out_ep_regs[ep->dwc_ep.num];
2356 if (core_if->dma_enable) {
2357 if (core_if->dma_desc_enable) {
/* Descriptor DMA: accumulate residue from the descriptor chain. */
2358 dma_desc = ep->dwc_ep.desc_addr;
2360 ep->dwc_ep.sent_zlp = 0;
2363 CFI_INFO("%s: BUFFER_MODE=%d\n", __func__,
2364 ep->dwc_ep.buff_mode);
2365 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2367 residue = cfi_calc_desc_residue(ep);
2370 byte_count = residue;
2374 for (i = 0; i < ep->dwc_ep.desc_cnt;
2376 desc_sts = dma_desc->status;
2377 byte_count += desc_sts.b.bytes;
2384 /* Checking for interrupt Out transfers with not
2385 * dword aligned mps sizes
2387 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_INTR &&
2388 (ep->dwc_ep.maxpacket % 4)) {
2389 ep->dwc_ep.xfer_count =
2390 ep->dwc_ep.total_len - byte_count;
/* Non-dword-aligned MPS: correct xfer_len for the per-descriptor
 * padding the hardware accounted for. */
2391 if ((ep->dwc_ep.xfer_len %
2392 ep->dwc_ep.maxpacket)
2393 && (ep->dwc_ep.xfer_len /
2394 ep->dwc_ep.maxpacket <
2396 ep->dwc_ep.xfer_len -=
2397 (ep->dwc_ep.desc_cnt -
2398 1)*ep->dwc_ep.maxpacket +
2399 ep->dwc_ep.xfer_len %
2400 ep->dwc_ep.maxpacket;
2402 ep->dwc_ep.xfer_len -=
2403 ep->dwc_ep.desc_cnt *
2404 ep->dwc_ep.maxpacket;
2405 if (ep->dwc_ep.xfer_len > 0) {
2406 dwc_otg_ep_start_transfer
2407 (core_if, &ep->dwc_ep);
2412 ep->dwc_ep.xfer_count =
2413 ep->dwc_ep.total_len - byte_count +
2416 total_len & 0x3)) & 0x3);
/* Buffer DMA OUT: residue comes from DOEPTSIZ.xfersize. */
2422 DWC_READ_REG32(&out_ep_regs->doeptsiz);
2424 byte_count = (ep->dwc_ep.xfer_len -
2425 ep->dwc_ep.xfer_count -
2426 deptsiz.b.xfersize);
2427 ep->dwc_ep.xfer_buff += byte_count;
2428 ep->dwc_ep.dma_addr += byte_count;
2429 ep->dwc_ep.xfer_count += byte_count;
2431 /* Check if the whole transfer was completed,
2432 * if no, setup transfer for next portion of data
2434 if (ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
2435 dwc_otg_ep_start_transfer(core_if,
2437 } else if (ep->dwc_ep.sent_zlp) {
2439 * This fragment of code should initiate 0
2440 * length transfer in case if it is queued
2441 * a transfer with size divisible to EPs max
2442 * packet size and with usb_request zero field
2443 * is set, which means that after data is transferred,
2444 * it also should be transferred
2445 * a 0 length packet at the end. For Slave and
2446 * Buffer DMA modes in this case SW has
2447 * to initiate 2 transfers one with transfer size,
2448 * and the second with 0 size. For Descriptor
2449 * DMA mode SW is able to initiate a transfer,
2450 * which will handle all the packets including
2453 ep->dwc_ep.sent_zlp = 0;
2454 dwc_otg_ep_start_zl_transfer(core_if,
/* Slave mode OUT completion. */
2461 /* Check if the whole transfer was completed,
2462 * if no, setup transfer for next portion of data
2464 if (ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
2465 dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
2466 } else if (ep->dwc_ep.sent_zlp) {
2468 * This fragment of code should initiate 0
2469 * length transfer in case if it is queued
2470 * a transfer with size divisible to EPs max
2471 * packet size and with usb_request zero field
2472 * is set, which means that after data is transferred,
2473 * it also should be transferred
2474 * a 0 length packet at the end. For Slave and
2475 * Buffer DMA modes in this case SW has
2476 * to initiate 2 transfers one with transfer size,
2477 * and the second with 0 size. For Descriptor
2478 * DMA mode SW is able to initiate a transfer,
2479 * which will handle all the packets including
2480 * the last 0 length.
2482 ep->dwc_ep.sent_zlp = 0;
2483 dwc_otg_ep_start_zl_transfer(core_if,
2490 DWC_DEBUGPL(DBG_PCDV,
2491 "addr %p, %d-%s len=%d cnt=%d xsize=%d pktcnt=%d\n",
2492 &out_ep_regs->doeptsiz, ep->dwc_ep.num,
2493 ep->dwc_ep.is_in ? "IN" : "OUT",
2494 ep->dwc_ep.xfer_len, ep->dwc_ep.xfer_count,
2495 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2498 /* Complete the request */
2501 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2502 req->actual = ep->dwc_ep.cfi_req_len - byte_count;
2505 req->actual = ep->dwc_ep.xfer_count;
/* If a bounce buffer was used for alignment, copy received data back to
 * the caller's buffer and free the bounce buffer. */
2509 if (req->dw_align_buf) {
2510 if (!ep->dwc_ep.is_in) {
2511 dwc_memcpy(req->buf, req->dw_align_buf,
2514 DWC_DMA_FREE(req->length, req->dw_align_buf,
2515 req->dw_align_buf_dma);
2518 dwc_otg_request_done(ep, req, 0);
2520 ep->dwc_ep.start_xfer_buff = 0;
2521 ep->dwc_ep.xfer_buff = 0;
2522 ep->dwc_ep.xfer_len = 0;
2524 /* If there is a request in the queue start it. */
2525 start_next_request(ep);
2532 * This function handles the BNA (Buffer Not Available) interrupt for
 * Isochronous EPs: every DMA descriptor of the buffer currently being
 * processed is handed back to the core (BS_HOST_READY) and the endpoint
 * is re-enabled through its DEPCTL register.
 * NOTE(review): this extract is missing several original lines (opening
 * brace, descriptor-pointer assignment, depctl field setup); the code
 * text below is left untouched.
2535 static void dwc_otg_pcd_handle_iso_bna(dwc_otg_pcd_ep_t *ep)
2537 dwc_ep_t *dwc_ep = &ep->dwc_ep;
2538 volatile uint32_t *addr;
2539 depctl_data_t depctl = {
2541 dwc_otg_pcd_t *pcd = ep->pcd;
2542 dwc_otg_dev_dma_desc_t *dma_desc;
 /* Descriptor chain of the buffer selected by proc_buf_num. */
2546 dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * (dwc_ep->proc_buf_num);
2548 if (dwc_ep->is_in) {
2549 dev_dma_desc_sts_t sts = {
 /* Re-arm every IN descriptor: mark it ready for the core. */
2551 for (i = 0; i < dwc_ep->desc_cnt; ++i, ++dma_desc) {
2552 sts.d32 = dma_desc->status.d32;
2553 sts.b_iso_in.bs = BS_HOST_READY;
2554 dma_desc->status.d32 = sts.d32;
2557 dev_dma_desc_sts_t sts = {
 /* Same re-arm for the OUT descriptor chain. */
2559 for (i = 0; i < dwc_ep->desc_cnt; ++i, ++dma_desc) {
2560 sts.d32 = dma_desc->status.d32;
2561 sts.b_iso_out.bs = BS_HOST_READY;
2562 dma_desc->status.d32 = sts.d32;
 /* Pick DOEPCTL or DIEPCTL by direction, then re-enable the EP. */
2566 if (dwc_ep->is_in == 0) {
2568 &GET_CORE_IF(pcd)->dev_if->out_ep_regs[dwc_ep->
2572 &GET_CORE_IF(pcd)->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
2575 DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
2579 * This function sets latest iso packet information (non-PTI mode).
 *
 * Records length/offset/status of the just-completed isochronous packet
 * into ep->pkt_info[ep->cur_pkt] and advances the current-packet
 * cursors by one packet stride.
2581 * @param core_if Programming view of DWC_otg controller.
2582 * @param ep The EP to start the transfer on.
2585 void set_current_pkt_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2587 deptsiz_data_t deptsiz = {
2589 dma_addr_t dma_addr;
 /* Base DMA address of the double buffer currently being processed. */
2592 if (ep->proc_buf_num)
2593 dma_addr = ep->dma_addr1;
2595 dma_addr = ep->dma_addr0;
 /* IN EP: read DIEPTSIZ; packet stride is exactly data_per_frame. */
2599 DWC_READ_REG32(&core_if->dev_if->
2600 in_ep_regs[ep->num]->dieptsiz);
2601 offset = ep->data_per_frame;
 /* OUT EP: read DOEPTSIZ; stride is data_per_frame rounded up to a
  * 4-byte (DWORD) boundary. */
2604 DWC_READ_REG32(&core_if->dev_if->
2605 out_ep_regs[ep->num]->doeptsiz);
2607 ep->data_per_frame +
2608 (0x4 & (0x4 - (ep->data_per_frame & 0x3)));
 /* xfersize reaching 0 means the programmed size fully transferred. */
2611 if (!deptsiz.b.xfersize) {
2612 ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame;
2613 ep->pkt_info[ep->cur_pkt].offset =
2614 ep->cur_pkt_dma_addr - dma_addr;
2615 ep->pkt_info[ep->cur_pkt].status = 0;
 /* Otherwise flag the packet as having produced no data. */
2617 ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame;
2618 ep->pkt_info[ep->cur_pkt].offset =
2619 ep->cur_pkt_dma_addr - dma_addr;
2620 ep->pkt_info[ep->cur_pkt].status = -DWC_E_NO_DATA;
 /* Advance CPU-side and DMA-side cursors to the next packet slot. */
2622 ep->cur_pkt_addr += offset;
2623 ep->cur_pkt_dma_addr += offset;
2628 * This function sets latest iso packet information (DDMA mode).
 *
 * Walks the closed Descriptor-DMA descriptor chain of the buffer just
 * completed and fills dwc_ep->pkt_info[] with per-packet status, length
 * and buffer offset. OUT EPs are processed per-descriptor (one packet
 * per descriptor, frames padded to DWORDs); IN EPs one descriptor per
 * frame. NOTE(review): interior lines of the original are missing from
 * this extract; code text left untouched.
2630 * @param core_if Programming view of DWC_otg controller.
2631 * @param dwc_ep The EP to start the transfer on.
2634 static void set_ddma_iso_pkts_info(dwc_otg_core_if_t *core_if,
2636 dwc_otg_dev_dma_desc_t *dma_desc;
2637 dev_dma_desc_sts_t sts = {
2639 iso_pkt_info_t *iso_packet;
2640 uint32_t data_per_desc;
2644 iso_packet = dwc_ep->pkt_info;
2646 /** Reinit closed DMA Descriptors*/
2648 if (dwc_ep->is_in == 0) {
 /* OUT: start of the descriptor chain for the completed buffer. */
2650 dwc_ep->iso_desc_addr +
2651 dwc_ep->desc_cnt*dwc_ep->proc_buf_num;
 /* All frames except the last one of the buffer. */
2654 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
2655 i += dwc_ep->pkt_per_frm) {
2656 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
 /* Last packet of a frame may be shorter than maxpacket. */
2658 ((j + 1)*dwc_ep->maxpacket >
2660 data_per_frame) ? dwc_ep->data_per_frame -
2661 j*dwc_ep->maxpacket : dwc_ep->maxpacket;
2663 (data_per_desc % 4) ? (4 -
2667 sts.d32 = dma_desc->status.d32;
2669 /* Write status into the iso packet descriptor */
 /* Non-zero rxsts or busy BS bits map to "no data". */
2670 iso_packet->status =
2671 sts.b_iso_out.rxsts +
2672 (sts.b_iso_out.bs ^ BS_DMA_DONE);
2673 if (iso_packet->status) {
2674 iso_packet->status = -DWC_E_NO_DATA;
2677 /* Received data length */
2678 if (!sts.b_iso_out.rxbytes) {
2679 iso_packet->length =
2681 sts.b_iso_out.rxbytes;
2683 iso_packet->length =
2685 sts.b_iso_out.rxbytes + (4 -
2686 dwc_ep->data_per_frame
2690 iso_packet->offset = offset;
2692 offset += data_per_desc;
 /* Last frame of the buffer: all but its final packet. */
2698 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
2700 ((j + 1)*dwc_ep->maxpacket >
2701 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
2702 j*dwc_ep->maxpacket : dwc_ep->maxpacket;
2704 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
2706 sts.d32 = dma_desc->status.d32;
2708 /* Write status into the iso packet descriptor */
2709 iso_packet->status =
2710 sts.b_iso_out.rxsts +
2711 (sts.b_iso_out.bs ^ BS_DMA_DONE);
2712 if (iso_packet->status) {
2713 iso_packet->status = -DWC_E_NO_DATA;
2716 /* Received data length */
2717 iso_packet->length =
2718 dwc_ep->data_per_frame - sts.b_iso_out.rxbytes;
2720 iso_packet->offset = offset;
2722 offset += data_per_desc;
 /* The very last packet of the buffer. */
2727 sts.d32 = dma_desc->status.d32;
2729 /* Write status into the iso packet descriptor */
2730 iso_packet->status =
2731 sts.b_iso_out.rxsts + (sts.b_iso_out.bs ^ BS_DMA_DONE);
2732 if (iso_packet->status) {
2733 iso_packet->status = -DWC_E_NO_DATA;
2735 /* Received data length */
2736 if (!sts.b_iso_out.rxbytes) {
2737 iso_packet->length =
2738 dwc_ep->data_per_frame - sts.b_iso_out.rxbytes;
2740 iso_packet->length =
2741 dwc_ep->data_per_frame - sts.b_iso_out.rxbytes +
2742 (4 - dwc_ep->data_per_frame % 4);
2745 iso_packet->offset = offset;
 /* IN: one descriptor per frame; process all but the last. */
2750 dwc_ep->iso_desc_addr +
2751 dwc_ep->desc_cnt*dwc_ep->proc_buf_num;
2753 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
2754 sts.d32 = dma_desc->status.d32;
2756 /* Write status in iso packet descriptor */
2757 iso_packet->status =
2758 sts.b_iso_in.txsts +
2759 (sts.b_iso_in.bs ^ BS_DMA_DONE);
2760 if (iso_packet->status != 0) {
2761 iso_packet->status = -DWC_E_NO_DATA;
2764 /* Bytes that have been transferred */
2765 iso_packet->length =
2766 dwc_ep->data_per_frame - sts.b_iso_in.txbytes;
 /* Last IN descriptor: spin until the core releases it. */
2772 sts.d32 = dma_desc->status.d32;
2773 while (sts.b_iso_in.bs == BS_DMA_BUSY) {
2774 sts.d32 = dma_desc->status.d32;
2777 /* Write status in iso packet descriptor; TODO: to be done with ERROR codes */
2778 iso_packet->status =
2779 sts.b_iso_in.txsts + (sts.b_iso_in.bs ^ BS_DMA_DONE);
2780 if (iso_packet->status != 0) {
2781 iso_packet->status = -DWC_E_NO_DATA;
2784 /* Bytes that have been transferred */
2785 iso_packet->length =
2786 dwc_ep->data_per_frame - sts.b_iso_in.txbytes;
2791 * This function reinitializes DMA Descriptors for an Isochronous transfer.
 *
 * Rebuilds the whole descriptor chain of the buffer selected by
 * proc_buf_num (buffer 0 or 1), programs per-descriptor byte counts and
 * buffer addresses, sets IOC/L on the final descriptor, updates
 * next_frame, and finally toggles proc_buf_num.
 * NOTE(review): interior lines are missing from this extract; code text
 * is left untouched.
2793 * @param core_if Programming view of DWC_otg controller.
2794 * @param dwc_ep The EP to start the transfer on.
2797 static void reinit_ddma_iso_xfer(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep)
2800 dwc_otg_dev_dma_desc_t *dma_desc;
2802 volatile uint32_t *addr;
2803 dev_dma_desc_sts_t sts = {
2805 uint32_t data_per_desc;
 /* Direction selects which DEPCTL register will be used. */
2807 if (dwc_ep->is_in == 0) {
2808 addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
2810 addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
2813 if (dwc_ep->proc_buf_num == 0) {
2814 /** Buffer 0 descriptors setup */
2815 dma_ad = dwc_ep->dma_addr0;
2817 /** Buffer 1 descriptors setup */
2818 dma_ad = dwc_ep->dma_addr1;
2821 /** Reinit closed DMA Descriptors*/
2823 if (dwc_ep->is_in == 0) {
2825 dwc_ep->iso_desc_addr +
2826 dwc_ep->desc_cnt*dwc_ep->proc_buf_num;
 /* Template status word reused for every OUT descriptor. */
2828 sts.b_iso_out.bs = BS_HOST_READY;
2829 sts.b_iso_out.rxsts = 0;
2830 sts.b_iso_out.l = 0;
2831 sts.b_iso_out.sp = 0;
2832 sts.b_iso_out.ioc = 0;
2833 sts.b_iso_out.pid = 0;
2834 sts.b_iso_out.framenum = 0;
 /* All frames except the last one of the buffer. */
2836 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
2837 i += dwc_ep->pkt_per_frm) {
2838 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
2840 ((j + 1)*dwc_ep->maxpacket >
2842 data_per_frame) ? dwc_ep->data_per_frame -
2843 j*dwc_ep->maxpacket : dwc_ep->maxpacket;
 /* Pad each packet's byte count up to a DWORD boundary. */
2845 (data_per_desc % 4) ? (4 -
2848 sts.b_iso_out.rxbytes = data_per_desc;
2849 dma_desc->buf = dma_ad;
2850 dma_desc->status.d32 = sts.d32;
2852 dma_ad += data_per_desc;
 /* Last frame: all but its final packet. */
2857 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
2860 ((j + 1)*dwc_ep->maxpacket >
2861 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
2862 j*dwc_ep->maxpacket : dwc_ep->maxpacket;
2864 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
2865 sts.b_iso_out.rxbytes = data_per_desc;
2867 dma_desc->buf = dma_ad;
2868 dma_desc->status.d32 = sts.d32;
2871 dma_ad += data_per_desc;
 /* Final descriptor: interrupt-on-completion and Last flags. */
2874 sts.b_iso_out.ioc = 1;
2875 sts.b_iso_out.l = dwc_ep->proc_buf_num;
2878 ((j + 1)*dwc_ep->maxpacket >
2879 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
2880 j*dwc_ep->maxpacket : dwc_ep->maxpacket;
2882 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
2883 sts.b_iso_out.rxbytes = data_per_desc;
2885 dma_desc->buf = dma_ad;
2886 dma_desc->status.d32 = sts.d32;
 /* IN: one descriptor per frame. */
2891 dwc_ep->iso_desc_addr +
2892 dwc_ep->desc_cnt*dwc_ep->proc_buf_num;
2894 sts.b_iso_in.bs = BS_HOST_READY;
2895 sts.b_iso_in.txsts = 0;
2896 sts.b_iso_in.sp = 0;
2897 sts.b_iso_in.ioc = 0;
2898 sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
2899 sts.b_iso_in.framenum = dwc_ep->next_frame;
2900 sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
2903 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
2904 dma_desc->buf = dma_ad;
2905 dma_desc->status.d32 = sts.d32;
 /* Each descriptor is scheduled bInterval frames later. */
2907 sts.b_iso_in.framenum += dwc_ep->bInterval;
2908 dma_ad += dwc_ep->data_per_frame;
 /* Final IN descriptor gets IOC and the Last flag. */
2912 sts.b_iso_in.ioc = 1;
2913 sts.b_iso_in.l = dwc_ep->proc_buf_num;
2915 dma_desc->buf = dma_ad;
2916 dma_desc->status.d32 = sts.d32;
2918 dwc_ep->next_frame =
2919 sts.b_iso_in.framenum + dwc_ep->bInterval * 1;
 /* Flip to the other half of the double buffer. */
2921 dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1;
2925 * This function is to handle Iso EP transfer complete interrupt
2926 * in case Iso out packet was dropped
 *
 * Marks the dropped packets of the current frame with -DWC_E_NO_DATA,
 * then — if packets remain — reprograms DOEPTSIZ/DOEPDMA and re-enables
 * the endpoint (EPENA + CNAK) to finish the buffer.
2928 * @param core_if Programming view of DWC_otg controller.
2929 * @param dwc_ep The EP for which transfer complete was asserted
2932 static uint32_t handle_iso_out_pkt_dropped(dwc_otg_core_if_t *core_if,
2936 uint32_t drp_pkt_cnt;
2937 deptsiz_data_t deptsiz = {
2939 depctl_data_t depctl = {
2944 DWC_READ_REG32(&core_if->dev_if->
2945 out_ep_regs[dwc_ep->num]->doeptsiz);
 /* Index of the first dropped packet and how many were dropped
  * within the current frame. */
2947 drp_pkt = dwc_ep->pkt_cnt - deptsiz.b.pktcnt;
2948 drp_pkt_cnt = dwc_ep->pkt_per_frm - (drp_pkt % dwc_ep->pkt_per_frm);
2950 /* Setting dropped packets status */
2951 for (i = 0; i < drp_pkt_cnt; ++i) {
2952 dwc_ep->pkt_info[drp_pkt].status = -DWC_E_NO_DATA;
 /* Recompute remaining transfer size for the packets still pending. */
2957 if (deptsiz.b.pktcnt > 0) {
2958 deptsiz.b.xfersize =
2959 dwc_ep->xfer_len - (dwc_ep->pkt_cnt -
2960 deptsiz.b.pktcnt)*dwc_ep->maxpacket;
2962 deptsiz.b.xfersize = 0;
2963 deptsiz.b.pktcnt = 0;
2966 DWC_WRITE_REG32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz,
2969 if (deptsiz.b.pktcnt > 0) {
 /* Restart DMA at the first not-yet-received byte of the active
  * double buffer. */
2970 if (dwc_ep->proc_buf_num) {
2972 dwc_ep->dma_addr1 + dwc_ep->xfer_len -
2976 dwc_ep->dma_addr0 + dwc_ep->xfer_len -
 /* NOTE(review): stray duplicated semicolon below — harmless,
  * but should be cleaned up in the source. */
2977 deptsiz.b.xfersize;;
2980 DWC_WRITE_REG32(&core_if->dev_if->
2981 out_ep_regs[dwc_ep->num]->doepdma, dma_addr);
2983 /** Re-enable endpoint, clear nak */
2988 DWC_MODIFY_REG32(&core_if->dev_if->
2989 out_ep_regs[dwc_ep->num]->doepctl, depctl.d32,
2998 * This function sets iso packets information (PTI mode).
 *
 * If the whole programmed transfer completed (xfersize == 0), fills
 * pkt_info[] with per-packet length/offset for every packet of the
 * buffer and returns 0. A non-zero residue is treated as a merged
 * packet-dropped condition and delegated to handle_iso_out_pkt_dropped.
3000 * @param core_if Programming view of DWC_otg controller.
3001 * @param ep The EP to start the transfer on.
3004 static uint32_t set_iso_pkts_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
3008 iso_pkt_info_t *packet_info = ep->pkt_info;
3010 uint32_t frame_data;
3011 deptsiz_data_t deptsiz;
3013 if (ep->proc_buf_num == 0) {
3014 /** Buffer 0 descriptors setup */
3015 dma_ad = ep->dma_addr0;
3017 /** Buffer 1 descriptors setup */
3018 dma_ad = ep->dma_addr1;
 /* Read the residual transfer size for the proper direction. */
3023 DWC_READ_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
3027 DWC_READ_REG32(&core_if->dev_if->out_ep_regs[ep->num]->
3031 if (!deptsiz.b.xfersize) {
 /* Full transfer completed: record each packet of each frame. */
3033 for (i = 0; i < ep->pkt_cnt; i += ep->pkt_per_frm) {
3034 frame_data = ep->data_per_frame;
3035 for (j = 0; j < ep->pkt_per_frm; ++j) {
3037 /* Packet status - is not set as initially
3038 * it is set to 0 and if packet was sent
3039 successfully, status field will remain 0*/
3041 /* Bytes that have been transferred */
3042 packet_info->length =
3044 frame_data) ? ep->maxpacket : frame_data;
3046 /* Received packet offset */
3047 packet_info->offset = offset;
3048 offset += packet_info->length;
3049 frame_data -= packet_info->length;
3056 /* This is a workaround for in case of Transfer Complete with
3057 * PktDrpSts interrupts merging - in this case Transfer complete
3058 * interrupt for Isoc Out Endpoint is asserted without PktDrpSts
3059 * set and with DOEPTSIZ register non zero. Investigations showed,
3060 * that this happens when Out packet is dropped, but because of
3061 * interrupts merging during first interrupt handling PktDrpSts
3062 * bit is cleared and for next merged interrupts it is not reset.
3063 * In this case SW handles the interrupt as if PktDrpSts bit is set.
3068 return handle_iso_out_pkt_dropped(core_if, ep);
3074 * This function is to handle Iso EP transfer complete interrupt
 *
 * Dispatches on the DMA mode: Descriptor DMA collects packet info and
 * re-arms the descriptor chain; Buffer DMA with PTI either restarts the
 * buffer transfer or falls back to per-packet bookkeeping; Slave/plain
 * mode advances per-packet state. When a whole buffer finished
 * (is_last), the iso buffer-done callback is invoked.
3076 * @param pcd The PCD
3077 * @param ep The EP for which transfer complete was asserted
3080 static void complete_iso_ep(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep)
3082 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
3083 dwc_ep_t *dwc_ep = &ep->dwc_ep;
3084 uint8_t is_last = 0;
 /* next_frame must have been initialized by the start path. */
3086 if (ep->dwc_ep.next_frame == 0xffffffff) {
3087 DWC_WARN("Next frame is not set!\n");
3091 if (core_if->dma_enable) {
3092 if (core_if->dma_desc_enable) {
 /* Descriptor DMA: harvest packet info, re-arm chain. */
3093 set_ddma_iso_pkts_info(core_if, dwc_ep);
3094 reinit_ddma_iso_xfer(core_if, dwc_ep);
3097 if (core_if->pti_enh_enable) {
 /* PTI mode: non-zero return means packets dropped and
  * already restarted; otherwise flip buffers and go on. */
3098 if (set_iso_pkts_info(core_if, dwc_ep)) {
3099 dwc_ep->proc_buf_num =
3100 (dwc_ep->proc_buf_num ^ 1) & 0x1;
3101 dwc_otg_iso_ep_start_buf_transfer
3106 set_current_pkt_info(core_if, dwc_ep);
3107 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
 /* Buffer exhausted: switch to the other half. */
3109 dwc_ep->cur_pkt = 0;
3110 dwc_ep->proc_buf_num =
3111 (dwc_ep->proc_buf_num ^ 1) & 0x1;
3112 if (dwc_ep->proc_buf_num) {
3113 dwc_ep->cur_pkt_addr =
3115 dwc_ep->cur_pkt_dma_addr =
3118 dwc_ep->cur_pkt_addr =
3120 dwc_ep->cur_pkt_dma_addr =
3125 dwc_otg_iso_ep_start_frm_transfer(core_if,
 /* Slave mode: per-packet handling, same buffer-flip logic. */
3130 set_current_pkt_info(core_if, dwc_ep);
3131 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
3133 dwc_ep->cur_pkt = 0;
3134 dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1;
3135 if (dwc_ep->proc_buf_num) {
3136 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1;
3137 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1;
3139 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0;
3140 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0;
3144 dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep);
 /* Notify the gadget layer that a whole iso buffer completed. */
3147 dwc_otg_iso_buffer_done(pcd, ep, ep->iso_req_handle);
3149 #endif /* DWC_EN_ISOC */
3152 * This function handles the BNA interrupt for Non-Isochronous EPs.
 *
 * Re-arms the non-ISO descriptor chain (BS_HOST_READY) — optionally
 * starting from the descriptor the core stalled on when cont_on_bna is
 * set for OUT EPs — and then re-enables the endpoint.
3155 static void dwc_otg_pcd_handle_noniso_bna(dwc_otg_pcd_ep_t *ep)
3157 dwc_ep_t *dwc_ep = &ep->dwc_ep;
3158 volatile uint32_t *addr;
3159 depctl_data_t depctl = {
3161 dwc_otg_pcd_t *pcd = ep->pcd;
3162 dwc_otg_dev_dma_desc_t *dma_desc;
3163 dev_dma_desc_sts_t sts = {
3165 dwc_otg_core_if_t *core_if = ep->pcd->core_if;
 /* BNA with no descriptors programmed is unexpected — warn. */
3168 if (!dwc_ep->desc_cnt)
3169 DWC_WARN("Ep%d %s Descriptor count = %d \n", dwc_ep->num,
3170 (dwc_ep->is_in ? "IN" : "OUT"), dwc_ep->desc_cnt);
3172 if (core_if->core_params->cont_on_bna && !dwc_ep->is_in
3173 && dwc_ep->type != DWC_OTG_EP_TYPE_CONTROL) {
 /* Continue from where DMA stopped: derive the descriptor
  * index from the current DOEPDMA pointer. */
3175 dwc_otg_dev_out_ep_regs_t *out_regs =
3176 core_if->dev_if->out_ep_regs[dwc_ep->num];
3177 doepdma = DWC_READ_REG32(&(out_regs->doepdma));
3180 dwc_ep->dma_desc_addr) / sizeof(dwc_otg_dev_dma_desc_t);
3181 dma_desc = &(dwc_ep->desc_addr[start]);
3184 dma_desc = dwc_ep->desc_addr;
 /* Hand the remaining descriptors back to the core. */
3187 for (i = start; i < dwc_ep->desc_cnt; ++i, ++dma_desc) {
3188 sts.d32 = dma_desc->status.d32;
3189 sts.b.bs = BS_HOST_READY;
3190 dma_desc->status.d32 = sts.d32;
 /* Re-enable the endpoint via the direction-appropriate DEPCTL. */
3193 if (dwc_ep->is_in == 0) {
3195 &GET_CORE_IF(pcd)->dev_if->out_ep_regs[dwc_ep->num]->
3199 &GET_CORE_IF(pcd)->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
3203 DWC_MODIFY_REG32(addr, 0, depctl.d32);
3207 * This function handles EP0 Control transfers.
3209 * The state of the control transfers are tracked in
3210 * <code>ep0state</code>.
 *
 * For the DATA phases it computes byte_count from the residue left in
 * D(I/O)EPTSIZ (Buffer DMA) or from the DMA descriptor status
 * (Descriptor DMA), advances the transfer bookkeeping, and either
 * continues the transfer (more data / pending zlp) or completes the
 * request. STATUS phases complete the request and re-arm EP0 for the
 * next SETUP.
3212 static void handle_ep0(dwc_otg_pcd_t *pcd)
3214 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3215 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
3216 dev_dma_desc_sts_t desc_sts;
3217 deptsiz0_data_t deptsiz;
3218 uint32_t byte_count;
3221 DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
3222 print_ep0_state(pcd);
3225 switch (pcd->ep0state) {
3226 case EP0_DISCONNECT:
3230 pcd->request_config = 0;
3235 case EP0_IN_DATA_PHASE:
3237 DWC_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n",
3238 ep0->dwc_ep.num, (ep0->dwc_ep.is_in ? "IN" : "OUT"),
3239 ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
3242 if (core_if->dma_enable != 0) {
3244 * For EP0 we can only program 1 packet at a time so we
3245 * need to do the make calculations after each complete.
3246 * Call write_packet to make the calculations, as in
3247 * slave mode, and use those values to determine if we
 /* Buffer DMA: bytes moved = programmed len - residue. */
3250 if (core_if->dma_desc_enable == 0) {
3252 DWC_READ_REG32(&core_if->
3253 dev_if->in_ep_regs[0]->
3256 ep0->dwc_ep.xfer_len - deptsiz.b.xfersize;
 /* Descriptor DMA: residue lives in the descriptor. */
3259 core_if->dev_if->in_desc_addr->status;
3261 ep0->dwc_ep.xfer_len - desc_sts.b.bytes;
3263 ep0->dwc_ep.xfer_count += byte_count;
3264 ep0->dwc_ep.xfer_buff += byte_count;
3265 ep0->dwc_ep.dma_addr += byte_count;
3267 if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) {
3268 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
3270 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
3271 } else if (ep0->dwc_ep.sent_zlp) {
3272 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
3274 ep0->dwc_ep.sent_zlp = 0;
3275 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER sent zlp\n");
3277 ep0_complete_request(ep0);
3278 DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
3281 case EP0_OUT_DATA_PHASE:
3283 DWC_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n",
3284 ep0->dwc_ep.num, (ep0->dwc_ep.is_in ? "IN" : "OUT"),
3285 ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
3287 if (core_if->dma_enable != 0) {
3288 if (core_if->dma_desc_enable == 0) {
3290 DWC_READ_REG32(&core_if->
3291 dev_if->out_ep_regs[0]->
 /* OUT EP0: one maxpacket programmed at a time. */
3294 ep0->dwc_ep.maxpacket - deptsiz.b.xfersize;
3297 core_if->dev_if->out_desc_addr->status;
3299 ep0->dwc_ep.maxpacket - desc_sts.b.bytes;
3301 ep0->dwc_ep.xfer_count += byte_count;
3302 ep0->dwc_ep.xfer_buff += byte_count;
3303 ep0->dwc_ep.dma_addr += byte_count;
3305 if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) {
3306 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
3308 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
3309 } else if (ep0->dwc_ep.sent_zlp) {
3310 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
3312 ep0->dwc_ep.sent_zlp = 0;
3313 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER sent zlp\n");
3315 ep0_complete_request(ep0);
3316 DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
3320 case EP0_IN_STATUS_PHASE:
3321 case EP0_OUT_STATUS_PHASE:
3322 DWC_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n");
3323 ep0_complete_request(ep0);
 /* Status phase done: back to IDLE, arm for the next SETUP. */
3324 pcd->ep0state = EP0_IDLE;
3326 ep0->dwc_ep.is_in = 0; /* OUT for next SETUP */
3328 /* Prepare for more SETUP Packets */
3329 if (core_if->dma_enable) {
3330 ep0_out_start(core_if, pcd);
3335 DWC_ERROR("EP0 STALLed, should not get here pcd_setup()\n");
3339 print_ep0_state(pcd);
/*
 * Restart an interrupted IN transfer on the given endpoint.
 *
 * If DIEPTSIZ shows a whole packet still pending (pktcnt != 0 with
 * xfersize == 0), rewinds the software bookkeeping by one maxpacket so
 * the last packet is resent, then restarts the transfer (EP0 path for
 * endpoint 0). ISO endpoints are excluded when DWC_EN_ISOC is set.
 */
3346 static void restart_transfer(dwc_otg_pcd_t *pcd, const uint32_t epnum)
3348 dwc_otg_core_if_t *core_if;
3349 dwc_otg_dev_if_t *dev_if;
3350 deptsiz_data_t dieptsiz = {
3352 dwc_otg_pcd_ep_t *ep;
3354 ep = get_in_ep(pcd, epnum);
 /* ISO EPs are not restarted here. */
3357 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
3360 #endif /* DWC_EN_ISOC */
3362 core_if = GET_CORE_IF(pcd);
3363 dev_if = core_if->dev_if;
3365 dieptsiz.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->dieptsiz);
3367 DWC_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x xfer_len=%0x"
3368 " stopped=%d\n", ep->dwc_ep.xfer_buff,
3369 ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len, ep->stopped);
3371 * If xfersize is 0 and pktcnt in not 0, resend the last packet.
3373 if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0 &&
3374 ep->dwc_ep.start_xfer_buff != 0) {
3375 if (ep->dwc_ep.total_len <= ep->dwc_ep.maxpacket) {
 /* Single-packet transfer: rewind to the start. */
3376 ep->dwc_ep.xfer_count = 0;
3377 ep->dwc_ep.xfer_buff = ep->dwc_ep.start_xfer_buff;
3378 ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count;
 /* Multi-packet: step back exactly one maxpacket. */
3380 ep->dwc_ep.xfer_count -= ep->dwc_ep.maxpacket;
3381 /* convert packet size to dwords. */
3382 ep->dwc_ep.xfer_buff -= ep->dwc_ep.maxpacket;
3383 ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count;
3386 DWC_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x "
3387 "xfer_len=%0x stopped=%d\n",
3388 ep->dwc_ep.xfer_buff,
3389 ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len,
 /* EP0 uses its dedicated start routine. */
3392 dwc_otg_ep0_start_transfer(core_if, &ep->dwc_ep);
3394 dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
3400 * This function creates a new nextep sequence based on the Learn Queue.
 *
 * Reads the IN-token learn queue (DTKNQR1..4), reconstructs the order
 * in which the host issued IN tokens, sorts active EPs by that order,
 * and rewrites core_if->nextep_seq[] / first_in_nextep_seq so the
 * shared-FIFO "next endpoint" chain matches the host's polling
 * sequence. Finally the learn queue itself is flushed.
3402 * @param core_if Programming view of DWC_otg controller
3404 void predict_nextep_seq(dwc_otg_core_if_t *core_if)
3406 dwc_otg_device_global_regs_t *dev_global_regs =
3407 core_if->dev_if->dev_global_regs;
3408 const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth;
3409 /* Number of Token Queue Registers */
3410 const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8;
3411 dtknq1_data_t dtknqr1;
3412 uint32_t in_tkn_epnums[4];
3413 uint8_t seqnum[MAX_EPS_CHANNELS];
3414 uint8_t intkn_seq[TOKEN_Q_DEPTH];
3415 grstctl_t resetctl = {
3423 volatile uint32_t *addr = &dev_global_regs->dtknqr1;
3425 DWC_DEBUGPL(DBG_PCD, "dev_token_q_depth=%d\n", TOKEN_Q_DEPTH);
3427 /* Read the DTKNQ Registers */
3428 for (i = 0; i < DTKNQ_REG_CNT; i++) {
3429 in_tkn_epnums[i] = DWC_READ_REG32(addr);
3430 DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i + 1,
 /* DTKNQR2 is followed by dvbusdis in the register map; skip
  * ahead to DTKNQR3 when we hit it. */
3432 if (addr == &dev_global_regs->dvbusdis) {
3433 addr = &dev_global_regs->dtknqr3_dthrctl;
3440 /* Copy the DTKNQR1 data to the bit field. */
3441 dtknqr1.d32 = in_tkn_epnums[0];
 /* Determine the valid window [start, end] of the circular queue. */
3442 if (dtknqr1.b.wrap_bit) {
3443 ndx = dtknqr1.b.intknwptr;
3446 end = TOKEN_Q_DEPTH - 1;
3449 end = dtknqr1.b.intknwptr - 1;
3455 /* Fill seqnum[] by initial values: EP number + 31 */
3456 for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
3460 /* Fill intkn_seq[] from in_tkn_epnums[0] */
3461 for (i = 0; i < 6; i++)
3462 intkn_seq[i] = (in_tkn_epnums[0] >> ((7 - i) * 4)) & 0xf;
3464 if (TOKEN_Q_DEPTH > 6) {
3465 /* Fill intkn_seq[] from in_tkn_epnums[1] */
3466 for (i = 6; i < 14; i++)
3468 (in_tkn_epnums[1] >> ((7 - (i - 6)) * 4)) & 0xf;
3471 if (TOKEN_Q_DEPTH > 14) {
3472 /* Fill intkn_seq[] from in_tkn_epnums[2] */
3473 for (i = 14; i < 22; i++)
3475 (in_tkn_epnums[2] >> ((7 - (i - 14)) * 4)) & 0xf;
3478 if (TOKEN_Q_DEPTH > 22) {
3479 /* Fill intkn_seq[] from in_tkn_epnums[3] */
3480 for (i = 22; i < 30; i++)
3482 (in_tkn_epnums[3] >> ((7 - (i - 22)) * 4)) & 0xf;
3485 DWC_DEBUGPL(DBG_PCDV, "%s start=%d end=%d intkn_seq[]:\n", __func__,
3487 for (i = 0; i < TOKEN_Q_DEPTH; i++)
3488 DWC_DEBUGPL(DBG_PCDV, "%d\n", intkn_seq[i]);
3490 /* Update seqnum based on intkn_seq[] */
 /* Walk the valid window; the last token seen for an EP wins. */
3493 seqnum[intkn_seq[ndx]] = i;
3496 if (ndx == TOKEN_Q_DEPTH)
3498 } while (i < TOKEN_Q_DEPTH);
3500 /* Mark non active EP's in seqnum[] by 0xff */
3501 for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
3502 if (core_if->nextep_seq[i] == 0xff)
 /* Bubble sort seqnum[] so EPs appear in token order. */
3508 while (!sort_done) {
3510 for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
3511 if (seqnum[i] > seqnum[i + 1]) {
3513 seqnum[i] = seqnum[i + 1];
3514 seqnum[i + 1] = temp;
3520 ndx = start + seqnum[0];
3521 if (ndx >= TOKEN_Q_DEPTH)
3522 ndx = ndx % TOKEN_Q_DEPTH;
3523 core_if->first_in_nextep_seq = intkn_seq[ndx];
3525 /* Update seqnum[] by EP numbers */
3526 for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
3528 if (seqnum[i] < 31) {
3529 ndx = start + seqnum[i];
3530 if (ndx >= TOKEN_Q_DEPTH)
3531 ndx = ndx % TOKEN_Q_DEPTH;
3532 seqnum[i] = intkn_seq[ndx];
 /* Entries still holding "EP + 31" map back to the EP number. */
3534 if (seqnum[i] < 0xff) {
3535 seqnum[i] = seqnum[i] - 31;
3542 /* Update nextep_seq[] based on seqnum[] */
3543 for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
3544 if (seqnum[i] != 0xff) {
3545 if (seqnum[i + 1] != 0xff) {
3546 core_if->nextep_seq[seqnum[i]] = seqnum[i + 1];
 /* Last active EP links back to the head of the chain. */
3548 core_if->nextep_seq[seqnum[i]] =
3549 core_if->first_in_nextep_seq;
3557 DWC_DEBUGPL(DBG_PCDV, "%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
3558 __func__, core_if->first_in_nextep_seq);
3559 for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
3560 DWC_DEBUGPL(DBG_PCDV, "%2d\n", core_if->nextep_seq[i]);
3563 /* Flush the Learning Queue */
3564 resetctl.d32 = DWC_READ_REG32(&core_if->core_global_regs->grstctl);
3565 resetctl.b.intknqflsh = 1;
3566 DWC_WRITE_REG32(&core_if->core_global_regs->grstctl, resetctl.d32);
3571 * handle the IN EP disable interrupt.
 *
 * Two paths: (1) no EP-mismatch recovery in progress (start_predict==0)
 * or a periodic EP — flush the EP's TxFIFO if dedicated FIFOs are used,
 * clear the global NP IN NAK, and restart the interrupted transaction;
 * (2) EP-mismatch recovery — count down start_predict until all
 * non-periodic IN EPs are disabled, then rebuild the nextep chain from
 * the learn queue, flush the shared NP TxFIFO, rewind/reprogram each
 * pending transfer, re-enable the EPs in predicted order, clear the
 * global NAK, and unmask the EP-mismatch interrupt.
3573 static inline void handle_in_ep_disable_intr(dwc_otg_pcd_t *pcd,
3574 const uint32_t epnum)
3576 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3577 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3578 deptsiz_data_t dieptsiz = {
3580 dctl_data_t dctl = {
3582 dwc_otg_pcd_ep_t *ep;
3584 gintmsk_data_t gintmsk_data;
3585 depctl_data_t depctl;
3587 uint32_t remain_to_transfer = 0;
3591 ep = get_in_ep(pcd, epnum);
3592 dwc_ep = &ep->dwc_ep;
 /* ISO IN EPs just need their TxFIFO flushed. */
3594 if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
3595 dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num);
3600 DWC_DEBUGPL(DBG_PCD, "diepctl%d=%0x\n", epnum,
3601 DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl));
3602 dieptsiz.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->dieptsiz);
3603 depctl.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl);
3605 DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n",
3606 dieptsiz.b.pktcnt, dieptsiz.b.xfersize);
 /* Path 1: not in EP-mismatch recovery, or a periodic EP
  * (eptype bit 0 set for isoc/interrupt). */
3608 if ((core_if->start_predict == 0) || (depctl.b.eptype & 1)) {
3610 if (core_if->en_multiple_tx_fifo)
3611 /* Flush the Tx FIFO */
3612 dwc_otg_flush_tx_fifo(core_if,
3613 dwc_ep->tx_fifo_num);
3614 /* Clear the Global IN NP NAK */
3616 dctl.b.cgnpinnak = 1;
3617 DWC_MODIFY_REG32(&dev_if->dev_global_regs->dctl,
3618 dctl.d32, dctl.d32);
3619 /* Restart the transaction */
3620 if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0) {
3621 restart_transfer(pcd, epnum);
3624 /* Restart the transaction */
3625 if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0) {
3626 restart_transfer(pcd, epnum);
3628 DWC_DEBUGPL(DBG_ANY, "STOPPED!!!\n");
 /* Path 2: count remaining NP IN EPs still to be disabled. */
3633 if (core_if->start_predict > 2) {
3635 core_if->start_predict--;
3639 core_if->start_predict--;
3641 if (core_if->start_predict == 1) {
3642 /* All NP IN Ep's disabled now */
3643 predict_nextep_seq(core_if);
3645 /* Update all active IN EP's NextEP field based of nextep_seq[] */
3646 for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
3648 DWC_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
3649 if (core_if->nextep_seq[i] != 0xff) {
3650 /* Active NP IN EP */
3651 depctl.b.nextep = core_if->nextep_seq[i];
3652 DWC_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl,
3656 /* Flush Shared NP TxFIFO */
3657 dwc_otg_flush_tx_fifo(core_if, 0);
3658 /* Rewind buffers */
3659 if (!core_if->dma_desc_enable) {
3660 i = core_if->first_in_nextep_seq;
 /* Walk the predicted chain, recomputing the residual
  * transfer size and DMA address for each pending EP. */
3662 ep = get_in_ep(pcd, i);
3664 DWC_READ_REG32(&dev_if->
3665 in_ep_regs[i]->dieptsiz);
3667 ep->dwc_ep.total_len -
3668 ep->dwc_ep.xfer_count;
3669 if (xfer_size > ep->dwc_ep.maxxfer)
3670 xfer_size = ep->dwc_ep.maxxfer;
3672 DWC_READ_REG32(&dev_if->
3673 in_ep_regs[i]->diepctl);
3674 if (dieptsiz.b.pktcnt != 0) {
3675 if (xfer_size == 0) {
3676 remain_to_transfer = 0;
 /* Exact-multiple transfers keep whole packets only. */
3679 ep->dwc_ep.maxpacket) ==
3681 remain_to_transfer =
3686 remain_to_transfer =
3699 DWC_READ_REG32(&dev_if->in_ep_regs
3701 dieptsiz.b.xfersize =
3703 DWC_WRITE_REG32(&dev_if->
3704 in_ep_regs[i]->dieptsiz,
 /* Advance DMA to the first unsent byte. */
3707 ep->dwc_ep.dma_addr + (xfer_size -
3708 remain_to_transfer);
3709 DWC_WRITE_REG32(&dev_if->
3710 in_ep_regs[i]->diepdma,
3713 i = core_if->nextep_seq[i];
3714 } while (i != core_if->first_in_nextep_seq);
3715 } else { /* dma_desc_enable */
3716 DWC_PRINTF("%s Learning Queue not supported in DDMA\n",
3720 /* Restart transfers in predicted sequences */
3721 i = core_if->first_in_nextep_seq;
3724 DWC_READ_REG32(&dev_if->in_ep_regs[i]->dieptsiz);
3726 DWC_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
3727 if (dieptsiz.b.pktcnt != 0) {
3729 DWC_READ_REG32(&dev_if->
3730 in_ep_regs[i]->diepctl);
3733 DWC_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl,
3736 i = core_if->nextep_seq[i];
3737 } while (i != core_if->first_in_nextep_seq);
3739 /* Clear the global non-periodic IN NAK handshake */
3741 dctl.b.cgnpinnak = 1;
3742 DWC_MODIFY_REG32(&dev_if->dev_global_regs->dctl, dctl.d32,
3745 /* Unmask EP Mismatch interrupt */
3746 gintmsk_data.d32 = 0;
3747 gintmsk_data.b.epmismatch = 1;
3748 DWC_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0,
 /* Recovery finished. */
3751 core_if->start_predict = 0;
3757 * Handler for the IN EP timeout handshake interrupt.
 *
 * In slave mode, masks the NP TxFIFO-empty interrupt, then enables the
 * Global IN NAK Effective interrupt and sets the global IN NAK so the
 * endpoint can subsequently be disabled safely.
3759 static inline void handle_in_ep_timeout_intr(dwc_otg_pcd_t *pcd,
3760 const uint32_t epnum)
3762 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3763 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3766 deptsiz_data_t dieptsiz = {
3770 dctl_data_t dctl = {
3772 dwc_otg_pcd_ep_t *ep;
3774 gintmsk_data_t intr_mask = {
3777 ep = get_in_ep(pcd, epnum);
3779 /* Disable the NP Tx Fifo Empty Interrupt */
3780 if (!core_if->dma_enable) {
3781 intr_mask.b.nptxfempty = 1;
3782 DWC_MODIFY_REG32(&core_if->core_global_regs->gintmsk,
3785 /** @todo NGS Check EP type.
3786 * Implement for Periodic EPs */
3790 /* Enable the Global IN NAK Effective Interrupt */
3791 intr_mask.b.ginnakeff = 1;
3792 DWC_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0, intr_mask.d32);
3794 /* Set Global IN NAK */
3795 dctl.b.sgnpinnak = 1;
3796 DWC_MODIFY_REG32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
3801 dieptsiz.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[num]->dieptsiz);
3802 DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n",
3803 dieptsiz.b.pktcnt, dieptsiz.b.xfersize);
3806 #ifdef DISABLE_PERIODIC_EP
3808 * Set the NAK bit for this EP to
3809 * start the disable process.
3813 DWC_MODIFY_REG32(&dev_if->in_ep_regs[num]->diepctl, diepctl.d32,
3821 * Handler for the IN EP NAK interrupt.
 *
 * No real handling is implemented yet; the interrupt is simply masked
 * (per-EP mask when multiproc interrupts are enabled, shared DIEPMSK
 * otherwise) so it does not fire again.
3823 static inline int32_t handle_in_ep_nak_intr(dwc_otg_pcd_t *pcd,
3824 const uint32_t epnum)
3826 /** @todo implement ISR */
3827 dwc_otg_core_if_t *core_if;
3828 diepmsk_data_t intr_mask = {
3831 DWC_PRINTF("INTERRUPT Handler not implemented for %s\n", "IN EP NAK");
3832 core_if = GET_CORE_IF(pcd);
3833 intr_mask.b.nak = 1;
3835 if (core_if->multiproc_int_enable) {
3836 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
3837 diepeachintmsk[epnum], intr_mask.d32, 0);
3839 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->diepmsk,
3847 * Handler for the OUT EP Babble interrupt.
 *
 * No real handling is implemented yet; the babble interrupt is masked
 * (per-EP mask when multiproc interrupts are enabled, shared DOEPMSK
 * otherwise) so it does not fire again.
3849 static inline int32_t handle_out_ep_babble_intr(dwc_otg_pcd_t *pcd,
3850 const uint32_t epnum)
3852 /** @todo implement ISR */
3853 dwc_otg_core_if_t *core_if;
3854 doepmsk_data_t intr_mask = {
3857 DWC_PRINTF("INTERRUPT Handler not implemented for %s\n",
3859 core_if = GET_CORE_IF(pcd);
3860 intr_mask.b.babble = 1;
3862 if (core_if->multiproc_int_enable) {
3863 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
3864 doepeachintmsk[epnum], intr_mask.d32, 0);
3866 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->doepmsk,
3874 * Handler for the OUT EP NAK interrupt.
 *
 * No real handling is implemented yet; the NAK interrupt is masked
 * (per-EP mask when multiproc interrupts are enabled, shared DOEPMSK
 * otherwise) so it does not fire again.
3876 static inline int32_t handle_out_ep_nak_intr(dwc_otg_pcd_t *pcd,
3877 const uint32_t epnum)
3879 /** @todo implement ISR */
3880 dwc_otg_core_if_t *core_if;
3881 doepmsk_data_t intr_mask = {
3884 DWC_DEBUGPL(DBG_ANY, "INTERRUPT Handler not implemented for %s\n",
3886 core_if = GET_CORE_IF(pcd);
3887 intr_mask.b.nak = 1;
3889 if (core_if->multiproc_int_enable) {
3890 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
3891 doepeachintmsk[epnum], intr_mask.d32, 0);
3893 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->doepmsk,
3901 * Handler for the OUT EP NYET interrupt.
/*
 * Stub handler for the OUT EP NYET interrupt.  Prints that no real
 * handler is implemented, then masks the per-endpoint NYET interrupt:
 * via DOEPEACHINTMSK[epnum] in multiprocessor interrupt mode, otherwise
 * via the shared DOEPMSK register.
 * (NOTE(review): interior lines of this function are elided in this view.)
 */
3903 static inline int32_t handle_out_ep_nyet_intr(dwc_otg_pcd_t *pcd,
3904 const uint32_t epnum)
3906 /** @todo implement ISR */
3907 dwc_otg_core_if_t *core_if;
3908 doepmsk_data_t intr_mask = {
3911 DWC_PRINTF("INTERRUPT Handler not implemented for %s\n", "OUT EP NYET");
3912 core_if = GET_CORE_IF(pcd);
3913 intr_mask.b.nyet = 1;
3915 if (core_if->multiproc_int_enable) {
3916 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
3917 doepeachintmsk[epnum], intr_mask.d32, 0);
3919 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->doepmsk,
3927 * This interrupt indicates that an IN EP has a pending Interrupt.
3928 * The sequence for handling the IN EP interrupt is shown below:
3929 * -# Read the Device All Endpoint Interrupt register
3930 * -# Repeat the following for each IN EP interrupt bit set (from
3932 * -# Read the Device Endpoint Interrupt (DIEPINTn) register
3933 * -# If "Transfer Complete" call the request complete function
3934 * -# If "Endpoint Disabled" complete the EP disable procedure.
3935 * -# If "AHB Error Interrupt" log error
3936 * -# If "Time-out Handshake" log error
3937 * -# If "IN Token Received when TxFIFO Empty" write packet to Tx
3939 * -# If "IN Token EP Mismatch" (disable, this is handled by EP
3940 * Mismatch Interrupt)
/*
 * Service all pending IN endpoint interrupts.  Reads the device all-EP
 * interrupt register, then for each flagged IN endpoint reads DIEPINTn
 * and dispatches on its bits: xfercompl (complete transfer, ISOC frame
 * advance with 14-bit frame wrap at 0x3FFF), epdisabled, ahberr (logs
 * DMA address, forces soft disconnect and stops the PCD), timeout,
 * intktxfemp, intknepmis, inepnakeff, emptyintr (write Tx FIFO), bna
 * (descriptor-DMA only), and nak.  Each handled bit is acknowledged via
 * the local CLEAR_IN_EP_INTR macro, which writes a one-bit value back to
 * DIEPINTn (write-1-to-clear).
 * (NOTE(review): many interior lines of this function are elided in this
 * view; the visible statements are documented as-is.)
 */
3942 static int32_t dwc_otg_pcd_handle_in_ep_intr(dwc_otg_pcd_t *pcd)
3944 #define CLEAR_IN_EP_INTR(__core_if, __epnum, __intr) \
3946 diepint_data_t diepint = {.d32 = 0}; \
3947 diepint.b.__intr = 1; \
3948 DWC_WRITE_REG32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \
3952 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3953 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3954 diepint_data_t diepint = {
3956 depctl_data_t depctl = {
3960 dwc_otg_pcd_ep_t *ep;
3962 gintmsk_data_t intr_mask = {
3964 dctl_data_t dctl = {
3967 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd);
3969 /* Read in the device interrupt bits */
3970 ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if);
3972 /* Service the Device IN interrupts for each endpoint */
3974 if (ep_intr & 0x1) {
3976 /* Get EP pointer */
3977 ep = get_in_ep(pcd, epnum);
3978 dwc_ep = &ep->dwc_ep;
3981 DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl);
3983 DWC_READ_REG32(&dev_if->
3984 dev_global_regs->dtknqr4_fifoemptymsk);
3986 DWC_DEBUGPL(DBG_PCDV,
3987 "IN EP INTERRUPT - %d\nepmty_msk - %8x diepctl - %8x\n",
3988 epnum, empty_msk, depctl.d32);
3990 DWC_DEBUGPL(DBG_PCD,
3991 "EP%d-%s: type=%d, mps=%d\n",
3992 dwc_ep->num, (dwc_ep->is_in ? "IN" : "OUT"),
3993 dwc_ep->type, dwc_ep->maxpacket);
3996 dwc_otg_read_dev_in_ep_intr(core_if, dwc_ep);
3998 DWC_DEBUGPL(DBG_PCDV,
3999 "EP %d Interrupt Register - 0x%x\n", epnum,
4001 /* Transfer complete */
4002 if (diepint.b.xfercompl) {
4003 /* Disable the NP Tx FIFO Empty
4005 if (core_if->en_multiple_tx_fifo == 0) {
4006 intr_mask.b.nptxfempty = 1;
4009 core_global_regs->gintmsk,
4012 /* Disable the Tx FIFO Empty Interrupt for this EP */
4013 uint32_t fifoemptymsk =
4015 DWC_MODIFY_REG32(&core_if->
4016 dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
4019 /* Clear the bit in DIEPINTn for this interrupt */
4020 CLEAR_IN_EP_INTR(core_if, epnum, xfercompl);
4022 /* Complete the transfer */
4027 else if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
4029 complete_iso_ep(pcd, ep);
4031 #endif /* DWC_EN_ISOC */
4032 #ifdef DWC_UTE_PER_IO
4033 else if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
4035 complete_xiso_ep(ep);
4037 #endif /* DWC_UTE_PER_IO */
4039 if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC
4040 && dwc_ep->bInterval > 1) {
4041 dwc_ep->frame_num +=
4043 if (dwc_ep->frame_num > 0x3FFF) {
4044 dwc_ep->frm_overrun = 1;
4045 dwc_ep->frame_num &=
4048 dwc_ep->frm_overrun = 0;
4052 CLEAR_IN_EP_INTR(core_if, epnum,
4056 /* Endpoint disable */
4057 if (diepint.b.epdisabled) {
4058 DWC_DEBUGPL(DBG_ANY, "EP%d IN disabled\n",
4060 handle_in_ep_disable_intr(pcd, epnum);
4062 /* Clear the bit in DIEPINTn for this interrupt */
4063 CLEAR_IN_EP_INTR(core_if, epnum, epdisabled);
4066 if (diepint.b.ahberr) {
4067 DWC_ERROR("EP%d IN AHB Error\n", epnum);
4068 /* Clear the bit in DIEPINTn for this interrupt */
4069 DWC_ERROR("EP%d DEPDMA=0x%08x \n",
4072 in_ep_regs[epnum]->diepdma);
4073 CLEAR_IN_EP_INTR(core_if, epnum, ahberr);
4075 DWC_READ_REG32(&core_if->
4076 dev_if->dev_global_regs->
4078 dctl.b.sftdiscon = 1;
4079 DWC_WRITE_REG32(&core_if->
4080 dev_if->dev_global_regs->dctl,
4082 dwc_otg_disable_global_interrupts(core_if);
4083 ep->pcd->vbus_status = 0;
4084 if (ep->pcd->conn_status) {
4085 ep->pcd->conn_status = 0;
4087 DWC_SPINUNLOCK(pcd->lock);
4088 cil_pcd_stop(core_if);
4089 DWC_SPINLOCK(pcd->lock);
4091 /* TimeOUT Handshake (non-ISOC IN EPs) */
4092 if (diepint.b.timeout) {
4093 DWC_ERROR("EP%d IN Time-out\n", epnum);
4094 handle_in_ep_timeout_intr(pcd, epnum);
4096 CLEAR_IN_EP_INTR(core_if, epnum, timeout);
4098 /** IN Token received with TxF Empty */
4099 if (diepint.b.intktxfemp) {
4100 DWC_DEBUGPL(DBG_ANY,
4101 "EP%d IN TKN TxFifo Empty\n",
4103 if (!ep->stopped && epnum != 0) {
4105 diepmsk_data_t diepmsk = {
4107 diepmsk.b.intktxfemp = 1;
4109 if (core_if->multiproc_int_enable) {
4112 dev_global_regs->diepeachintmsk
4113 [epnum], diepmsk.d32, 0);
4117 dev_global_regs->diepmsk,
4120 } else if (core_if->dma_desc_enable
4123 EP0_OUT_STATUS_PHASE) {
4124 /* EP0 IN set STALL */
4126 DWC_READ_REG32(&dev_if->in_ep_regs
4129 /* set the disable and stall bits */
4130 if (depctl.b.epena) {
4134 DWC_WRITE_REG32(&dev_if->in_ep_regs
4138 CLEAR_IN_EP_INTR(core_if, epnum, intktxfemp);
4140 /** IN Token Received with EP mismatch */
4141 if (diepint.b.intknepmis) {
4142 DWC_DEBUGPL(DBG_ANY,
4143 "EP%d IN TKN EP Mismatch\n", epnum);
4144 CLEAR_IN_EP_INTR(core_if, epnum, intknepmis);
4146 /** IN Endpoint NAK Effective */
4147 if (diepint.b.inepnakeff) {
4148 DWC_DEBUGPL(DBG_ANY,
4149 "EP%d IN EP NAK Effective\n",
4152 if (ep->disabling) {
4156 DWC_MODIFY_REG32(&dev_if->in_ep_regs
4161 CLEAR_IN_EP_INTR(core_if, epnum, inepnakeff);
4165 /** IN EP Tx FIFO Empty Intr */
4166 if (diepint.b.emptyintr) {
4167 DWC_DEBUGPL(DBG_ANY,
4168 "EP%d Tx FIFO Empty Intr \n",
4170 write_empty_tx_fifo(pcd, epnum);
4172 CLEAR_IN_EP_INTR(core_if, epnum, emptyintr);
4176 /** IN EP BNA Intr */
4177 if (diepint.b.bna) {
4178 CLEAR_IN_EP_INTR(core_if, epnum, bna);
4179 if (core_if->dma_desc_enable) {
4182 DWC_OTG_EP_TYPE_ISOC) {
4184 * This checking is performed to prevent first "false" BNA
4185 * handling occurring right after reconnect
4187 if (dwc_ep->next_frame !=
4189 dwc_otg_pcd_handle_iso_bna
4192 #endif /* DWC_EN_ISOC */
4194 dwc_otg_pcd_handle_noniso_bna
4200 if (diepint.b.nak) {
4201 DWC_DEBUGPL(DBG_ANY, "EP%d IN NAK Interrupt\n",
4203 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
4204 depctl_data_t depctl;
4205 if (ep->dwc_ep.frame_num == 0xFFFFFFFF) {
4206 ep->dwc_ep.frame_num =
4208 if (ep->dwc_ep.bInterval > 1) {
4212 (&dev_if->in_ep_regs
4232 (&dev_if->in_ep_regs
4236 start_next_request(ep);
4238 ep->dwc_ep.frame_num +=
4239 ep->dwc_ep.bInterval;
4240 if (dwc_ep->frame_num > 0x3FFF) {
4241 dwc_ep->frm_overrun = 1;
4242 dwc_ep->frame_num &= 0x3FFF;
4244 dwc_ep->frm_overrun = 0;
4247 CLEAR_IN_EP_INTR(core_if, epnum, nak);
4255 #undef CLEAR_IN_EP_INTR
4259 * This interrupt indicates that an OUT EP has a pending Interrupt.
4260 * The sequence for handling the OUT EP interrupt is shown below:
4261 * -# Read the Device All Endpoint Interrupt register
4262 * -# Repeat the following for each OUT EP interrupt bit set (from
4264 * -# Read the Device Endpoint Interrupt (DOEPINTn) register
4265 * -# If "Transfer Complete" call the request complete function
4266 * -# If "Endpoint Disabled" complete the EP disable procedure.
4267 * -# If "AHB Error Interrupt" log error
4268 * -# If "Setup Phase Done" process Setup Packet (See Standard USB
4269 * Command Processing)
/*
 * Service all pending OUT endpoint interrupts.  Reads the device all-EP
 * interrupt register, then for each flagged OUT endpoint reads DOEPINTn
 * and dispatches on its bits.  The dominant case is xfercompl, which for
 * EP0 contains the (core-revision dependent) setup/data/status phase
 * state machine, including the >= 3.00a workaround paths, setup-packet
 * rollover detection (SUPCNT==3), and the shared exit_xfercompl path;
 * other bits handled are stsphsercvd, epdisabled (with the dev_out_nak
 * timeout recovery and ISOC cleanup), ahberr, setup, bna, babble,
 * outtknepdis (ISOC frame/PID resync), nak, and nyet.  Each handled bit
 * is acknowledged via the local CLEAR_OUT_EP_INTR macro, which writes a
 * one-bit value back to DOEPINTn (write-1-to-clear).
 * (NOTE(review): a large number of interior lines of this function are
 * elided in this view; only visible statements are documented.  The
 * "xomplete" spelling below is inside a runtime debug format string and
 * is therefore left untouched.)
 */
4271 static int32_t dwc_otg_pcd_handle_out_ep_intr(dwc_otg_pcd_t *pcd)
4273 #define CLEAR_OUT_EP_INTR(__core_if, __epnum, __intr) \
4275 doepint_data_t doepint = {.d32 = 0}; \
4276 doepint.b.__intr = 1; \
4277 DWC_WRITE_REG32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \
4281 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
4283 doepint_data_t doepint = {
4286 dwc_otg_pcd_ep_t *ep;
4288 dctl_data_t dctl = {
4290 gintmsk_data_t gintmsk = {
4293 DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
4295 /* Read in the device interrupt bits */
4296 ep_intr = dwc_otg_read_dev_all_out_ep_intr(core_if);
4299 if (ep_intr & 0x1) {
4300 /* Get EP pointer */
4301 ep = get_out_ep(pcd, epnum);
4302 dwc_ep = &ep->dwc_ep;
4305 DWC_DEBUGPL(DBG_PCDV,
4306 "EP%d-%s: type=%d, mps=%d\n",
4307 dwc_ep->num, (dwc_ep->is_in ? "IN" : "OUT"),
4308 dwc_ep->type, dwc_ep->maxpacket);
4311 dwc_otg_read_dev_out_ep_intr(core_if, dwc_ep);
4312 /* Transfer complete */
4313 if (doepint.b.xfercompl) {
4316 /* Clear the bit in DOEPINTn for this interrupt */
4317 CLEAR_OUT_EP_INTR(core_if, epnum,
4319 if (core_if->snpsid >=
4320 OTG_CORE_REV_3_00a) {
4321 DWC_DEBUGPL(DBG_PCDV,
4322 "in xfer xomplete DOEPINT=%x doepint=%x\n",
4328 DWC_DEBUGPL(DBG_PCDV,
4335 if (core_if->snpsid >=
4337 && core_if->dma_enable ==
4339 doepint_data_t doepint;
4345 if (pcd->ep0state ==
4351 goto exit_xfercompl;
4354 /* In case of DDMA look at SR bit to go to the Data Stage */
4355 if (core_if->dma_desc_enable) {
4359 if (pcd->ep0state ==
4362 core_if->dev_if->setup_desc_addr
4364 dev_if->setup_desc_index]->status.
4366 if (pcd->data_terminated) {
4367 pcd->data_terminated
4371 core_if->dev_if->out_desc_addr->status.d32;
4373 (&pcd->setup_pkt->req,
4378 if (doepint.b.setup) {
4381 "DMA DESC EP0_IDLE SR=1 setup=1\n");
4382 /* Already started data stage, clear setup */
4392 /* Prepare for more setup packets */
4393 if (pcd->ep0state == EP0_IN_STATUS_PHASE || pcd->ep0state == EP0_IN_DATA_PHASE) {
4399 goto exit_xfercompl;
4401 /* Prepare for more setup packets */
4404 "EP0_IDLE SR=1 setup=0 new setup comes\n");
4411 dwc_otg_pcd_request_t
4424 if (pcd->ep0state == EP0_STALL || pcd->ep0state == EP0_DISCONNECT) {
4426 ("EP0 is stalled/disconnected\n");
4429 /* Clear IN xfercompl if set */
4439 EP0_IN_DATA_PHASE)) {
4448 core_if->dev_if->setup_desc_addr
4450 dev_if->setup_desc_index]->status.
4461 EP0_OUT_DATA_PHASE))
4464 core_if->dev_if->out_desc_addr->status.d32;
4465 if (pcd->ep0state == EP0_OUT_STATUS_PHASE)
4469 out_desc_addr->status.d32;
4472 if (DWC_CIRCLEQ_EMPTY(&ep->queue)) {
4475 "Request queue empty!!\n");
4479 "complete req!!\n");
4480 req = DWC_CIRCLEQ_FIRST(&ep->queue);
4481 if (ep->dwc_ep.xfer_count != ep->dwc_ep.total_len && pcd->ep0state == EP0_OUT_DATA_PHASE) {
4482 /* Read arrived setup packet from req->buf */
4484 (&pcd->setup_pkt->req,
4487 ep->dwc_ep.xfer_count,
4492 ep->dwc_ep.xfer_count;
4493 dwc_otg_request_done
4497 ep->dwc_ep.start_xfer_buff = 0;
4498 ep->dwc_ep.xfer_buff = 0;
4499 ep->dwc_ep.xfer_len = 0;
4504 if (doepint.b.setup) {
4507 "EP0_IDLE SR=1 setup=1\n");
4508 /* Data stage started, clear setup */
4518 /* Prepare for setup packets if ep0in was enabled */
4519 if (pcd->ep0state == EP0_IN_STATUS_PHASE) {
4525 goto exit_xfercompl;
4527 /* Prepare for more setup packets */
4530 "EP0_IDLE SR=1 setup=0 new setup comes 2\n");
4538 if (core_if->snpsid >=
4540 && core_if->dma_enable
4541 && core_if->dma_desc_enable
4576 EP0_IN_STATUS_PHASE)){
4583 "WA for xfercompl along with stsphs \n");
4590 goto exit_xfercompl;
4593 if (pcd->ep0state ==
4595 if (doepint_temp.b.sr) {
4601 /* Delay is needed for core to update setup
4602 * packet count from 3 to 2 after receiving
4610 if (doeptsize0.b.supcnt == 3) {
4613 "Rolling over!!!!!!!\n");
4614 ep->dwc_ep.stp_rollover = 1;
4619 /* Already started data stage, clear setup */
4620 CLEAR_OUT_EP_INTR(core_if, epnum, setup);
4625 ep->dwc_ep.stp_rollover = 0;
4626 /* Prepare for more setup packets */
4627 if (pcd->ep0state == EP0_IN_STATUS_PHASE || pcd->ep0state == EP0_IN_DATA_PHASE) {
4638 /* Core not updating setup packet count
4639 * in case of PET testing - @TODO vahrama
4640 * to check with HW team further */
4641 if (!core_if->otg_ver) {
4650 goto exit_xfercompl;
4652 /* Prepare for more setup packets */
4655 "EP0_IDLE SR=1 setup=0 new setup comes\n");
4662 if (doepint.b.setup)
4669 dwc_otg_pcd_request_t
4691 if (pcd->ep0state == EP0_IN_DATA_PHASE || pcd->ep0state == EP0_IN_STATUS_PHASE) {
4692 if (diepint0.b.xfercompl) {
4699 if (diepctl0.b.epena) {
4721 } while (!diepint.b.inepnakeff);
4722 diepint.b.inepnakeff
4737 (&core_if->dev_if->in_ep_regs
4749 } while (!diepint.b.epdisabled);
4750 diepint.b.epdisabled
4754 (&core_if->dev_if->in_ep_regs
4764 [ep->dwc_ep.num]->doepint);
4765 if (doepint_temp.b.sr) {
4770 if (DWC_CIRCLEQ_EMPTY(&ep->queue)) {
4773 "Request queue empty!!\n");
4777 "complete req!!\n");
4778 req = DWC_CIRCLEQ_FIRST(&ep->queue);
4779 if (ep->dwc_ep.xfer_count != ep->dwc_ep.total_len && pcd->ep0state == EP0_OUT_DATA_PHASE) {
4780 /* Read arrived setup packet from req->buf */
4782 (&pcd->setup_pkt->req,
4785 ep->dwc_ep.xfer_count,
4790 ep->dwc_ep.xfer_count;
4791 dwc_otg_request_done
4795 ep->dwc_ep.start_xfer_buff = 0;
4796 ep->dwc_ep.xfer_buff = 0;
4797 ep->dwc_ep.xfer_len = 0;
4802 if (doepint.b.setup) {
4805 "EP0_IDLE SR=1 setup=1\n");
4806 /* Data stage started, clear setup */
4816 /* Prepare for setup packets if ep0in was enabled */
4817 if (pcd->ep0state == EP0_IN_STATUS_PHASE) {
4828 /* Core not updating setup packet count
4829 * in case of PET testing - @TODO vahrama
4830 * to check with HW team further */
4831 if (!core_if->otg_ver) {
4840 goto exit_xfercompl;
4842 /* Prepare for more setup packets */
4845 "EP0_IDLE SR=1 setup=0 new setup comes 2\n");
4853 if (core_if->dma_enable == 0
4858 DWC_DEBUGPL(DBG_PCDV,
4859 "after DOEPINT=%x doepint=%x\n",
4860 dwc_otg_read_dev_out_ep_intr
4864 if (core_if->dma_desc_enable ==
4871 } else if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
4872 if (doepint.b.pktdrpsts == 0) {
4873 /* Clear the bit in DOEPINTn for this interrupt */
4874 CLEAR_OUT_EP_INTR(core_if,
4877 complete_iso_ep(pcd, ep);
4880 doepint_data_t doepint = {
4882 doepint.b.xfercompl = 1;
4883 doepint.b.pktdrpsts = 1;
4889 if (handle_iso_out_pkt_dropped
4890 (core_if, dwc_ep)) {
4891 complete_iso_ep(pcd,
4895 #endif /* DWC_EN_ISOC */
4896 #ifdef DWC_UTE_PER_IO
4897 } else if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
4898 CLEAR_OUT_EP_INTR(core_if, epnum,
4901 complete_xiso_ep(ep);
4902 #endif /* DWC_UTE_PER_IO */
4904 /* Clear the bit in DOEPINTn for this interrupt */
4905 CLEAR_OUT_EP_INTR(core_if, epnum,
4908 if (core_if->core_params->dev_out_nak) {
4909 DWC_TIMER_CANCEL(pcd->
4910 core_if->ep_xfer_timer
4913 core_if->ep_xfer_info
4916 print_memory_payload(pcd,
4925 if (doepint.b.stsphsercvd) {
4926 deptsiz0_data_t deptsiz;
4927 CLEAR_OUT_EP_INTR(core_if, epnum, stsphsercvd);
4929 DWC_READ_REG32(&core_if->dev_if->
4930 out_ep_regs[0]->doeptsiz);
4931 if ((core_if->dma_desc_enable)
4932 || (core_if->dma_enable
4933 && core_if->snpsid >=
4934 OTG_CORE_REV_3_00a)) {
4935 do_setup_in_status_phase(pcd);
4939 /* Endpoint disable */
4940 if (doepint.b.epdisabled) {
4942 /* Clear the bit in DOEPINTn for this interrupt */
4943 CLEAR_OUT_EP_INTR(core_if, epnum, epdisabled);
4944 if (core_if->core_params->dev_out_nak) {
4946 print_memory_payload(pcd, dwc_ep);
4948 /* In case of timeout condition */
4950 ep_xfer_info[epnum].state == 2) {
4954 dev_global_regs->dctl);
4955 dctl.b.cgoutnak = 1;
4957 (&core_if->dev_if->dev_global_regs->dctl,
4959 /* Unmask goutnakeff interrupt which was masked
4960 * during handle nak out interrupt */
4961 gintmsk.b.goutnakeff = 1;
4963 (&core_if->core_global_regs->gintmsk,
4969 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
4971 gintmsk_data_t intr_mask = {
4973 dwc_otg_pcd_request_t *req = 0;
4976 DWC_READ_REG32(&core_if->dev_if->
4977 dev_global_regs->dctl);
4978 dctl.b.cgoutnak = 1;
4979 DWC_WRITE_REG32(&core_if->
4980 dev_if->dev_global_regs->
4984 intr_mask.b.incomplisoout = 1;
4986 /* Get any pending requests */
4987 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
4993 ("complete_ep 0x%p, req = NULL!\n",
4996 dwc_otg_request_done(ep,
4999 start_next_request(ep);
5003 ("complete_ep 0x%p, ep->queue empty!\n",
5009 if (doepint.b.ahberr) {
5010 DWC_ERROR("EP%d OUT AHB Error\n", epnum);
5011 DWC_ERROR("EP%d DEPDMA=0x%08x \n",
5014 out_ep_regs[epnum]->doepdma);
5015 CLEAR_OUT_EP_INTR(core_if, epnum, ahberr);
5017 /* Setup Phase Done (control EPs) */
5018 if (doepint.b.setup) {
5020 DWC_DEBUGPL(DBG_PCD, "EP%d SETUP Done\n",
5023 CLEAR_OUT_EP_INTR(core_if, epnum, setup);
5028 /** OUT EP BNA Intr */
5029 if (doepint.b.bna) {
5030 CLEAR_OUT_EP_INTR(core_if, epnum, bna);
5031 if (core_if->dma_desc_enable) {
5034 DWC_OTG_EP_TYPE_ISOC) {
5036 * This checking is performed to prevent first "false" BNA
5037 * handling occurring right after reconnect
5039 if (dwc_ep->next_frame !=
5041 dwc_otg_pcd_handle_iso_bna
5044 #endif /* DWC_EN_ISOC */
5046 dwc_otg_pcd_handle_noniso_bna
5051 /* Babble Interrupt */
5052 if (doepint.b.babble) {
5053 DWC_DEBUGPL(DBG_ANY, "EP%d OUT Babble\n",
5055 handle_out_ep_babble_intr(pcd, epnum);
5057 CLEAR_OUT_EP_INTR(core_if, epnum, babble);
5059 if (doepint.b.outtknepdis) {
5060 DWC_DEBUGPL(DBG_ANY, "EP%d OUT Token received when EP is \
5063 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
5064 doepmsk_data_t doepmsk = {
5066 ep->dwc_ep.frame_num =
5068 if (ep->dwc_ep.bInterval > 1) {
5069 depctl_data_t depctl;
5075 if (ep->dwc_ep.frame_num & 0x1) {
5076 depctl.b.setd1pid = 1;
5077 depctl.b.setd0pid = 0;
5079 depctl.b.setd0pid = 1;
5080 depctl.b.setd1pid = 0;
5088 start_next_request(ep);
5089 doepmsk.b.outtknepdis = 1;
5090 DWC_MODIFY_REG32(&core_if->
5091 dev_if->dev_global_regs->doepmsk,
5094 CLEAR_OUT_EP_INTR(core_if, epnum, outtknepdis);
5098 if (doepint.b.nak) {
5099 DWC_DEBUGPL(DBG_ANY, "EP%d OUT NAK\n", epnum);
5100 handle_out_ep_nak_intr(pcd, epnum);
5102 CLEAR_OUT_EP_INTR(core_if, epnum, nak);
5104 /* NYET Interrupt */
5105 if (doepint.b.nyet) {
5106 DWC_DEBUGPL(DBG_ANY, "EP%d OUT NYET\n", epnum);
5107 handle_out_ep_nyet_intr(pcd, epnum);
5109 CLEAR_OUT_EP_INTR(core_if, epnum, nyet);
5119 #undef CLEAR_OUT_EP_INTR
/*
 * Decide whether a (micro)frame-scheduled transfer should be dropped
 * because its target frame has already passed.  The condition is true
 * when curr_fr has reached trgt_fr, with the frm_overrun flag selecting
 * a wrap-aware comparison against the 14-bit frame space (half-range
 * test, 0x3FFF / 2).  NOTE(review): the return statements are elided in
 * this view; presumably the function returns nonzero when either
 * condition below holds — confirm against the full source.
 */
5122 static int drop_transfer(uint32_t trgt_fr, uint32_t curr_fr,
5123 uint8_t frm_overrun)
5126 if (!frm_overrun && curr_fr >= trgt_fr)
5128 else if (frm_overrun
5129 && (curr_fr >= trgt_fr && ((curr_fr - trgt_fr) < 0x3FFF / 2)))
5135 * Incomplete ISO IN Transfer Interrupt.
5136 * This interrupt indicates one of the following conditions occurred
5137 * while transmitting an ISOC transaction.
5138 * - Corrupted IN Token for ISOC EP.
5139 * - Packet not complete in FIFO.
5140 * The follow actions will be taken:
5141 * -# Determine the EP
5142 * -# Set incomplete flag in dwc_ep structure
5143 * -# Disable EP; when "Endpoint Disabled" interrupt is received
/*
 * Handle the Incomplete ISO IN interrupt.  With DWC_EN_ISOC: walk the IN
 * endpoints, and for each active ISOC EP whose transfer stopped early
 * (epdis set with a non-zero DIEPTSIZ) capture current packet info, flip
 * the double-buffer (proc_buf_num ^ 1), resync next_frame from
 * DSTS.soffn, and restart the frame transfer.  Without DWC_EN_ISOC:
 * walk the IN endpoints and force-disable any enabled ISOC EP whose
 * target frame has passed (per drop_transfer()).  Finally acknowledge
 * the interrupt by writing incomplisoin back to GINTSTS.
 * (NOTE(review): interior lines of both variants are elided in this
 * view; note also the index mismatch between the two loops —
 * in_ep[i] vs. in_ep[i - 1] — which is taken verbatim from the source.)
 */
5146 int32_t dwc_otg_pcd_handle_incomplete_isoc_in_intr(dwc_otg_pcd_t *pcd)
5148 gintsts_data_t gintsts;
5151 dwc_otg_dev_if_t *dev_if;
5152 deptsiz_data_t deptsiz = {
5154 depctl_data_t depctl = {
5156 dsts_data_t dsts = {
5161 dev_if = GET_CORE_IF(pcd)->dev_if;
5163 for (i = 1; i <= dev_if->num_in_eps; ++i) {
5164 dwc_ep = &pcd->in_ep[i].dwc_ep;
5165 if (dwc_ep->active && dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
5167 DWC_READ_REG32(&dev_if->in_ep_regs[i]->dieptsiz);
5169 DWC_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
5171 if (depctl.b.epdis && deptsiz.d32) {
5172 set_current_pkt_info(GET_CORE_IF(pcd), dwc_ep);
5173 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
5174 dwc_ep->cur_pkt = 0;
5175 dwc_ep->proc_buf_num =
5176 (dwc_ep->proc_buf_num ^ 1) & 0x1;
5178 if (dwc_ep->proc_buf_num) {
5179 dwc_ep->cur_pkt_addr =
5181 dwc_ep->cur_pkt_dma_addr =
5184 dwc_ep->cur_pkt_addr =
5186 dwc_ep->cur_pkt_dma_addr =
5193 DWC_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
5194 dev_global_regs->dsts);
5195 dwc_ep->next_frame = dsts.b.soffn;
5197 dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF
5205 depctl_data_t depctl = {
5208 dwc_otg_dev_if_t *dev_if;
5210 dev_if = GET_CORE_IF(pcd)->dev_if;
5212 DWC_DEBUGPL(DBG_PCD, "Incomplete ISO IN \n");
5214 for (i = 1; i <= dev_if->num_in_eps; ++i) {
5215 dwc_ep = &pcd->in_ep[i - 1].dwc_ep;
5216 depctl.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
5217 if (depctl.b.epena && dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
5219 (dwc_ep->frame_num, GET_CORE_IF(pcd)->frame_num,
5220 dwc_ep->frm_overrun)) {
5222 DWC_READ_REG32(&dev_if->
5223 in_ep_regs[i]->diepctl);
5226 DWC_MODIFY_REG32(&dev_if->
5227 in_ep_regs[i]->diepctl,
5228 depctl.d32, depctl.d32);
5233 /*intr_mask.b.incomplisoin = 1;
5234 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
5235 intr_mask.d32, 0); */
5236 #endif /* DWC_EN_ISOC */
5238 /* Clear interrupt */
5240 gintsts.b.incomplisoin = 1;
5241 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
5248 * Incomplete ISO OUT Transfer Interrupt.
5250 * This interrupt indicates that the core has dropped an ISO OUT
5251 * packet. The following conditions can be the cause:
5252 * - FIFO Full, the entire packet would not fit in the FIFO.
5255 * The follow actions will be taken:
5256 * -# Determine the EP
5257 * -# Set incomplete flag in dwc_ep structure
5258 * -# Read any data from the FIFO
5259 * -# Disable EP. When "Endpoint Disabled" interrupt is received
/*
 * Handle the Incomplete ISO OUT interrupt.  With DWC_EN_ISOC: walk the
 * OUT endpoints and, for each active ISOC EP whose transfer stopped
 * early, refresh packet info, flip the double-buffer, resync next_frame
 * from DSTS.soffn and restart the frame transfer.  Without DWC_EN_ISOC:
 * find the enabled OUT EP whose DPID matches the current frame parity,
 * remember it in dev_if->isoc_ep, then request global OUT NAK (set
 * DCTL.sgoutnak, unmask GINTMSK.goutnakeff) so the EP can be disabled
 * from the OUT NAK Effective handler.  Finally acknowledge the interrupt
 * by writing incomplisoout back to GINTSTS.
 * (NOTE(review): interior lines are elided in this view.)
 */
5262 int32_t dwc_otg_pcd_handle_incomplete_isoc_out_intr(dwc_otg_pcd_t *pcd)
5265 gintsts_data_t gintsts;
5268 dwc_otg_dev_if_t *dev_if;
5269 deptsiz_data_t deptsiz = {
5271 depctl_data_t depctl = {
5273 dsts_data_t dsts = {
5278 dev_if = GET_CORE_IF(pcd)->dev_if;
5280 for (i = 1; i <= dev_if->num_out_eps; ++i) {
/* NOTE(review): this reads pcd->in_ep[i] inside the OUT-EP handler while
 * the surrounding checks use pcd->out_ep[i] — looks like a copy/paste
 * bug; verify whether this should be &pcd->out_ep[i].dwc_ep. */
5281 dwc_ep = &pcd->in_ep[i].dwc_ep;
5282 if (pcd->out_ep[i].dwc_ep.active &&
5283 pcd->out_ep[i].dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
5285 DWC_READ_REG32(&dev_if->out_ep_regs[i]->doeptsiz);
5287 DWC_READ_REG32(&dev_if->out_ep_regs[i]->doepctl);
5289 if (depctl.b.epdis && deptsiz.d32) {
5290 set_current_pkt_info(GET_CORE_IF(pcd),
5291 &pcd->out_ep[i].dwc_ep);
5292 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
5293 dwc_ep->cur_pkt = 0;
5294 dwc_ep->proc_buf_num =
5295 (dwc_ep->proc_buf_num ^ 1) & 0x1;
5297 if (dwc_ep->proc_buf_num) {
5298 dwc_ep->cur_pkt_addr =
5300 dwc_ep->cur_pkt_dma_addr =
5303 dwc_ep->cur_pkt_addr =
5305 dwc_ep->cur_pkt_dma_addr =
5312 DWC_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
5313 dev_global_regs->dsts);
5314 dwc_ep->next_frame = dsts.b.soffn;
5316 dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF
5323 /** @todo implement ISR */
5324 gintmsk_data_t intr_mask = {
5326 dwc_otg_core_if_t *core_if;
5327 deptsiz_data_t deptsiz = {
5329 depctl_data_t depctl = {
5331 dctl_data_t dctl = {
5333 dwc_ep_t *dwc_ep = NULL;
5335 core_if = GET_CORE_IF(pcd);
5337 for (i = 0; i < core_if->dev_if->num_out_eps; ++i) {
5338 dwc_ep = &pcd->out_ep[i].dwc_ep;
5340 DWC_READ_REG32(&core_if->dev_if->
5341 out_ep_regs[dwc_ep->num]->doepctl);
5343 && depctl.b.dpid == (core_if->frame_num & 0x1)) {
5344 core_if->dev_if->isoc_ep = dwc_ep;
5346 DWC_READ_REG32(&core_if->
5347 dev_if->out_ep_regs[dwc_ep->num]->
5352 dctl.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
5353 gintsts.d32 = DWC_READ_REG32(&core_if->core_global_regs->gintsts);
5354 intr_mask.d32 = DWC_READ_REG32(&core_if->core_global_regs->gintmsk);
5356 if (!intr_mask.b.goutnakeff) {
5358 intr_mask.b.goutnakeff = 1;
5359 DWC_WRITE_REG32(&core_if->core_global_regs->gintmsk,
5362 if (!gintsts.b.goutnakeff) {
5363 dctl.b.sgoutnak = 1;
5365 DWC_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
5368 DWC_READ_REG32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl);
5369 if (depctl.b.epena) {
5373 DWC_WRITE_REG32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl,
5377 intr_mask.b.incomplisoout = 1;
5379 #endif /* DWC_EN_ISOC */
5381 /* Clear interrupt */
5383 gintsts.b.incomplisoout = 1;
5384 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
5391 * This function handles the Global IN NAK Effective interrupt.
/*
 * Handle the Global IN NAK Effective interrupt.  Walks IN endpoints
 * 0..num_in_eps and sets DIEPCTL.epdis on every enabled EP whose eptype
 * low bit is clear (i.e. non-periodic types), bumping start_predict for
 * each when prediction is active.  Then masks GINTMSK.ginnakeff so the
 * interrupt stops firing, and acknowledges it by writing ginnakeff back
 * to GINTSTS (write-1-to-clear).
 * (NOTE(review): a few interior lines are elided in this view.)
 */
5394 int32_t dwc_otg_pcd_handle_in_nak_effective(dwc_otg_pcd_t *pcd)
5396 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
5397 depctl_data_t diepctl = {
5399 gintmsk_data_t intr_mask = {
5401 gintsts_data_t gintsts;
5402 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
5405 DWC_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n");
5407 /* Disable all active IN EPs */
5408 for (i = 0; i <= dev_if->num_in_eps; i++) {
5409 diepctl.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
5410 if (!(diepctl.b.eptype & 1) && diepctl.b.epena) {
5411 if (core_if->start_predict > 0)
5412 core_if->start_predict++;
5413 diepctl.b.epdis = 1;
5415 DWC_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl,
5420 /* Disable the Global IN NAK Effective Interrupt */
5421 intr_mask.b.ginnakeff = 1;
5422 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
5425 /* Clear interrupt */
5427 gintsts.b.ginnakeff = 1;
5428 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
5435 * OUT NAK Effective.
/*
 * Handle the Global OUT NAK Effective interrupt.  First masks
 * GINTMSK.goutnakeff.  Then, depending on why OUT NAK was requested:
 *  - dev_out_nak timeout path: locate the OUT EP whose ep_xfer_info
 *    state is 2 and disable it; if none is found, just clear global OUT
 *    NAK (DCTL.cgoutnak) and bail;
 *  - Incomplete ISO OUT path: dev_if->isoc_ep was set by
 *    dwc_otg_pcd_handle_incomplete_isoc_out_intr(); disable that EP and
 *    clear the pointer;
 *  - otherwise just print that no handler is implemented.
 * Finally acknowledges the interrupt by writing goutnakeff back to
 * GINTSTS.
 * (NOTE(review): interior lines are elided in this view.  The printed
 * string below ends in "\n" and is fed to a "%s\n" format, yielding a
 * double newline — cosmetic, left as-is.)
 */
5438 int32_t dwc_otg_pcd_handle_out_nak_effective(dwc_otg_pcd_t *pcd)
5440 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
5441 gintmsk_data_t intr_mask = {
5443 gintsts_data_t gintsts;
5444 depctl_data_t doepctl;
5447 /* Disable the Global OUT NAK Effective Interrupt */
5448 intr_mask.b.goutnakeff = 1;
5449 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
5452 /* If DEV OUT NAK enabled */
5453 if (pcd->core_if->core_params->dev_out_nak) {
5454 /* Run over all out endpoints to determine the ep number on
5455 * which the timeout has happened
5457 for (i = 0; i <= dev_if->num_out_eps; i++) {
5458 if (pcd->core_if->ep_xfer_info[i].state == 2)
5461 if (i > dev_if->num_out_eps) {
5464 DWC_READ_REG32(&dev_if->dev_global_regs->dctl);
5465 dctl.b.cgoutnak = 1;
5466 DWC_WRITE_REG32(&dev_if->dev_global_regs->dctl,
5471 /* Disable the endpoint */
5472 doepctl.d32 = DWC_READ_REG32(&dev_if->out_ep_regs[i]->doepctl);
5473 if (doepctl.b.epena) {
5474 doepctl.b.epdis = 1;
5477 DWC_WRITE_REG32(&dev_if->out_ep_regs[i]->doepctl, doepctl.d32);
5480 /* We come here from Incomplete ISO OUT handler */
5481 if (dev_if->isoc_ep) {
5482 dwc_ep_t *dwc_ep = (dwc_ep_t *) dev_if->isoc_ep;
5483 uint32_t epnum = dwc_ep->num;
5484 doepint_data_t doepint;
5486 DWC_READ_REG32(&dev_if->out_ep_regs[dwc_ep->num]->doepint);
5487 dev_if->isoc_ep = NULL;
5489 DWC_READ_REG32(&dev_if->out_ep_regs[epnum]->doepctl);
5490 DWC_PRINTF("Before disable DOEPCTL = %08x\n", doepctl.d32);
5491 if (doepctl.b.epena) {
5492 doepctl.b.epdis = 1;
5495 DWC_WRITE_REG32(&dev_if->out_ep_regs[epnum]->doepctl,
5499 DWC_PRINTF("INTERRUPT Handler not implemented for %s\n",
5500 "Global OUT NAK Effective\n");
5503 /* Clear interrupt */
5505 gintsts.b.goutnakeff = 1;
5506 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
5513 * PCD interrupt handler.
5515 * The PCD handles the device interrupts. Many conditions can cause a
5516 * device interrupt. When an interrupt occurs, the device interrupt
5517 * service routine determines the cause of the interrupt and
5518 * dispatches handling to the appropriate function. These interrupt
5519 * handling functions are described below.
5521 * All interrupt registers are processed from LSB to MSB.
/*
 * Top-level PCD (device-mode) interrupt dispatcher.  Bails out early if
 * the HAPS platform is disconnected or the core is hibernated.  In
 * device mode it takes pcd->lock, reads the masked interrupt status
 * (GINTSTS & GINTMSK via dwc_otg_read_core_intr) and dispatches each set
 * bit to its dedicated handler, OR-ing the handlers' results into the
 * return value.  usbreset additionally resets pcd->conn_status to -1.
 * inepint/outepintr are dispatched only when multiprocessor interrupt
 * mode is off; in MPI mode the per-EP handlers are instead called
 * unconditionally at the end (the hardware does not set those summary
 * bits in that mode).  The lock is released before returning.
 * (NOTE(review): some interior lines, including the final return, are
 * elided in this view.)
 */
5524 int32_t dwc_otg_pcd_handle_intr(dwc_otg_pcd_t *pcd)
5526 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
5528 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
5530 gintsts_data_t gintr_status;
5533 if (dwc_otg_check_haps_status(core_if) == -1) {
5534 DWC_WARN("HAPS is disconnected");
5538 /* Exit from ISR if core is hibernated */
5539 if (core_if->hibernation_suspend == 1) {
5543 DWC_DEBUGPL(DBG_ANY, "%s() gintsts=%08x gintmsk=%08x\n",
5545 DWC_READ_REG32(&global_regs->gintsts),
5546 DWC_READ_REG32(&global_regs->gintmsk));
5549 if (dwc_otg_is_device_mode(core_if)) {
5550 DWC_SPINLOCK(pcd->lock);
5552 DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%08x gintmsk=%08x\n",
5554 DWC_READ_REG32(&global_regs->gintsts),
5555 DWC_READ_REG32(&global_regs->gintmsk));
5558 gintr_status.d32 = dwc_otg_read_core_intr(core_if);
5560 DWC_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n",
5561 __func__, gintr_status.d32);
5563 if (gintr_status.b.sofintr) {
5564 retval |= dwc_otg_pcd_handle_sof_intr(pcd);
5566 if (gintr_status.b.rxstsqlvl) {
5568 dwc_otg_pcd_handle_rx_status_q_level_intr(pcd);
5570 if (gintr_status.b.nptxfempty) {
5571 retval |= dwc_otg_pcd_handle_np_tx_fifo_empty_intr(pcd);
5573 if (gintr_status.b.goutnakeff) {
5574 retval |= dwc_otg_pcd_handle_out_nak_effective(pcd);
5576 if (gintr_status.b.i2cintr) {
5577 retval |= dwc_otg_pcd_handle_i2c_intr(pcd);
5579 if (gintr_status.b.erlysuspend) {
5580 retval |= dwc_otg_pcd_handle_early_suspend_intr(pcd);
5582 if (gintr_status.b.usbreset) {
5583 retval |= dwc_otg_pcd_handle_usb_reset_intr(pcd);
5584 pcd->conn_status = -1;
5586 if (gintr_status.b.enumdone) {
5587 retval |= dwc_otg_pcd_handle_enum_done_intr(pcd);
5589 if (gintr_status.b.isooutdrop) {
5591 dwc_otg_pcd_handle_isoc_out_packet_dropped_intr
5594 if (gintr_status.b.eopframe) {
5596 dwc_otg_pcd_handle_end_periodic_frame_intr(pcd);
5598 if (gintr_status.b.inepint) {
5599 if (!core_if->multiproc_int_enable) {
5600 retval |= dwc_otg_pcd_handle_in_ep_intr(pcd);
5603 if (gintr_status.b.outepintr) {
5604 if (!core_if->multiproc_int_enable) {
5605 retval |= dwc_otg_pcd_handle_out_ep_intr(pcd);
5608 if (gintr_status.b.epmismatch) {
5609 retval |= dwc_otg_pcd_handle_ep_mismatch_intr(pcd);
5611 if (gintr_status.b.fetsusp) {
5612 retval |= dwc_otg_pcd_handle_ep_fetsusp_intr(pcd);
5614 if (gintr_status.b.ginnakeff) {
5615 retval |= dwc_otg_pcd_handle_in_nak_effective(pcd);
5617 if (gintr_status.b.incomplisoin) {
5619 dwc_otg_pcd_handle_incomplete_isoc_in_intr(pcd);
5621 if (gintr_status.b.incomplisoout) {
5623 dwc_otg_pcd_handle_incomplete_isoc_out_intr(pcd);
5626 /* In MPI mode Device Endpoints interrupts are asserted
5627 * without setting outepintr and inepint bits set, so these
5628 * Interrupt handlers are called without checking these bit-fields
5630 if (core_if->multiproc_int_enable) {
5631 retval |= dwc_otg_pcd_handle_in_ep_intr(pcd);
5632 retval |= dwc_otg_pcd_handle_out_ep_intr(pcd);
5635 DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%0x\n", __func__,
5636 DWC_READ_REG32(&global_regs->gintsts));
5638 DWC_SPINUNLOCK(pcd->lock);
5643 #endif /* DWC_HOST_ONLY */