1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd_intr.c $
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 * ========================================================================== */
35 #include "dwc_otg_pcd.h"
36 #include "dwc_otg_driver.h"
39 #include "dwc_otg_cfi.h"
43 extern void complete_xiso_ep(dwc_otg_pcd_ep_t *ep);
45 /* #define PRINT_CFI_DMA_DESCS */
50 * This function updates OTG.
/* Clears the cached OTG/HNP negotiation flags on the PCD and notifies the
 * gadget driver (if it registered an hnp_changed callback).
 * NOTE(review): this view of the function is incomplete — the use of the
 * `reset` parameter and the closing of the body are not visible here. */
52 static void dwc_otg_pcd_update_otg(dwc_otg_pcd_t *pcd, const unsigned reset)
/* Drop any previously negotiated HNP capabilities. */
56 	pcd->b_hnp_enable = 0;
57 	pcd->a_hnp_support = 0;
58 	pcd->a_alt_hnp_support = 0;
/* Tell the gadget driver the HNP state changed; callback is optional. */
61 	if (pcd->fops->hnp_changed) {
62 		pcd->fops->hnp_changed(pcd);
67 * This file contains the implementation of the PCD Interrupt handlers.
69 * The PCD handles the device interrupts. Many conditions can cause a
70 * device interrupt. When an interrupt occurs, the device interrupt
71 * service routine determines the cause of the interrupt and
72 * dispatches handling to the appropriate function. These interrupt
73 * handling functions are described below.
74 * All interrupt registers are processed from LSB to MSB.
78 * This function prints the ep0 state for debug purposes.
/* Debug helper: translates pcd->ep0state into a human-readable string in a
 * local buffer `str` (declared in lines not visible here) and prints it
 * together with the numeric state via DWC_DEBUGPL(DBG_ANY, ...). */
80 static inline void print_ep0_state(dwc_otg_pcd_t *pcd)
85 	switch (pcd->ep0state) {
87 		dwc_strcpy(str, "EP0_DISCONNECT");
90 		dwc_strcpy(str, "EP0_IDLE");
92 	case EP0_IN_DATA_PHASE:
93 		dwc_strcpy(str, "EP0_IN_DATA_PHASE");
95 	case EP0_OUT_DATA_PHASE:
96 		dwc_strcpy(str, "EP0_OUT_DATA_PHASE");
98 	case EP0_IN_STATUS_PHASE:
99 		dwc_strcpy(str, "EP0_IN_STATUS_PHASE");
101 	case EP0_OUT_STATUS_PHASE:
102 		dwc_strcpy(str, "EP0_OUT_STATUS_PHASE");
105 		dwc_strcpy(str, "EP0_STALL");
/* Default/unrecognized state falls through to "EP0_INVALID". */
108 		dwc_strcpy(str, "EP0_INVALID");
111 	DWC_DEBUGPL(DBG_ANY, "%s(%d)\n", str, pcd->ep0state);
116 * This function calculates the size of the payload in the memory
117 * for out endpoints and prints size for debug purposes (used in
118 * 2.93a DevOutNak feature).
/* Debug helper (2.93a DevOutNak feature): compares the DOEPTSIZ value
 * programmed at transfer start (saved in start_doeptsiz_val[]) against the
 * current hardware value to work out how many bytes/packets actually
 * reached memory for an OUT endpoint, then prints both. */
120 static inline void print_memory_payload(dwc_otg_pcd_t *pcd, dwc_ep_t *ep)
123 	deptsiz_data_t deptsiz_init = {.d32 = 0 };
124 	deptsiz_data_t deptsiz_updt = {.d32 = 0 };
/* Snapshot taken when the transfer was started by the driver. */
128 	deptsiz_init.d32 = pcd->core_if->start_doeptsiz_val[ep->num];
/* Current value read back from the endpoint's DOEPTSIZ register. */
130 	    DWC_READ_REG32(&pcd->core_if->dev_if->
131 			   out_ep_regs[ep->num]->doeptsiz);
132 	/* Payload will be */
133 	payload = deptsiz_init.b.xfersize - deptsiz_updt.b.xfersize;
134 	/* Packet count is decremented every time a packet
135 	 * is written to the RxFIFO not in to the external memory
136 	 * So, if payload == 0, then it means no packet was sent to ext memory*/
138 	    (!payload) ? 0 : (deptsiz_init.b.pktcnt - deptsiz_updt.b.pktcnt);
139 	DWC_DEBUGPL(DBG_PCDV, "Payload for EP%d-%s\n", ep->num,
140 		    (ep->is_in ? "IN" : "OUT"));
141 	DWC_DEBUGPL(DBG_PCDV, "Number of transfered bytes = 0x%08x\n", payload);
142 	DWC_DEBUGPL(DBG_PCDV, "Number of transfered packets = %d\n", pack_num);
/* Debug helper: dumps one DMA descriptor (buffer address plus the unpacked
 * status bit-fields: bytes, sp, l, sts, bs) for endpoint `epname`,
 * descriptor index `descnum`. The enclosing print call starts on a line
 * not visible in this view. */
147 static inline void print_desc(struct dwc_otg_dma_desc *ddesc,
148 			      const uint8_t *epname, int descnum)
151 	    ("%s DMA_DESC(%d) buf=0x%08x bytes=0x%04x; sp=0x%x; l=0x%x; sts=0x%02x; bs=0x%02x\n",
152 	     epname, descnum, ddesc->buf, ddesc->status.b.bytes,
153 	     ddesc->status.b.sp, ddesc->status.b.l, ddesc->status.b.sts,
159 * This function returns pointer to in ep struct with number ep_num
/* Returns the IN endpoint structure whose hardware endpoint number equals
 * ep_num, by linear search over pcd->in_ep[]. NOTE(review): the handling
 * of ep_num == 0 (EP0) and the not-found return path are in lines not
 * visible in this view — confirm against the full source. */
161 static inline dwc_otg_pcd_ep_t *get_in_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num)
164 	int num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
168 	for (i = 0; i < num_in_eps; ++i) {
169 		if (pcd->in_ep[i].dwc_ep.num == ep_num)
170 			return &pcd->in_ep[i];
177 * This function returns pointer to out ep struct with number ep_num
/* Returns the OUT endpoint structure whose hardware endpoint number equals
 * ep_num, by linear search over pcd->out_ep[]. NOTE(review): the EP0
 * special case and the not-found return path are in lines not visible in
 * this view — confirm against the full source. */
179 static inline dwc_otg_pcd_ep_t *get_out_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num)
182 	int num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
186 	for (i = 0; i < num_out_eps; ++i) {
187 		if (pcd->out_ep[i].dwc_ep.num == ep_num)
188 			return &pcd->out_ep[i];
195 * This functions gets a pointer to an EP from the wIndex address
196 * value of the control request.
/* Resolves a control-request wIndex field to an endpoint structure:
 * extracts the endpoint number with UE_GET_ADDR and the direction with
 * UE_GET_DIR, then indexes in_ep[]/out_ep[] (which exclude EP0, hence the
 * "- 1").  NOTE(review): the ep_num == 0 branch preceding the visible
 * else-if is not shown in this view. */
198 dwc_otg_pcd_ep_t *get_ep_by_addr(dwc_otg_pcd_t *pcd, u16 wIndex)
200 	dwc_otg_pcd_ep_t *ep;
201 	uint32_t ep_num = UE_GET_ADDR(wIndex);
205 	} else if (UE_GET_DIR(wIndex) == UE_DIR_IN) {	/* in ep */
206 		ep = &pcd->in_ep[ep_num - 1];
208 		ep = &pcd->out_ep[ep_num - 1];
215 * This function checks the EP request queue, if the queue is not
216 * empty the next request is started.
/* Dequeues the next pending request on `ep` (if any) and programs the
 * dwc_ep transfer bookkeeping (DMA address, buffers, lengths, maxxfer,
 * ZLP flag) before kicking the hardware with dwc_otg_ep_start_transfer().
 * If the queue is empty and the endpoint is isochronous, the frame number
 * is invalidated instead. */
218 void start_next_request(dwc_otg_pcd_ep_t *ep)
220 	dwc_otg_pcd_request_t *req = 0;
221 	uint32_t max_transfer =
222 	    GET_CORE_IF(ep->pcd)->core_params->max_transfer_size;
225 	struct dwc_otg_pcd *pcd;
229 	if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
230 		req = DWC_CIRCLEQ_FIRST(&ep->queue);
/* CFI (non-standard buffer mode): build custom DMA descriptors for the
 * request.  NOTE(review): the assignment of `pcd` is not visible in this
 * view — presumably pcd = ep->pcd; verify against the full source. */
233 		if (ep->dwc_ep.buff_mode != BM_STANDARD) {
234 			ep->dwc_ep.cfi_req_len = req->length;
235 			pcd->cfi->ops.build_descriptors(pcd->cfi, pcd, ep, req);
238 		/* Setup and start the Transfer */
/* If a DWORD-aligned bounce buffer was allocated for this request, DMA
 * from/to that buffer instead of the caller's (possibly misaligned) one. */
239 		if (req->dw_align_buf) {
240 			ep->dwc_ep.dma_addr = req->dw_align_buf_dma;
241 			ep->dwc_ep.start_xfer_buff = req->dw_align_buf;
242 			ep->dwc_ep.xfer_buff = req->dw_align_buf;
244 			ep->dwc_ep.dma_addr = req->dma;
245 			ep->dwc_ep.start_xfer_buff = req->buf;
246 			ep->dwc_ep.xfer_buff = req->buf;
/* Reset per-transfer counters for the new request. */
248 		ep->dwc_ep.sent_zlp = 0;
249 		ep->dwc_ep.total_len = req->length;
250 		ep->dwc_ep.xfer_len = 0;
251 		ep->dwc_ep.xfer_count = 0;
/* Clamp the per-programming maximum transfer size; descriptor DMA has a
 * tighter limit (and OUT additionally rounds down to a DWORD multiple). */
253 		ep->dwc_ep.maxxfer = max_transfer;
254 		if (GET_CORE_IF(ep->pcd)->dma_desc_enable) {
255 			uint32_t out_max_xfer = DDMA_MAX_TRANSFER_SIZE
256 			    - (DDMA_MAX_TRANSFER_SIZE % 4);
257 			if (ep->dwc_ep.is_in) {
258 				if (ep->dwc_ep.maxxfer >
259 				    DDMA_MAX_TRANSFER_SIZE) {
261 					    DDMA_MAX_TRANSFER_SIZE;
264 				if (ep->dwc_ep.maxxfer > out_max_xfer) {
/* If the request won't fit in one programming, make maxxfer a multiple of
 * maxpacket so no short packet is sent mid-transfer; otherwise decide
 * whether a zero-length packet must terminate the transfer. */
270 		if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
271 			ep->dwc_ep.maxxfer -=
272 			    (ep->dwc_ep.maxxfer % ep->dwc_ep.maxpacket);
275 			if ((ep->dwc_ep.total_len %
276 			     ep->dwc_ep.maxpacket == 0)
277 			    && (ep->dwc_ep.total_len != 0)) {
278 				ep->dwc_ep.sent_zlp = 1;
285 		dwc_otg_ep_start_transfer(GET_CORE_IF(ep->pcd), &ep->dwc_ep);
286 	} else if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
287 		DWC_PRINTF("There are no more ISOC requests \n");
/* Invalidate the frame number so the next queued ISOC request restarts
 * frame scheduling from scratch. */
288 		ep->dwc_ep.frame_num = 0xFFFFFFFF;
293 * This function handles the SOF Interrupts. At this time the SOF
294 * Interrupt is disabled.
/* SOF (Start-Of-Frame) interrupt handler. No work is done beyond logging;
 * the interrupt is simply acknowledged by writing the sofintr bit back to
 * GINTSTS (write-1-to-clear). Return statement is outside this view. */
296 int32_t dwc_otg_pcd_handle_sof_intr(dwc_otg_pcd_t *pcd)
298 	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
300 	gintsts_data_t gintsts;
302 	DWC_DEBUGPL(DBG_PCD, "SOF\n");
304 	/* Clear interrupt */
306 	gintsts.b.sofintr = 1;
307 	DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
313 * This function handles the Rx Status Queue Level Interrupt, which
314 * indicates that there is a least one packet in the Rx FIFO. The
315 * packets are moved from the FIFO to memory, where they will be
316 * processed when the Endpoint Interrupt Register indicates Transfer
317 * Complete or SETUP Phase Done.
319 * Repeat the following until the Rx Status Queue is empty:
320 * -# Read the Receive Status Pop Register (GRXSTSP) to get Packet
322 * -# If Receive FIFO is empty then skip to step Clear the interrupt
324 * -# If SETUP Packet call dwc_otg_read_setup_packet to copy the
325 * SETUP data to the buffer
326 * -# If OUT Data Packet call dwc_otg_read_packet to copy the data
327 * to the destination buffer
/* RX Status Queue Level interrupt handler (slave/shared-FIFO mode):
 * pops one status entry from GRXSTSP and, depending on pktsts, either
 * copies an OUT data packet into the endpoint buffer, copies a SETUP
 * packet into pcd->setup_pkt, or just logs completion events. The
 * interrupt is masked around the FIFO read and re-enabled afterwards. */
329 int32_t dwc_otg_pcd_handle_rx_status_q_level_intr(dwc_otg_pcd_t *pcd)
331 	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
332 	dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
333 	gintmsk_data_t gintmask = {.d32 = 0 };
334 	device_grxsts_data_t status;
335 	dwc_otg_pcd_ep_t *ep;
336 	gintsts_data_t gintsts;
/* Index by status.b.dpid; ordering matches the hardware DPID encoding. */
338 	static char *dpid_str[] = { "D0", "D2", "D1", "MDATA" };
341 	/* DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd); */
342 	/* Disable the Rx Status Queue Level interrupt */
343 	gintmask.b.rxstsqlvl = 1;
344 	DWC_MODIFY_REG32(&global_regs->gintmsk, gintmask.d32, 0);
346 	/* Get the Status from the top of the FIFO */
/* GRXSTSP is a destructive (pop) read. */
347 	status.d32 = DWC_READ_REG32(&global_regs->grxstsp);
349 	DWC_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s "
350 		    "pktsts:%x Frame:%d(0x%0x)\n",
351 		    status.b.epnum, status.b.bcnt,
352 		    dpid_str[status.b.dpid],
353 		    status.b.pktsts, status.b.fn, status.b.fn);
354 	/* Get pointer to EP structure */
355 	ep = get_out_ep(pcd, status.b.epnum);
357 	switch (status.b.pktsts) {
358 	case DWC_DSTS_GOUT_NAK:
359 		DWC_DEBUGPL(DBG_PCDV, "Global OUT NAK\n");
361 	case DWC_STS_DATA_UPDT:
362 		DWC_DEBUGPL(DBG_PCDV, "OUT Data Packet\n");
/* Copy the received packet from the RxFIFO into the EP's buffer and
 * advance the transfer bookkeeping. */
363 		if (status.b.bcnt && ep->dwc_ep.xfer_buff) {
364 			/** @todo NGS Check for buffer overflow? */
365 			dwc_otg_read_packet(core_if,
366 					    ep->dwc_ep.xfer_buff,
368 			ep->dwc_ep.xfer_count += status.b.bcnt;
369 			ep->dwc_ep.xfer_buff += status.b.bcnt;
372 	case DWC_STS_XFER_COMP:
373 		DWC_DEBUGPL(DBG_PCDV, "OUT Complete\n");
375 	case DWC_DSTS_SETUP_COMP:
377 		DWC_DEBUGPL(DBG_PCDV, "Setup Complete\n");
380 	case DWC_DSTS_SETUP_UPDT:
/* Copy the 8-byte SETUP packet into the PCD's setup buffer and log it. */
381 		dwc_otg_read_setup_packet(core_if, pcd->setup_pkt->d32);
384 			    "SETUP PKT: %02x.%02x v%04x i%04x l%04x\n",
385 			    pcd->setup_pkt->req.bmRequestType,
386 			    pcd->setup_pkt->req.bRequest,
387 			    UGETW(pcd->setup_pkt->req.wValue),
388 			    UGETW(pcd->setup_pkt->req.wIndex),
389 			    UGETW(pcd->setup_pkt->req.wLength));
391 		ep->dwc_ep.xfer_count += status.b.bcnt;
394 		DWC_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n",
399 	/* Enable the Rx Status Queue Level interrupt */
400 	DWC_MODIFY_REG32(&global_regs->gintmsk, 0, gintmask.d32);
401 	/* Clear interrupt */
403 	gintsts.b.rxstsqlvl = 1;
404 	DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
406 	/* DWC_DEBUGPL(DBG_PCDV, "EXIT: %s\n", __func__); */
411 * This function examines the Device IN Token Learning Queue to
412 * determine the EP number of the last IN token received. This
413 * implementation is for the Mass Storage device where there are only
414 * 2 IN EPs (Control-IN and BULK-IN).
416 * The EP numbers for the first six IN Tokens are in DTKNQR1 and there
417 * are 8 EP Numbers in each of the other possible DTKNQ Registers.
419 * @param core_if Programming view of DWC_otg controller.
/* Decodes the Device IN Token Learning Queue (DTKNQR1..4) to find the EP
 * number of the most recent IN token. DTKNQR1 holds the write pointer and
 * the first six 4-bit EP numbers; later registers hold 8 each. The write
 * pointer minus one indexes the newest entry; a negative index wraps to
 * the end of the queue (the branch using `cnt` below). */
422 static inline int get_ep_of_last_in_token(dwc_otg_core_if_t *core_if)
424 	dwc_otg_device_global_regs_t *dev_global_regs =
425 	    core_if->dev_if->dev_global_regs;
426 	const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth;
427 	/* Number of Token Queue Registers */
428 	const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8;
429 	dtknq1_data_t dtknqr1;
430 	uint32_t in_tkn_epnums[4];
433 	volatile uint32_t *addr = &dev_global_regs->dtknqr1;
436 	/* DWC_DEBUGPL(DBG_PCD,"dev_token_q_depth=%d\n",TOKEN_Q_DEPTH); */
438 	/* Read the DTKNQ Registers */
439 	for (i = 0; i < DTKNQ_REG_CNT; i++) {
440 		in_tkn_epnums[i] = DWC_READ_REG32(addr);
441 		DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i + 1,
/* DTKNQR2 is followed in the register map by DVBUSDIS/DVBUSPULSE, so the
 * walk must skip over them to reach DTKNQR3/DTKNQR4. */
443 		if (addr == &dev_global_regs->dvbusdis) {
444 			addr = &dev_global_regs->dtknqr3_dthrctl;
451 	/* Copy the DTKNQR1 data to the bit field. */
452 	dtknqr1.d32 = in_tkn_epnums[0];
453 	/* Get the EP numbers */
454 	in_tkn_epnums[0] = dtknqr1.b.epnums0_5;
455 	ndx = dtknqr1.b.intknwptr - 1;
457 	/* DWC_DEBUGPL(DBG_PCDV,"ndx=%d\n",ndx); */
459 	/** @todo Find a simpler way to calculate the max
/* Wrap-around case: the newest token is the last entry of the queue, whose
 * nibble offset within the final register depends on the queue depth. */
461 		int cnt = TOKEN_Q_DEPTH;
462 		if (TOKEN_Q_DEPTH <= 6) {
463 			cnt = TOKEN_Q_DEPTH - 1;
464 		} else if (TOKEN_Q_DEPTH <= 14) {
465 			cnt = TOKEN_Q_DEPTH - 7;
466 		} else if (TOKEN_Q_DEPTH <= 22) {
467 			cnt = TOKEN_Q_DEPTH - 15;
469 			cnt = TOKEN_Q_DEPTH - 23;
471 		epnum = (in_tkn_epnums[DTKNQ_REG_CNT - 1] >> (cnt * 4)) & 0xF;
/* Non-wrapping cases: pick the 4-bit EP number out of the register that
 * contains queue slot `ndx` (6 slots in DTKNQR1, 8 in each later one). */
474 		epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF;
475 	} else if (ndx <= 13) {
477 		epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF;
478 	} else if (ndx <= 21) {
480 		epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF;
481 	} else if (ndx <= 29) {
483 		epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF;
486 	/* DWC_DEBUGPL(DBG_PCD,"epnum=%d\n",epnum); */
491 * This interrupt occurs when the non-periodic Tx FIFO is half-empty.
492 * The active request is checked for the next packet to be loaded into
493 * the non-periodic Tx FIFO.
/* Non-periodic TxFIFO (half-)empty interrupt handler (shared-FIFO mode):
 * identifies the EP of the last IN token from the learning queue, then
 * writes packets from the active request into the FIFO while both request
 * queue space and FIFO space remain and the transfer is unfinished.
 * Finally acknowledges nptxfempty in GINTSTS. */
495 int32_t dwc_otg_pcd_handle_np_tx_fifo_empty_intr(dwc_otg_pcd_t *pcd)
497 	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
498 	dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
499 	dwc_otg_dev_in_ep_regs_t *ep_regs;
500 	gnptxsts_data_t txstatus = {.d32 = 0 };
501 	gintsts_data_t gintsts;
504 	dwc_otg_pcd_ep_t *ep = 0;
508 	/* Get the epnum from the IN Token Learning Queue. */
509 	epnum = get_ep_of_last_in_token(core_if);
510 	ep = get_in_ep(pcd, epnum);
512 	DWC_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %d \n", epnum);
514 	ep_regs = core_if->dev_if->in_ep_regs[epnum];
/* Size of the next packet: the transfer remainder, capped at maxpacket,
 * expressed in 32-bit words for the FIFO-space comparison. */
516 	len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
517 	if (len > ep->dwc_ep.maxpacket) {
518 		len = ep->dwc_ep.maxpacket;
520 	dwords = (len + 3) / 4;
522 	/* While there is space in the queue and space in the FIFO and
523 	 * More data to transfer, Write packets to the Tx FIFO */
524 	txstatus.d32 = DWC_READ_REG32(&global_regs->gnptxsts);
525 	DWC_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n", txstatus.d32);
527 	while (txstatus.b.nptxqspcavail > 0 &&
528 	       txstatus.b.nptxfspcavail > dwords &&
529 	       ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len) {
531 		dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0);
/* Recompute the next packet size and re-read FIFO status after each
 * packet so the loop condition tracks actual hardware state. */
532 		len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
534 		if (len > ep->dwc_ep.maxpacket) {
535 			len = ep->dwc_ep.maxpacket;
538 		dwords = (len + 3) / 4;
539 		txstatus.d32 = DWC_READ_REG32(&global_regs->gnptxsts);
540 		DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", txstatus.d32);
543 	DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n",
544 		    DWC_READ_REG32(&global_regs->gnptxsts));
546 	/* Clear interrupt */
548 	gintsts.b.nptxfempty = 1;
549 	DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
555 * This function is called when dedicated Tx FIFO Empty interrupt occurs.
556 * The active request is checked for the next packet to be loaded into
557 * appropriate Tx FIFO.
/* Dedicated (per-EP) TxFIFO empty handler: for endpoint `epnum`, writes
 * packets of the active request into that EP's FIFO (DTXFSTS gives the
 * free space) until the transfer completes or the FIFO fills. Unlike the
 * shared-FIFO variant above, no learning-queue lookup is needed. */
559 static int32_t write_empty_tx_fifo(dwc_otg_pcd_t *pcd, uint32_t epnum)
561 	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
562 	dwc_otg_dev_if_t *dev_if = core_if->dev_if;
563 	dwc_otg_dev_in_ep_regs_t *ep_regs;
564 	dtxfsts_data_t txstatus = {.d32 = 0 };
565 	dwc_otg_pcd_ep_t *ep = 0;
569 	ep = get_in_ep(pcd, epnum);
571 	DWC_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %d \n", epnum);
573 	ep_regs = core_if->dev_if->in_ep_regs[epnum];
/* Next packet size: transfer remainder, capped at maxpacket, in DWORDs. */
575 	len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
577 	if (len > ep->dwc_ep.maxpacket) {
578 		len = ep->dwc_ep.maxpacket;
581 	dwords = (len + 3) / 4;
583 	/* While there is space in the queue and space in the FIFO and
584 	 * More data to transfer, Write packets to the Tx FIFO */
585 	txstatus.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts);
586 	DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32);
588 	while (txstatus.b.txfspcavail > dwords &&
589 	       ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len &&
590 	       ep->dwc_ep.xfer_len != 0) {
591 		/* Write the FIFO */
592 		dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0);
/* Refresh packet size and FIFO free space after each write. */
594 		len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
595 		if (len > ep->dwc_ep.maxpacket) {
596 			len = ep->dwc_ep.maxpacket;
599 		dwords = (len + 3) / 4;
601 		    DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts);
602 		DWC_DEBUGPL(DBG_PCDV, "dtxfsts[%d]=0x%08x\n", epnum,
606 	DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum,
607 		    DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts));
613 * This function is called when the Device is disconnected. It stops
614 * any active requests and informs the Gadget driver of the
/* Device-disconnect teardown: under pcd->lock, marks EP0 disconnected
 * (idempotent — returns early if already so), resets OTG/HNP state, masks
 * the NP TxFIFO-empty interrupt, flushes Tx/Rx FIFOs, nukes all queued
 * requests on every endpoint, then notifies the gadget driver's
 * disconnect callback with the lock dropped (callback may re-enter). */
617 void dwc_otg_pcd_stop(dwc_otg_pcd_t *pcd)
619 	int i, num_in_eps, num_out_eps;
620 	dwc_otg_pcd_ep_t *ep;
621 	gintmsk_data_t intr_mask = {.d32 = 0 };
623 	DWC_SPINLOCK(pcd->lock);
625 	num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
626 	num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
628 	DWC_DEBUGPL(DBG_PCDV, "%s() \n", __func__);
629 	/* don't disconnect drivers more than once */
630 	if (pcd->ep0state == EP0_DISCONNECT) {
631 		DWC_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__);
632 		DWC_SPINUNLOCK(pcd->lock);
635 	pcd->ep0state = EP0_DISCONNECT;
637 	/* Reset the OTG state. */
638 	dwc_otg_pcd_update_otg(pcd, 1);
640 	/* Disable the NP Tx Fifo Empty Interrupt. */
641 	intr_mask.b.nptxfempty = 1;
642 	DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
645 	/* Flush the FIFOs */
646 	/**@todo NGS Flush Periodic FIFOs */
/* 0x10 selects "flush all Tx FIFOs" in GRSTCTL.TxFNum. */
647 	dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd), 0x10);
648 	dwc_otg_flush_rx_fifo(GET_CORE_IF(pcd));
650 	/* prevent new request submissions, kill any outstanding requests */
/* NOTE(review): the assignment of `ep` (presumably &pcd->ep0) before this
 * first nuke is in lines not visible here — verify against full source. */
652 	dwc_otg_request_nuke(ep);
653 	/* prevent new request submissions, kill any outstanding requests */
654 	for (i = 0; i < num_in_eps; i++) {
655 		dwc_otg_pcd_ep_t *ep = &pcd->in_ep[i];
656 		dwc_otg_request_nuke(ep);
658 	/* prevent new request submissions, kill any outstanding requests */
659 	for (i = 0; i < num_out_eps; i++) {
660 		dwc_otg_pcd_ep_t *ep = &pcd->out_ep[i];
661 		dwc_otg_request_nuke(ep);
664 	/* report disconnect; the driver is already quiesced */
/* Drop the lock around the callback: the gadget driver may call back into
 * the PCD (which would otherwise deadlock on pcd->lock). */
665 	if (pcd->fops->disconnect) {
666 		DWC_SPINUNLOCK(pcd->lock);
667 		pcd->fops->disconnect(pcd);
668 		DWC_SPINLOCK(pcd->lock);
670 	DWC_SPINUNLOCK(pcd->lock);
674 * This interrupt indicates that ...
/* I2C interrupt handler — not implemented: logs a message, masks the
 * i2cintr bit in GINTMSK so it does not fire again, and acknowledges the
 * pending bit in GINTSTS. */
676 int32_t dwc_otg_pcd_handle_i2c_intr(dwc_otg_pcd_t *pcd)
678 	gintmsk_data_t intr_mask = {.d32 = 0 };
679 	gintsts_data_t gintsts;
681 	DWC_PRINTF("INTERRUPT Handler not implemented for %s\n", "i2cintr");
682 	intr_mask.b.i2cintr = 1;
683 	DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
686 	/* Clear interrupt */
688 	gintsts.b.i2cintr = 1;
689 	DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
695 * This interrupt indicates that ...
/* Early-suspend interrupt handler: logs the event and acknowledges the
 * erlysuspend bit in GINTSTS (write-1-to-clear). No other action taken. */
697 int32_t dwc_otg_pcd_handle_early_suspend_intr(dwc_otg_pcd_t *pcd)
699 	gintsts_data_t gintsts;
701 	DWC_PRINTF("Early Suspend Detected\n");
704 	/* Clear interrupt */
706 	gintsts.b.erlysuspend = 1;
707 	DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
713 * This function configures EPO to receive SETUP packets.
715 * @todo NGS: Update the comments from the HW FS.
717 * -# Program the following fields in the endpoint specific registers
718 * for Control OUT EP 0, in order to receive a setup packet
719 * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
721 * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
722 * to back setup packets)
723 * - In DMA mode, DOEPDMA0 Register with a memory address to
724 * store any setup packets received
726 * @param core_if Programming view of DWC_otg controller.
727 * @param pcd Programming view of the PCD.
/* Arms control OUT endpoint 0 to receive SETUP packets: programs
 * DOEPTSIZ0 (supcnt/pktcnt/xfersize), and in DMA mode either writes the
 * setup buffer address to DOEPDMA0 (buffer DMA) or prepares one of the
 * two ping-pong SETUP DMA descriptors (descriptor DMA). Finally enables
 * the endpoint via DOEPCTL0; CNAK handling differs by core revision. */
729 static inline void ep0_out_start(dwc_otg_core_if_t *core_if,
732 	dwc_otg_dev_if_t *dev_if = core_if->dev_if;
733 	deptsiz0_data_t doeptsize0 = {.d32 = 0 };
734 	dwc_otg_dev_dma_desc_t *dma_desc;
735 	depctl_data_t doepctl = {.d32 = 0 };
738 	DWC_DEBUGPL(DBG_PCDV, "%s() doepctl0=%0x\n", __func__,
739 		    DWC_READ_REG32(&dev_if->out_ep_regs[0]->doepctl));
/* On 3.00a+ cores, do nothing if EP0 OUT is already enabled. */
741 	if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
742 		doepctl.d32 = DWC_READ_REG32(&dev_if->out_ep_regs[0]->doepctl);
743 		if (doepctl.b.epena) {
/* Accept up to 3 back-to-back SETUP packets (8 bytes each = 24). */
748 	doeptsize0.b.supcnt = 1;
749 	doeptsize0.b.pktcnt = 1;
750 	doeptsize0.b.xfersize = 8 * 3;
752 	if (core_if->dma_enable) {
753 		if (!core_if->dma_desc_enable) {
754 			/** put here as for Hermes mode deptisz register should not be written */
755 			DWC_WRITE_REG32(&dev_if->out_ep_regs[0]->doeptsiz,
758 			/** @todo dma needs to handle multiple setup packets (up to 3) */
759 			DWC_WRITE_REG32(&dev_if->out_ep_regs[0]->doepdma,
760 					pcd->setup_pkt_dma_handle);
/* Descriptor DMA: alternate between the two pre-allocated SETUP
 * descriptors (ping-pong), then fill in the one selected. */
762 			dev_if->setup_desc_index =
763 			    (dev_if->setup_desc_index + 1) & 1;
765 			    dev_if->setup_desc_addr[dev_if->setup_desc_index];
767 			/** DMA Descriptor Setup */
/* Mark BUSY while the descriptor fields are being written, READY last. */
768 			dma_desc->status.b.bs = BS_HOST_BUSY;
769 			if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
770 				dma_desc->status.b.sr = 0;
771 				dma_desc->status.b.mtrf = 0;
773 			dma_desc->status.b.l = 1;
774 			dma_desc->status.b.ioc = 1;
775 			dma_desc->status.b.bytes = pcd->ep0.dwc_ep.maxpacket;
776 			dma_desc->buf = pcd->setup_pkt_dma_handle;
777 			dma_desc->status.b.sts = 0;
778 			dma_desc->status.b.bs = BS_HOST_READY;
780 			/** DOEPDMA0 Register write */
781 			DWC_WRITE_REG32(&dev_if->out_ep_regs[0]->doepdma,
782 					dev_if->dma_setup_desc_addr
783 					[dev_if->setup_desc_index]);
787 		/** put here as for Hermes mode deptisz register should not be written */
788 		DWC_WRITE_REG32(&dev_if->out_ep_regs[0]->doeptsiz,
792 	/** DOEPCTL0 Register write cnak will be set after setup interrupt */
/* Older cores (<= 2.94a): full DOEPCTL0 write; newer cores: set bits via
 * read-modify-write (the modified value is in lines not visible here). */
795 	if (core_if->snpsid <= OTG_CORE_REV_2_94a) {
797 		DWC_WRITE_REG32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
799 		DWC_MODIFY_REG32(&dev_if->out_ep_regs[0]->doepctl, 0,
804 	DWC_DEBUGPL(DBG_PCDV, "doepctl0=%0x\n",
805 		    DWC_READ_REG32(&dev_if->out_ep_regs[0]->doepctl));
806 	DWC_DEBUGPL(DBG_PCDV, "diepctl0=%0x\n",
807 		    DWC_READ_REG32(&dev_if->in_ep_regs[0]->diepctl));
812 * This interrupt occurs when a USB Reset is detected. When the USB
813 * Reset Interrupt occurs the device state is set to DEFAULT and the
814 * EP0 state is set to IDLE.
815 * -# Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1)
816 * -# Unmask the following interrupt bits
817 * - DAINTMSK.INEP0 = 1 (Control 0 IN endpoint)
818 * - DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint)
819 * - DOEPMSK.SETUP = 1
820 * - DOEPMSK.XferCompl = 1
821 * - DIEPMSK.XferCompl = 1
822 * - DIEPMSK.TimeOut = 1
823 * -# Program the following fields in the endpoint specific registers
824 * for Control OUT EP 0, in order to receive a setup packet
825 * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
827 * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
828 * to back setup packets)
829 * - In DMA mode, DOEPDMA0 Register with a memory address to
830 * store any setup packets received
831 * At this point, all the required initialization, except for enabling
832 * the control 0 OUT endpoint is done, for receiving SETUP packets.
/* USB Reset interrupt handler. Sequence: exit any partial power-down,
 * reset OTG/HNP state, clear remote wakeup, NAK all OUT EPs, flush the NP
 * TxFIFO and the IN-token learning queue, reinitialize the next-endpoint
 * sequence (shared-FIFO DMA mode), program the device/EP interrupt masks
 * (with a separate path for per-EP/multiprocess interrupts), reset the
 * device address in DCFG, arm EP0 for SETUP packets on older cores, and
 * finally acknowledge usbreset in GINTSTS. */
834 int32_t dwc_otg_pcd_handle_usb_reset_intr(dwc_otg_pcd_t *pcd)
836 	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
837 	dwc_otg_dev_if_t *dev_if = core_if->dev_if;
838 	depctl_data_t doepctl = {.d32 = 0 };
839 	depctl_data_t diepctl = {.d32 = 0 };
840 	daint_data_t daintmsk = {.d32 = 0 };
841 	doepmsk_data_t doepmsk = {.d32 = 0 };
842 	diepmsk_data_t diepmsk = {.d32 = 0 };
843 	dcfg_data_t dcfg = {.d32 = 0 };
844 	grstctl_t resetctl = {.d32 = 0 };
845 	dctl_data_t dctl = {.d32 = 0 };
847 	gintsts_data_t gintsts;
848 	pcgcctl_data_t power = {.d32 = 0 };
/* If the PHY clock was stopped (partial power-down), restart it before
 * touching any other registers. */
850 	power.d32 = DWC_READ_REG32(core_if->pcgcctl);
851 	if (power.b.stoppclk) {
853 		power.b.stoppclk = 1;
854 		DWC_MODIFY_REG32(core_if->pcgcctl, power.d32, 0);
857 		DWC_MODIFY_REG32(core_if->pcgcctl, power.d32, 0);
859 		power.b.rstpdwnmodule = 1;
860 		DWC_MODIFY_REG32(core_if->pcgcctl, power.d32, 0);
863 	core_if->lx_state = DWC_OTG_L0;
864 	core_if->otg_sts = 0;
866 	DWC_PRINTF("USB RESET\n");
/* ISOC bookkeeping reset (compiled under DWC_EN_ISOC; the #ifdef opening
 * is in lines not visible here): invalidate each IN EP's next frame. */
868 	for (i = 1; i < 16; ++i) {
869 		dwc_otg_pcd_ep_t *ep;
871 		ep = get_in_ep(pcd, i);
873 			dwc_ep = &ep->dwc_ep;
874 			dwc_ep->next_frame = 0xffffffff;
877 #endif /* DWC_EN_ISOC */
879 	/* reset the HNP settings */
880 	dwc_otg_pcd_update_otg(pcd, 1);
882 	/* Clear the Remote Wakeup Signalling */
883 	dctl.b.rmtwkupsig = 1;
884 	DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0);
886 	/* Set NAK for all OUT EPs */
888 	for (i = 0; i <= dev_if->num_out_eps; i++) {
889 		DWC_WRITE_REG32(&dev_if->out_ep_regs[i]->doepctl, doepctl.d32);
892 	/* Flush the NP Tx FIFO */
/* 0x10 = flush all Tx FIFOs (GRSTCTL.TxFNum encoding). */
893 	dwc_otg_flush_tx_fifo(core_if, 0x10);
894 	/* Flush the Learning Queue */
895 	resetctl.b.intknqflsh = 1;
896 	DWC_WRITE_REG32(&core_if->core_global_regs->grstctl, resetctl.d32);
/* Shared-FIFO DMA mode: rebuild the NextEP sequencing state used for the
 * IN-token mismatch workaround; 0xff marks an inactive endpoint. */
898 	if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable) {
899 		core_if->start_predict = 0;
900 		for (i = 0; i <= core_if->dev_if->num_in_eps; ++i) {
901 			core_if->nextep_seq[i] = 0xff;	/*0xff - EP not active */
903 		core_if->nextep_seq[0] = 0;
904 		core_if->first_in_nextep_seq = 0;
905 		diepctl.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[0]->diepctl);
906 		diepctl.b.nextep = 0;
907 		DWC_WRITE_REG32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
909 		/* Update IN Endpoint Mismatch Count by active IN NP EP count + 1 */
910 		dcfg.d32 = DWC_READ_REG32(&dev_if->dev_global_regs->dcfg);
912 		DWC_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
914 		DWC_DEBUGPL(DBG_PCDV,
915 			    "%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
916 			    __func__, core_if->first_in_nextep_seq);
917 		for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
918 			DWC_DEBUGPL(DBG_PCDV, "%2d\n", core_if->nextep_seq[i]);
/* Per-EP interrupt path (multiprocess interrupt enable): program the
 * deachintmsk / doepeachintmsk / diepeachintmsk register set for EP0. */
922 	if (core_if->multiproc_int_enable) {
923 		daintmsk.b.inep0 = 1;
924 		daintmsk.b.outep0 = 1;
925 		DWC_WRITE_REG32(&dev_if->dev_global_regs->deachintmsk,
929 		doepmsk.b.xfercompl = 1;
930 		doepmsk.b.ahberr = 1;
931 		doepmsk.b.epdisabled = 1;
/* Status-phase-received unmasked only for descriptor DMA or 3.00a+. */
933 		if ((core_if->dma_desc_enable) ||
935 		     && core_if->snpsid >= OTG_CORE_REV_3_00a)) {
936 			doepmsk.b.stsphsercvd = 1;
938 		if (core_if->dma_desc_enable)
941 		doepmsk.b.babble = 1;
944 		if (core_if->dma_enable) {
948 		DWC_WRITE_REG32(&dev_if->dev_global_regs->doepeachintmsk[0],
951 		diepmsk.b.xfercompl = 1;
952 		diepmsk.b.timeout = 1;
953 		diepmsk.b.epdisabled = 1;
954 		diepmsk.b.ahberr = 1;
955 		diepmsk.b.intknepmis = 1;
/* In shared-FIFO DMA mode the mismatch interrupt is handled via the
 * NextEP workaround instead, so keep it masked. */
956 		if (!core_if->en_multiple_tx_fifo && core_if->dma_enable)
957 			diepmsk.b.intknepmis = 0;
959 /*		if (core_if->dma_desc_enable) {
964 		if (core_if->dma_enable) {
968 		DWC_WRITE_REG32(&dev_if->dev_global_regs->diepeachintmsk[0],
/* Legacy (shared) interrupt path: same programming via daintmsk /
 * doepmsk / diepmsk. */
971 		daintmsk.b.inep0 = 1;
972 		daintmsk.b.outep0 = 1;
973 		DWC_WRITE_REG32(&dev_if->dev_global_regs->daintmsk,
977 		doepmsk.b.xfercompl = 1;
978 		doepmsk.b.ahberr = 1;
979 		doepmsk.b.epdisabled = 1;
981 		if ((core_if->dma_desc_enable) ||
983 		     && core_if->snpsid >= OTG_CORE_REV_3_00a)) {
984 			doepmsk.b.stsphsercvd = 1;
986 		if (core_if->dma_desc_enable)
988 		DWC_WRITE_REG32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32);
990 		diepmsk.b.xfercompl = 1;
991 		diepmsk.b.timeout = 1;
992 		diepmsk.b.epdisabled = 1;
993 		diepmsk.b.ahberr = 1;
994 		if (!core_if->en_multiple_tx_fifo && core_if->dma_enable)
995 			diepmsk.b.intknepmis = 0;
997 		if (core_if->dma_desc_enable) {
1002 		DWC_WRITE_REG32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32);
1005 	/* Reset Device Address */
1006 	dcfg.d32 = DWC_READ_REG32(&dev_if->dev_global_regs->dcfg);
1008 	DWC_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
1010 	/* setup EP0 to receive SETUP packets */
/* 3.00a+ cores arm EP0 in the enum-done handler instead (see
 * dwc_otg_pcd_handle_enum_done_intr). */
1011 	if (core_if->snpsid <= OTG_CORE_REV_2_94a)
1012 		ep0_out_start(core_if, pcd);
1014 	/* Clear interrupt */
1016 	gintsts.b.usbreset = 1;
1017 	DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
1023 * Get the device speed from the device status register and convert it
1024 * to USB speed constant.
1026 * @param core_if Programming view of DWC_otg controller.
/* Reads DSTS.enumspd and maps the hardware enumeration-speed encoding to
 * a USB_SPEED_* constant (HIGH / FULL / LOW). The declaration of the
 * locals and the final return are in lines not visible in this view. */
1028 static int get_device_speed(dwc_otg_core_if_t *core_if)
1032 	dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
1034 	switch (dsts.b.enumspd) {
1035 	case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
1036 		speed = USB_SPEED_HIGH;
/* Both FS PHY clock configurations map to full speed. */
1038 	case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
1039 	case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
1040 		speed = USB_SPEED_FULL;
1043 	case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
1044 		speed = USB_SPEED_LOW;
1052 * Read the device status register and set the device speed in the
1054 * Set up EP0 to receive SETUP packets by calling dwc_ep0_activate.
/* Enumeration-done interrupt handler: activates EP0 (and on 3.00a+ cores
 * arms EP0 OUT for SETUP packets here rather than at USB reset), resets
 * the EP0 state machine to IDLE, reports the negotiated speed to the
 * gadget driver, programs GUSBCFG.usbtrdtim (USB turnaround time) from
 * the speed and PHY interface type/width, and acknowledges enumdone. */
1056 int32_t dwc_otg_pcd_handle_enum_done_intr(dwc_otg_pcd_t *pcd)
1058 	dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1059 	gintsts_data_t gintsts;
1060 	gusbcfg_data_t gusbcfg;
1061 	dwc_otg_core_global_regs_t *global_regs =
1062 	    GET_CORE_IF(pcd)->core_global_regs;
1063 	uint8_t utmi16b, utmi8b;
1066 	DWC_DEBUGPL(DBG_PCD, "SPEED ENUM\n");
/* Turnaround-time constants depend on core revision; the pre-2.60a values
 * are assigned in lines not visible in this view. */
1068 	if (GET_CORE_IF(pcd)->snpsid >= OTG_CORE_REV_2_60a) {
1069 		utmi16b = 5;	/* vahrama old value was 6; */
1075 	dwc_otg_ep0_activate(GET_CORE_IF(pcd), &ep0->dwc_ep);
1076 	if (GET_CORE_IF(pcd)->snpsid >= OTG_CORE_REV_3_00a) {
1077 		ep0_out_start(GET_CORE_IF(pcd), pcd);
1080 		print_ep0_state(pcd);
/* All EP0 states collapse to IDLE after enumeration completes. */
1083 	if (pcd->ep0state == EP0_DISCONNECT) {
1084 		pcd->ep0state = EP0_IDLE;
1085 	} else if (pcd->ep0state == EP0_STALL) {
1086 		pcd->ep0state = EP0_IDLE;
1089 	pcd->ep0state = EP0_IDLE;
/* Inform the gadget driver of the enumerated speed. */
1093 	speed = get_device_speed(GET_CORE_IF(pcd));
1094 	pcd->fops->connect(pcd, speed);
1096 	/* Set USB turnaround time based on device speed and PHY interface. */
1097 	gusbcfg.d32 = DWC_READ_REG32(&global_regs->gusbcfg);
1098 	if (speed == USB_SPEED_HIGH) {
1099 		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
1100 		    DWC_HWCFG2_HS_PHY_TYPE_ULPI) {
1101 			/* ULPI interface */
1102 			gusbcfg.b.usbtrdtim = 9;
1104 		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
1105 		    DWC_HWCFG2_HS_PHY_TYPE_UTMI) {
1106 			/* UTMI+ interface */
/* Prefer the synthesized data width from HWCFG4; fall back to the
 * phy_utmi_width core parameter when HWCFG4 reports neither 8 nor 16. */
1107 			if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 0) {
1108 				gusbcfg.b.usbtrdtim = utmi8b;
1109 			} else if (GET_CORE_IF(pcd)->hwcfg4.
1110 				   b.utmi_phy_data_width == 1) {
1111 				gusbcfg.b.usbtrdtim = utmi16b;
1112 			} else if (GET_CORE_IF(pcd)->
1113 				   core_params->phy_utmi_width == 8) {
1114 				gusbcfg.b.usbtrdtim = utmi8b;
1116 				gusbcfg.b.usbtrdtim = utmi16b;
1119 		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
1120 		    DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) {
1121 			/* UTMI+  OR  ULPI interface */
1122 			if (gusbcfg.b.ulpi_utmi_sel == 1) {
1123 				/* ULPI interface */
1124 				gusbcfg.b.usbtrdtim = 9;
1126 				/* UTMI+ interface */
1127 				if (GET_CORE_IF(pcd)->
1128 				    core_params->phy_utmi_width == 16) {
1129 					gusbcfg.b.usbtrdtim = utmi16b;
1131 					gusbcfg.b.usbtrdtim = utmi8b;
1136 		/* Full or low speed */
1137 		gusbcfg.b.usbtrdtim = 9;
1139 	DWC_WRITE_REG32(&global_regs->gusbcfg, gusbcfg.d32);
1141 	/* Clear interrupt */
1143 	gintsts.b.enumdone = 1;
1144 	DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
1150 * This interrupt indicates that the ISO OUT Packet was dropped due to
1151 * Rx FIFO full or Rx Status Queue Full. If this interrupt occurs
1152 * read all the data from the Rx FIFO.
/* ISOC OUT packet dropped (Rx FIFO / status queue full) — handler not
 * implemented: warns, masks isooutdrop in GINTMSK, and acknowledges the
 * pending bit in GINTSTS. */
1154 int32_t dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(dwc_otg_pcd_t *pcd)
1156 	gintmsk_data_t intr_mask = {.d32 = 0 };
1157 	gintsts_data_t gintsts;
1159 	DWC_WARN("INTERRUPT Handler not implemented for %s\n",
1160 		 "ISOC Out Dropped");
1162 	intr_mask.b.isooutdrop = 1;
1163 	DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
1166 	/* Clear interrupt */
1168 	gintsts.b.isooutdrop = 1;
1169 	DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
1176 * This interrupt indicates the end of the portion of the micro-frame
1177 * for periodic transactions. If there is a periodic transaction for
1178 * the next frame, load the packets into the EP periodic Tx FIFO.
/* End-of-periodic-frame interrupt — handler not implemented: logs, masks
 * eopframe in GINTMSK, and acknowledges the pending bit in GINTSTS. */
1180 int32_t dwc_otg_pcd_handle_end_periodic_frame_intr(dwc_otg_pcd_t *pcd)
1182 	gintmsk_data_t intr_mask = {.d32 = 0 };
1183 	gintsts_data_t gintsts;
1184 	DWC_PRINTF("INTERRUPT Handler not implemented for %s\n", "EOP");
1186 	intr_mask.b.eopframe = 1;
1187 	DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
1190 	/* Clear interrupt */
1192 	gintsts.b.eopframe = 1;
1193 	DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
1200 * This interrupt indicates that EP of the packet on the top of the
1201 * non-periodic Tx FIFO does not match EP of the IN Token received.
1203 * The "Device IN Token Queue" Registers are read to determine the
1204 * order the IN Tokens have been received. The non-periodic Tx FIFO
1205 * is flushed, so it can be reloaded in the order seen in the IN Token
/*
 * EP Mismatch interrupt handler: the EP at the head of the non-periodic
 * Tx FIFO does not match the EP of the received IN token.  In shared-FIFO
 * DMA mode this arms the endpoint-prediction logic, swaps the EP Mismatch
 * interrupt for the Global IN NAK Effective interrupt, and requests the
 * global non-periodic IN NAK handshake; the actual EP disabling happens
 * later in dwc_otg_pcd_handle_in_nak_effective().
 */
1208 int32_t dwc_otg_pcd_handle_ep_mismatch_intr(dwc_otg_pcd_t *pcd)
1210 gintsts_data_t gintsts;
1211 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1213 gintmsk_data_t intr_mask = {.d32 = 0 };
/* Prediction only applies to shared (non-multiple) Tx FIFO + DMA mode. */
1215 if (!core_if->en_multiple_tx_fifo && core_if->dma_enable) {
1216 core_if->start_predict = 1;
1218 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if);
1221 DWC_READ_REG32(&core_if->core_global_regs->gintsts);
/* Only start the IN NAK sequence if Global IN NAK is not already in effect. */
1222 if (!gintsts.b.ginnakeff) {
1223 /* Disable EP Mismatch interrupt */
1225 intr_mask.b.epmismatch = 1;
1226 DWC_MODIFY_REG32(&core_if->core_global_regs->gintmsk,
1228 /* Enable the Global IN NAK Effective Interrupt */
1230 intr_mask.b.ginnakeff = 1;
1231 DWC_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0,
1233 /* Set the global non-periodic IN NAK handshake */
1235 DWC_READ_REG32(&core_if->dev_if->
1236 dev_global_regs->dctl);
1237 dctl.b.sgnpinnak = 1;
1238 DWC_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl,
1242 ("gintsts.b.ginnakeff = 1! dctl.b.sgnpinnak not set\n");
1244 /* Disabling of all EP's will be done in dwc_otg_pcd_handle_in_nak_effective()
1245 * handler after Global IN NAK Effective interrupt will be asserted */
1247 /* Clear interrupt */
1249 gintsts.b.epmismatch = 1;
1250 DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
1256 * This interrupt is valid only in DMA mode. This interrupt indicates that the
1257 * core has stopped fetching data for IN endpoints due to the unavailability of
1258 * TxFIFO space or Request Queue space. This interrupt is used by the
1259 * application for an endpoint mismatch algorithm.
1261 * @param pcd The PCD
/*
 * Fetch Suspended interrupt handler (DMA mode only): the core stopped
 * fetching IN-endpoint data for lack of Tx FIFO / Request Queue space.
 * Clears the global non-periodic IN NAK handshake, masks FETSUSP in
 * GINTMSK, and clears the status bit.
 */
1263 int32_t dwc_otg_pcd_handle_ep_fetsusp_intr(dwc_otg_pcd_t *pcd)
1265 gintsts_data_t gintsts;
1266 gintmsk_data_t gintmsk_data;
1268 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1269 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if);
1271 /* Clear the global non-periodic IN NAK handshake */
1273 dctl.b.cgnpinnak = 1;
1274 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32,
1277 /* Mask GINTSTS.FETSUSP interrupt */
1278 gintmsk_data.d32 = DWC_READ_REG32(&core_if->core_global_regs->gintmsk);
1279 gintmsk_data.b.fetsusp = 0;
1280 DWC_WRITE_REG32(&core_if->core_global_regs->gintmsk, gintmsk_data.d32);
1282 /* Clear interrupt */
1284 gintsts.b.fetsusp = 1;
1285 DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
1291 * This function stalls EP0.
/*
 * Protocol-stall EP0 in response to an unsupported/failed control request.
 * Stalls both the IN and OUT directions of endpoint 0, marks EP0 stopped,
 * returns the EP0 state machine to EP0_IDLE, and re-arms EP0 OUT so the
 * next SETUP packet can be received.  err_val is only used for the warning.
 */
1293 static inline void ep0_do_stall(dwc_otg_pcd_t *pcd, const int err_val)
1295 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1296 usb_device_request_t *ctrl = &pcd->setup_pkt->req;
1297 DWC_WARN("req %02x.%02x protocol STALL; err %d\n",
1298 ctrl->bmRequestType, ctrl->bRequest, err_val);
/* Stall the IN direction, then the OUT direction, of EP0. */
1300 ep0->dwc_ep.is_in = 1;
1301 dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep0->dwc_ep);
1302 ep0->dwc_ep.is_in = 0;
1303 dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep0->dwc_ep);
1304 pcd->ep0.stopped = 1;
1305 pcd->ep0state = EP0_IDLE;
/* Re-arm EP0 OUT for the next SETUP packet. */
1306 ep0_out_start(GET_CORE_IF(pcd), pcd);
1310 * This functions delegates the setup command to the gadget driver.
/*
 * Delegate a SETUP request to the gadget driver's setup() callback.
 * The PCD lock is dropped around the callback because the gadget may
 * sleep or re-enter the PCD; a warning is printed if the callback left
 * the lock held.  A negative return from the gadget stalls EP0.
 * NOTE(review): this listing elides some lines (the declaration of ret,
 * the error check before ep0_do_stall).
 */
1312 static inline void do_gadget_setup(dwc_otg_pcd_t *pcd,
1313 usb_device_request_t *ctrl)
1316 DWC_SPINUNLOCK(pcd->lock);
1317 ret = pcd->fops->setup(pcd, (uint8_t *) ctrl);
1318 if (spin_is_locked((spinlock_t *) pcd->lock))
1319 DWC_WARN("%s warning: pcd->lock locked without unlock\n",
1321 DWC_SPINLOCK(pcd->lock);
1323 ep0_do_stall(pcd, ret);
1326 /** @todo This is a g_file_storage gadget driver specific
1327 * workaround: a DELAYED_STATUS result from the fsg_setup
1328 * routine will result in the gadget queueing a EP0 IN status
1329 * phase for a two-stage control transfer. Exactly the same as
1330 * a SET_CONFIGURATION/SET_INTERFACE except that this is a class
1331 * specific request. Need a generic way to know when the gadget
1332 * driver will queue the status phase. Can we assume when we
1333 * call the gadget driver setup() function that it will always
1334 * queue and require the following flag? Need to look into
/* 256 + 999 is the gadget's DELAYED_STATUS sentinel (see @todo above). */
1338 if (ret == 256 + 999) {
1339 pcd->request_config = 1;
1345 * This functions delegates the CFI setup commands to the gadget driver.
1346 * This function will return a negative value to indicate a failure.
/*
 * Delegate a CFI (Core Feature Interface) control request to the gadget's
 * cfi_setup() callback, dropping the PCD lock around the call.  A failure
 * (negative return — check elided in this listing) stalls EP0.
 */
1348 static inline int cfi_gadget_setup(dwc_otg_pcd_t *pcd,
1349 struct cfi_usb_ctrlrequest *ctrl_req)
1353 if (pcd->fops && pcd->fops->cfi_setup) {
1354 DWC_SPINUNLOCK(pcd->lock);
1355 ret = pcd->fops->cfi_setup(pcd, ctrl_req);
1356 DWC_SPINLOCK(pcd->lock);
1358 ep0_do_stall(pcd, ret);
1368 * This function starts the Zero-Length Packet for the IN status phase
1369 * of a 2 stage control transfer.
/*
 * Start the zero-length IN packet for the status phase of a control
 * transfer.  Does nothing if EP0 is stalled.  On cores >= 3.00a with
 * descriptor DMA, a short OUT data stage (xfer_count < total_len) is
 * remembered via pcd->data_terminated / pcd->backup_buf so the residual
 * data can be recovered later.
 */
1371 static inline void do_setup_in_status_phase(dwc_otg_pcd_t *pcd)
1373 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
/* Nothing to do if EP0 is stalled (early return elided in listing). */
1374 if (pcd->ep0state == EP0_STALL) {
1378 pcd->ep0state = EP0_IN_STATUS_PHASE;
1380 /* Prepare for more SETUP Packets */
1381 DWC_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n");
1382 if ((GET_CORE_IF(pcd)->snpsid >= OTG_CORE_REV_3_00a)
1383 && (pcd->core_if->dma_desc_enable)
1384 && (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len)) {
1385 DWC_DEBUGPL(DBG_PCDV,
1386 "Data terminated wait next packet in out_desc_addr\n");
1387 pcd->backup_buf = phys_to_virt(ep0->dwc_ep.dma_addr);
1388 pcd->data_terminated = 1;
/* Program EP0 for a zero-length IN transfer (the status ZLP). */
1390 ep0->dwc_ep.xfer_len = 0;
1391 ep0->dwc_ep.xfer_count = 0;
1392 ep0->dwc_ep.is_in = 1;
1393 ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle;
1394 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1396 /* Prepare for more SETUP Packets */
1397 /* ep0_out_start(GET_CORE_IF(pcd), pcd); */
1401 * This function starts the Zero-Length Packet for the OUT status phase
1402 * of a 2 stage control transfer.
/*
 * Start the zero-length OUT packet for the status phase of a control
 * transfer.  Does nothing if EP0 is stalled.  In Slave (non-DMA) mode,
 * EP0 OUT is also re-armed here so the next SETUP packet can be received.
 */
1404 static inline void do_setup_out_status_phase(dwc_otg_pcd_t *pcd)
1406 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1407 if (pcd->ep0state == EP0_STALL) {
1408 DWC_DEBUGPL(DBG_PCD, "EP0 STALLED\n");
1411 pcd->ep0state = EP0_OUT_STATUS_PHASE;
1413 DWC_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n");
/* Program EP0 for a zero-length OUT transfer (the status ZLP). */
1414 ep0->dwc_ep.xfer_len = 0;
1415 ep0->dwc_ep.xfer_count = 0;
1416 ep0->dwc_ep.is_in = 0;
1417 ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle;
1418 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1420 /* Prepare for more SETUP Packets */
1421 if (GET_CORE_IF(pcd)->dma_enable == 0) {
1422 ep0_out_start(GET_CORE_IF(pcd), pcd);
1427 * Clear the EP halt (STALL) and if pending requests start the
/*
 * Handle CLEAR_FEATURE(ENDPOINT_HALT): clear the EP's STALL handshake,
 * reactivate the endpoint, schedule any queued request via the
 * start-transfer tasklet, and run the control status phase.  If the EP
 * was stalled with stall_clear_flag set, only the status phase is done
 * (the flag's semantics are managed elsewhere — not visible here).
 */
1430 static inline void pcd_clear_halt(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep)
1432 if (ep->dwc_ep.stall_clear_flag) {
1433 /* Start Control Status Phase */
1434 do_setup_in_status_phase(pcd);
1438 dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
1440 /* Reactivate the EP */
1441 dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
1444 /* If there is a request in the EP queue start it */
1446 /** @todo FIXME: this causes an EP mismatch in DMA mode.
1447 * epmismatch not yet implemented. */
1450 * Above fixme is solved by implementing a tasklet to call the
1451 * start_next_request(), outside of interrupt context at some
1452 * time after the current time, after a clear-halt setup packet.
1453 * Still need to implement ep mismatch in the future if a gadget
1454 * ever uses more than one endpoint at once
/* Defer start_next_request() out of interrupt context (see note above). */
1457 DWC_TASK_SCHEDULE(pcd->start_xfer_tasklet);
1459 /* Start Control Status Phase */
1460 do_setup_in_status_phase(pcd);
1464 * This function is called when the SET_FEATURE TEST_MODE Setup packet
1465 * is sent from the host. The Device Control register is written with
1466 * the Test Mode bits set to the specified Test Mode. This is done as
1467 * a tasklet so that the "Status" phase of the control transfer
1468 * completes before transmitting the TEST packets.
1470 * @todo This has not been tested since the tasklet struct was put
1471 * into the PCD struct!
/*
 * Tasklet body: program DCTL with the USB test mode requested via
 * SET_FEATURE(TEST_MODE).  Runs as a tasklet so the control transfer's
 * status phase completes before test packets are transmitted.  The
 * per-case DCTL field assignments are elided in this listing; case 5
 * additionally raises an HNP request.
 */
1474 void do_test_mode(void *data)
1477 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) data;
1478 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1479 int test_mode = pcd->test_mode;
1481 /* DWC_WARN("%s() has not been tested since being rewritten!\n", __func__); */
1483 dctl.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
/* test_mode selectors follow USB 2.0 Test Mode values (wIndex high byte). */
1484 switch (test_mode) {
1485 case 1: /* TEST_J */
1489 case 2: /* TEST_K */
1493 case 3: /* TEST_SE0_NAK */
1497 case 4: /* TEST_PACKET */
1501 case 5: /* TEST_FORCE_ENABLE */
1505 dwc_otg_set_hnpreq(core_if, 1);
1507 DWC_PRINTF("test mode = %d\n", test_mode);
1508 core_if->test_mode = test_mode;
1509 DWC_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
1513 * This function process the GET_STATUS Setup Commands.
/*
 * Handle the standard GET_STATUS request on EP0.  Builds the 2-byte (or,
 * for the OTG status selector, 1-byte) status in pcd->status_buf and
 * starts the EP0 IN data phase.  Recipients: device (self-powered +
 * remote-wakeup bits, or the OTG status selector 0xF000), interface, and
 * endpoint (halt status).  Unsupported combinations stall EP0.
 */
1515 static inline void do_get_status(dwc_otg_pcd_t *pcd)
1517 usb_device_request_t ctrl = pcd->setup_pkt->req;
1518 dwc_otg_pcd_ep_t *ep;
1519 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1520 uint16_t *status = pcd->status_buf;
1521 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1524 DWC_DEBUGPL(DBG_PCD,
1525 "GET_STATUS %02x.%02x v%04x i%04x l%04x\n",
1526 ctrl.bmRequestType, ctrl.bRequest,
1527 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1528 UGETW(ctrl.wLength));
1531 switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
1533 if (UGETW(ctrl.wIndex) == 0xF000) { /* OTG Status selector */
1534 DWC_PRINTF("wIndex - %d\n", UGETW(ctrl.wIndex));
1535 DWC_PRINTF("OTG VERSION - %d\n", core_if->otg_ver);
1536 DWC_PRINTF("OTG CAP - %d, %d\n",
1537 core_if->core_params->otg_cap,
1538 DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE);
/* OTG status is only reported for OTG 2.0 cores with HNP/SRP capability. */
1539 if (core_if->otg_ver == 1
1540 && core_if->core_params->otg_cap ==
1541 DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1542 uint8_t *otgsts = (uint8_t *) pcd->status_buf;
1543 *otgsts = (core_if->otg_sts & 0x1);
/* Start a 1-byte EP0 IN transfer carrying the OTG status. */
1544 pcd->ep0_pending = 1;
1545 ep0->dwc_ep.start_xfer_buff =
1547 ep0->dwc_ep.xfer_buff = (uint8_t *) otgsts;
1548 ep0->dwc_ep.dma_addr =
1549 pcd->status_buf_dma_handle;
1550 ep0->dwc_ep.xfer_len = 1;
1551 ep0->dwc_ep.xfer_count = 0;
1552 ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len;
1553 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
1557 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
/* Device recipient: bit0 = self-powered, bit1 = remote-wakeup enabled. */
1562 *status = 0x1; /* Self powered */
1563 *status |= pcd->remote_wakeup_enable << 1;
/* Endpoint recipient: report the EP's halted flag; wLength must be <= 2. */
1571 ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
1572 if (ep == 0 || UGETW(ctrl.wLength) > 2) {
1573 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1576 /** @todo check for EP stall */
1577 *status = ep->stopped;
/* Common tail: start the 2-byte EP0 IN data phase with the status word. */
1580 pcd->ep0_pending = 1;
1581 ep0->dwc_ep.start_xfer_buff = (uint8_t *) status;
1582 ep0->dwc_ep.xfer_buff = (uint8_t *) status;
1583 ep0->dwc_ep.dma_addr = pcd->status_buf_dma_handle;
1584 ep0->dwc_ep.xfer_len = 2;
1585 ep0->dwc_ep.xfer_count = 0;
1586 ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len;
1587 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1591 * This function process the SET_FEATURE Setup Commands.
/*
 * Handle the standard SET_FEATURE request on EP0.
 * Device recipient: REMOTE_WAKEUP enable, TEST_MODE (deferred to the
 * test-mode tasklet so the status phase finishes first), and the OTG
 * B_HNP_ENABLE / A_HNP_SUPPORT / A_ALT_HNP_SUPPORT features (only when
 * the core is HNP/SRP capable, otherwise EP0 is stalled).
 * Interface recipient: delegated to the gadget driver.
 * Endpoint recipient: ENDPOINT_HALT sets the STALL handshake on the EP.
 * Successful device/endpoint handling ends with the IN status phase.
 */
1593 static inline void do_set_feature(dwc_otg_pcd_t *pcd)
1595 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1596 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
1597 usb_device_request_t ctrl = pcd->setup_pkt->req;
1598 dwc_otg_pcd_ep_t *ep = 0;
1599 int32_t otg_cap_param = core_if->core_params->otg_cap;
1600 gotgctl_data_t gotgctl = {.d32 = 0 };
1602 DWC_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
1603 ctrl.bmRequestType, ctrl.bRequest,
1604 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1605 UGETW(ctrl.wLength));
1606 DWC_DEBUGPL(DBG_PCD, "otg_cap=%d\n", otg_cap_param);
1608 switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
1610 switch (UGETW(ctrl.wValue)) {
1611 case UF_DEVICE_REMOTE_WAKEUP:
1612 pcd->remote_wakeup_enable = 1;
1616 /* Setup the Test Mode tasklet to do the Test
1617 * Packet generation after the SETUP Status
1618 * phase has completed. */
1620 /** @todo This has not been tested since the
1621 * tasklet struct was put into the PCD
/* Test selector is the high byte of wIndex (USB 2.0, 9.4.9). */
1623 pcd->test_mode = UGETW(ctrl.wIndex) >> 8;
1624 DWC_TASK_SCHEDULE(pcd->test_mode_tasklet);
1627 case UF_DEVICE_B_HNP_ENABLE:
1628 DWC_DEBUGPL(DBG_PCDV,
1629 "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
1631 /* dev may initiate HNP */
1632 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1633 gotgctl.b.devhnpen = 1;
1634 if (core_if->otg_ver == 1)
1635 DWC_MODIFY_REG32(&global_regs->gotgctl,
1638 pcd->b_hnp_enable = 1;
1639 dwc_otg_pcd_update_otg(pcd, 0);
1640 DWC_DEBUGPL(DBG_PCD, "Request B HNP\n");
1641 /**@todo Is the gotgctl.devhnpen cleared
1642 * by a USB Reset? */
1643 gotgctl.b.hnpreq = 1;
1644 DWC_WRITE_REG32(&global_regs->gotgctl,
1648 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1653 case UF_DEVICE_A_HNP_SUPPORT:
1654 /* RH port supports HNP */
1655 DWC_DEBUGPL(DBG_PCDV,
1656 "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n");
1657 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1658 pcd->a_hnp_support = 1;
1659 dwc_otg_pcd_update_otg(pcd, 0);
1661 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1666 case UF_DEVICE_A_ALT_HNP_SUPPORT:
1667 /* other RH port does */
1668 DWC_DEBUGPL(DBG_PCDV,
1669 "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
1670 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1671 pcd->a_alt_hnp_support = 1;
1672 dwc_otg_pcd_update_otg(pcd, 0);
1674 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1680 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
/* Device-recipient requests finish with the IN status phase. */
1684 do_setup_in_status_phase(pcd);
/* Interface recipient: pass through to the gadget driver. */
1688 do_gadget_setup(pcd, &ctrl);
/* Endpoint recipient: ENDPOINT_HALT stalls the addressed EP. */
1692 if (UGETW(ctrl.wValue) == UF_ENDPOINT_HALT) {
1693 ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
1695 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1699 dwc_otg_ep_set_stall(core_if, &ep->dwc_ep);
1701 do_setup_in_status_phase(pcd);
1707 * This function process the CLEAR_FEATURE Setup Commands.
/*
 * Handle the standard CLEAR_FEATURE request on EP0.
 * Device recipient: REMOTE_WAKEUP disable (test modes not implemented —
 * see @todo); anything else stalls EP0, then the IN status phase runs.
 * Endpoint recipient: look up the EP by address and clear its halt via
 * pcd_clear_halt() (which also runs the status phase); an unknown EP
 * stalls EP0.
 */
1709 static inline void do_clear_feature(dwc_otg_pcd_t *pcd)
1711 usb_device_request_t ctrl = pcd->setup_pkt->req;
1712 dwc_otg_pcd_ep_t *ep = 0;
1714 DWC_DEBUGPL(DBG_PCD,
1715 "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
1716 ctrl.bmRequestType, ctrl.bRequest,
1717 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1718 UGETW(ctrl.wLength));
1720 switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
1722 switch (UGETW(ctrl.wValue)) {
1723 case UF_DEVICE_REMOTE_WAKEUP:
1724 pcd->remote_wakeup_enable = 0;
1728 /** @todo Add CLEAR_FEATURE for TEST modes. */
1732 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1735 do_setup_in_status_phase(pcd);
1739 ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
1741 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1745 pcd_clear_halt(pcd, ep);
1752 * This function process the SET_ADDRESS Setup Commands.
/*
 * Handle the standard SET_ADDRESS request: program the new device address
 * (wValue) into DCFG.devaddr and run the IN status phase.  Only honored
 * when bmRequestType addresses the device recipient.
 */
1754 static inline void do_set_address(dwc_otg_pcd_t *pcd)
1756 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
1757 usb_device_request_t ctrl = pcd->setup_pkt->req;
1759 if (ctrl.bmRequestType == UT_DEVICE) {
1760 dcfg_data_t dcfg = {.d32 = 0 };
1763 /* DWC_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n", ctrl.wValue); */
1765 dcfg.b.devaddr = UGETW(ctrl.wValue);
1766 DWC_MODIFY_REG32(&dev_if->dev_global_regs->dcfg, 0, dcfg.d32);
1767 do_setup_in_status_phase(pcd);
1772 * This function processes SETUP commands. In Linux, the USB Command
1773 * processing is done in two places - the first being the PCD and the
1774 * second in the Gadget Driver (for example, the File-Backed Storage
1778 * <tr><td>Command </td><td>Driver </td><td>Description</td></tr>
1780 * <tr><td>GET_STATUS </td><td>PCD </td><td>Command is processed as
1781 * defined in chapter 9 of the USB 2.0 Specification chapter 9
1784 * <tr><td>CLEAR_FEATURE </td><td>PCD </td><td>The Device and Endpoint
1785 * requests with the ENDPOINT_HALT feature are processed; all other
1786 * interface requests are ignored.</td></tr>
1788 * <tr><td>SET_FEATURE </td><td>PCD </td><td>The Device and Endpoint
1789 * requests are processed by the PCD. Interface requests are passed
1790 * to the Gadget Driver.</td></tr>
1792 * <tr><td>SET_ADDRESS </td><td>PCD </td><td>Program the DCFG reg,
1793 * with device address received </td></tr>
1795 * <tr><td>GET_DESCRIPTOR </td><td>Gadget Driver </td><td>Return the
1796 * requested descriptor</td></tr>
1798 * <tr><td>SET_DESCRIPTOR </td><td>Gadget Driver </td><td>Optional -
1799 * not implemented by any of the existing Gadget Drivers.</td></tr>
1801 * <tr><td>SET_CONFIGURATION </td><td>Gadget Driver </td><td>Disable
1802 * all EPs and enable EPs for new configuration.</td></tr>
1804 * <tr><td>GET_CONFIGURATION </td><td>Gadget Driver </td><td>Return
1805 * the current configuration</td></tr>
1807 * <tr><td>SET_INTERFACE </td><td>Gadget Driver </td><td>Disable all
1808 * EPs and enable EPs for new configuration.</td></tr>
1810 * <tr><td>GET_INTERFACE </td><td>Gadget Driver </td><td>Return the
1811 * current interface.</td></tr>
1813 * <tr><td>SYNC_FRAME </td><td>PCD </td><td>Display debug
1814 * message.</td></tr>
1817 * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are
1818 * processed by pcd_setup. Calling the Function Driver's setup function from
1819 *pcd_setup processes the gadget SETUP commands.
/*
 * Decode and dispatch a SETUP packet (called from the SETUP-phase-done
 * interrupt path).  Determines the data-phase direction from
 * bmRequestType, routes non-standard (class/vendor) requests to the CFI
 * layer or the gadget driver, and handles the standard requests either
 * locally (CLEAR_FEATURE / SET_FEATURE / SET_ADDRESS) or via the gadget.
 * NOTE(review): this listing elides lines throughout (e.g. the CFI
 * retval checks and several case labels of the bRequest switch).
 */
1821 static inline void pcd_setup(dwc_otg_pcd_t *pcd)
1823 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1824 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
1825 usb_device_request_t ctrl = pcd->setup_pkt->req;
1826 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1828 deptsiz0_data_t doeptsize0 = {.d32 = 0 };
1832 struct cfi_usb_ctrlrequest cfi_req;
1835 doeptsize0.d32 = DWC_READ_REG32(&dev_if->out_ep_regs[0]->doeptsiz);
1838 DWC_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1839 ctrl.bmRequestType, ctrl.bRequest,
1840 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1841 UGETW(ctrl.wLength));
1844 /* Clean up the request queue */
1845 dwc_otg_request_nuke(ep0);
/* Pick the data-phase direction from bit7 of bmRequestType. */
1848 if (ctrl.bmRequestType & UE_DIR_IN) {
1849 ep0->dwc_ep.is_in = 1;
1850 pcd->ep0state = EP0_IN_DATA_PHASE;
1852 ep0->dwc_ep.is_in = 0;
1853 pcd->ep0state = EP0_OUT_DATA_PHASE;
/* No data phase: go straight to the IN status phase. */
1856 if (UGETW(ctrl.wLength) == 0) {
1857 ep0->dwc_ep.is_in = 1;
1858 pcd->ep0state = EP0_IN_STATUS_PHASE;
/* Non-standard (class/vendor) requests: try CFI first, else the gadget. */
1861 if (UT_GET_TYPE(ctrl.bmRequestType) != UT_STANDARD) {
1864 DWC_MEMCPY(&cfi_req, &ctrl, sizeof(usb_device_request_t));
1866 /* printk(KERN_ALERT "CFI: req_type=0x%02x; req=0x%02x\n",
1867 * ctrl.bRequestType, ctrl.bRequest); */
/* CFI handles vendor requests in the 0xB1..0xBE bRequest window. */
1868 if (UT_GET_TYPE(cfi_req.bRequestType) == UT_VENDOR) {
1869 if (cfi_req.bRequest > 0xB0 && cfi_req.bRequest < 0xBF) {
1870 retval = cfi_setup(pcd, &cfi_req);
1872 ep0_do_stall(pcd, retval);
1873 pcd->ep0_pending = 0;
1877 /* if need gadget setup then call it and check the retval */
1878 if (pcd->cfi->need_gadget_att) {
1880 cfi_gadget_setup(pcd,
1884 pcd->ep0_pending = 0;
1889 if (pcd->cfi->need_status_in_complete) {
1890 do_setup_in_status_phase(pcd);
1897 /* handle non-standard (class/vendor) requests in the gadget driver */
1898 do_gadget_setup(pcd, &ctrl);
1902 /** @todo NGS: Handle bad setup packet? */
1904 /* --- Standard Request handling --- */
1906 switch (ctrl.bRequest) {
1911 case UR_CLEAR_FEATURE:
1912 do_clear_feature(pcd);
1915 case UR_SET_FEATURE:
1916 do_set_feature(pcd);
1919 case UR_SET_ADDRESS:
1920 do_set_address(pcd);
1923 case UR_SET_INTERFACE:
1925 /* _pcd->request_config = 1; */ /* Configuration changed */
1926 do_gadget_setup(pcd, &ctrl);
1929 case UR_SYNCH_FRAME:
1930 do_gadget_setup(pcd, &ctrl);
1934 /* Call the Gadget Driver's setup functions */
1935 do_gadget_setup(pcd, &ctrl);
1941 * This function completes the ep0 control transfer.
/*
 * Complete the current EP0 control transfer.
 * - If a PCD-internal request is pending (ep0_pending) with an empty
 *   queue, run the appropriate status phase (OUT after an IN data phase,
 *   IN otherwise), including the CFI write-completion path for vendor
 *   requests in the 0xB1..0xBE window.
 * - Otherwise, for a queued gadget request, check the transfer counters
 *   (DIEPTSIZ/DOEPTSIZ, or the DMA descriptor status in descriptor-DMA
 *   mode), honor a requested trailing ZLP, run the opposite-direction
 *   status phase, and complete the request via dwc_otg_request_done().
 * NOTE(review): this listing elides lines (return statements, some
 * braces/else branches); comments below describe only the visible flow.
 */
1943 static int32_t ep0_complete_request(dwc_otg_pcd_ep_t *ep)
1945 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
1946 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
1947 dwc_otg_dev_in_ep_regs_t *in_ep_regs =
1948 dev_if->in_ep_regs[ep->dwc_ep.num];
1950 dwc_otg_dev_out_ep_regs_t *out_ep_regs =
1951 dev_if->out_ep_regs[ep->dwc_ep.num];
1953 deptsiz0_data_t deptsiz;
1954 dev_dma_desc_sts_t desc_sts;
1955 dwc_otg_pcd_request_t *req;
1957 dwc_otg_pcd_t *pcd = ep->pcd;
1960 struct cfi_usb_ctrlrequest *ctrlreq;
1961 int retval = -DWC_E_NOT_SUPPORTED;
/* PCD-internal request with nothing queued: run the status phase. */
1964 if (pcd->ep0_pending && DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1965 if (ep->dwc_ep.is_in) {
1967 DWC_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n");
1969 do_setup_out_status_phase(pcd);
1972 DWC_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n");
1976 ctrlreq = &pcd->cfi->ctrl_req;
/* CFI vendor-request completion: notify the PCD (and optionally the
 * gadget) that the control-write data has arrived. */
1978 if (UT_GET_TYPE(ctrlreq->bRequestType) == UT_VENDOR) {
1979 if (ctrlreq->bRequest > 0xB0
1980 && ctrlreq->bRequest < 0xBF) {
1982 /* Return if the PCD failed to handle the request */
1983 retval = pcd->cfi->ops.
1984 ctrl_write_complete(pcd->cfi, pcd);
1987 ("ERROR setting a new value in the PCD(%d)\n",
1989 ep0_do_stall(pcd, retval);
1990 pcd->ep0_pending = 0;
1994 /* If the gadget needs to be notified on the request */
1995 if (pcd->cfi->need_gadget_att == 1) {
1996 /* retval = do_gadget_setup(pcd, &pcd->cfi->ctrl_req); */
1998 cfi_gadget_setup(pcd,
2002 /* Return from the function if the gadget failed to process
2003 * the request properly - this should never happen !!!
2007 ("ERROR setting a new value in the gadget(%d)\n",
2009 pcd->ep0_pending = 0;
2014 CFI_INFO("%s: RETVAL=%d\n", __func__,
2016 /* If we hit here then the PCD and the gadget has properly
2017 * handled the request - so send the ZLP IN to the host.
2019 /* @todo: MAS - decide whether we need to start the setup
2020 * stage based on the need_setup value of the cfi object
2022 do_setup_in_status_phase(pcd);
2023 pcd->ep0_pending = 0;
2029 do_setup_in_status_phase(pcd);
2031 pcd->ep0_pending = 0;
/* Nothing queued and nothing pending: nothing to complete. */
2035 if (DWC_CIRCLEQ_EMPTY(&ep->queue)) {
2038 req = DWC_CIRCLEQ_FIRST(&ep->queue);
2040 if (pcd->ep0state == EP0_OUT_STATUS_PHASE
2041 || pcd->ep0state == EP0_IN_STATUS_PHASE) {
/* IN data phase completion: check DIEPTSIZ (or descriptor status). */
2043 } else if (ep->dwc_ep.is_in) {
2044 deptsiz.d32 = DWC_READ_REG32(&in_ep_regs->dieptsiz);
2045 if (core_if->dma_desc_enable != 0)
2046 desc_sts = dev_if->in_desc_addr->status;
2048 DWC_DEBUGPL(DBG_PCDV, "%d len=%d xfersize=%d pktcnt=%d\n",
2049 ep->dwc_ep.num, ep->dwc_ep.xfer_len,
2050 deptsiz.b.xfersize, deptsiz.b.pktcnt);
/* Transfer done when xfersize (Buffer DMA/Slave) or descriptor byte
 * count (Descriptor DMA) has drained to zero. */
2053 if (((core_if->dma_desc_enable == 0)
2054 && (deptsiz.b.xfersize == 0))
2055 || ((core_if->dma_desc_enable != 0)
2056 && (desc_sts.b.bytes == 0))) {
2057 req->actual = ep->dwc_ep.xfer_count;
2058 /* Is a Zero Len Packet needed? */
2059 if (req->sent_zlp) {
2061 DWC_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n");
2065 do_setup_out_status_phase(pcd);
/* OUT data phase completion: check DOEPTSIZ. */
2070 deptsiz.d32 = DWC_READ_REG32(&out_ep_regs->doeptsiz);
2071 DWC_DEBUGPL(DBG_PCDV, "%d len=%d xsize=%d pktcnt=%d\n",
2072 ep->dwc_ep.num, ep->dwc_ep.xfer_len,
2073 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2075 req->actual = ep->dwc_ep.xfer_count;
2077 /* Is a Zero Len Packet needed? */
2078 if (req->sent_zlp) {
2080 DWC_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n");
2084 /* For older cores do setup in status phase in Slave/BDMA modes,
2085 * starting from 3.00 do that only in slave, and for DMA modes
2086 * just re-enable ep 0 OUT here*/
2087 do_setup_in_status_phase(pcd);
2090 /* Complete the request */
2092 dwc_otg_request_done(ep, req, 0);
2093 ep->dwc_ep.start_xfer_buff = 0;
2094 ep->dwc_ep.xfer_buff = 0;
2095 ep->dwc_ep.xfer_len = 0;
2103 * This function traverses all the CFI DMA descriptors and
2104 * accumulates the bytes that are left to be transferred.
2106 * @return The total bytes left to be transferred, or a negative value on failure
/*
 * Sum the residual byte counts of all CFI DMA descriptors for an EP.
 * Looks up the EP's cfi_ep mapping (error return elided in this listing
 * if none exists), then accumulates status.b.bytes over the descriptor
 * chain, bounded by MAX_DMA_DESCS_PER_EP.  Returns the total residue.
 */
2108 static inline int cfi_calc_desc_residue(dwc_otg_pcd_ep_t *ep)
2112 struct dwc_otg_dma_desc *ddesc = NULL;
2113 struct cfi_ep *cfiep;
2115 /* See if the pcd_ep has its respective cfi_ep mapped */
2116 cfiep = get_cfi_ep_by_pcd_ep(ep->pcd->cfi, ep);
2118 CFI_INFO("%s: Failed to find ep\n", __func__);
2122 ddesc = ep->dwc_ep.descs;
2124 for (i = 0; (i < cfiep->desc_count) && (i < MAX_DMA_DESCS_PER_EP); i++) {
2126 #if defined(PRINT_CFI_DMA_DESCS)
2127 print_desc(ddesc, ep->ep.name, i);
/* Accumulate the bytes still outstanding in this descriptor. */
2129 ret += ddesc->status.b.bytes;
2134 CFI_INFO("!!!!!!!!!! WARNING (%s) - residue=%d\n", __func__,
2142 * This function completes the request for the EP. If there are
2143 * additional requests for the EP in the queue they will be started.
/*
 * Complete the transfer in progress on a non-EP0 endpoint and, if more
 * requests are queued, start the next one.
 *
 * For IN endpoints: in Buffer-DMA mode, completion is detected via
 * DIEPTSIZ xfersize/pktcnt reaching zero; in Descriptor-DMA mode, via the
 * per-descriptor residual byte counts (CFI buffer modes use
 * cfi_calc_desc_residue()).  For OUT endpoints, the byte count comes
 * from the descriptor chain (Descriptor DMA) or from DOEPTSIZ (Buffer
 * DMA), with a special xfer_len adjustment for interrupt OUT EPs whose
 * max packet size is not dword-aligned.  In all modes, a partially
 * completed request is continued with another transfer, and a queued
 * trailing ZLP (sent_zlp) is initiated before final completion.
 *
 * NOTE(review): this listing elides many lines (else branches, braces,
 * several expressions); comments describe only the visible flow.
 */
2145 static void complete_ep(dwc_otg_pcd_ep_t *ep)
2147 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
2148 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
2149 dwc_otg_dev_in_ep_regs_t *in_ep_regs =
2150 dev_if->in_ep_regs[ep->dwc_ep.num];
2151 deptsiz_data_t deptsiz;
2152 dev_dma_desc_sts_t desc_sts;
2153 dwc_otg_pcd_request_t *req = 0;
2154 dwc_otg_dev_dma_desc_t *dma_desc;
2155 uint32_t byte_count = 0;
2159 DWC_DEBUGPL(DBG_PCDV, "%s() %d-%s\n", __func__, ep->dwc_ep.num,
2160 (ep->dwc_ep.is_in ? "IN" : "OUT"));
2162 /* Get any pending requests */
2163 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
2164 req = DWC_CIRCLEQ_FIRST(&ep->queue);
2166 DWC_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
2170 DWC_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
2174 DWC_DEBUGPL(DBG_PCD, "Requests %d\n", ep->pcd->request_pending);
/* ---------- IN endpoint completion ---------- */
2176 if (ep->dwc_ep.is_in) {
2177 deptsiz.d32 = DWC_READ_REG32(&in_ep_regs->dieptsiz);
2179 if (core_if->dma_enable) {
/* Buffer DMA: done when both xfersize and pktcnt drained to zero. */
2180 if (core_if->dma_desc_enable == 0) {
2181 if (deptsiz.b.xfersize == 0
2182 && deptsiz.b.pktcnt == 0) {
2184 ep->dwc_ep.xfer_len -
2185 ep->dwc_ep.xfer_count;
2187 ep->dwc_ep.xfer_buff += byte_count;
2188 ep->dwc_ep.dma_addr += byte_count;
2189 ep->dwc_ep.xfer_count += byte_count;
2191 DWC_DEBUGPL(DBG_PCDV,
2192 "%d-%s len=%d xfersize=%d pktcnt=%d\n",
2194 (ep->dwc_ep.is_in ? "IN" :
2196 ep->dwc_ep.xfer_len,
/* Continue a partially completed request, or send the trailing ZLP. */
2200 if (ep->dwc_ep.xfer_len <
2201 ep->dwc_ep.total_len) {
2202 dwc_otg_ep_start_transfer
2203 (core_if, &ep->dwc_ep);
2204 } else if (ep->dwc_ep.sent_zlp) {
2206 * This fragment of code should initiate 0
2207 * length transfer in case if it is queued
2208 * a transfer with size divisible to EPs max
2209 * packet size and with usb_request zero field
2210 * is set, which means that after data is transfered,
2211 * it is also should be transfered
2212 * a 0 length packet at the end. For Slave and
2213 * Buffer DMA modes in this case SW has
2214 * to initiate 2 transfers one with transfer size,
2215 * and the second with 0 size. For Descriptor
2216 * DMA mode SW is able to initiate a transfer,
2217 * which will handle all the packets including
2218 * the last 0 length.
2220 ep->dwc_ep.sent_zlp = 0;
2221 dwc_otg_ep_start_zl_transfer
2222 (core_if, &ep->dwc_ep);
2227 if (ep->dwc_ep.type ==
2228 DWC_OTG_EP_TYPE_ISOC) {
2230 dwc_otg_request_done(ep, req,
2233 ep->dwc_ep.start_xfer_buff = 0;
2234 ep->dwc_ep.xfer_buff = 0;
2235 ep->dwc_ep.xfer_len = 0;
2237 /* If there is a request in the queue start it. */
2238 start_next_request(ep);
2241 ("Incomplete transfer (%d - %s [siz=%d pkt=%d])\n",
2244 dwc_ep.is_in ? "IN" :
/* Descriptor DMA: residue comes from the descriptor chain. */
2250 dma_desc = ep->dwc_ep.desc_addr;
2252 ep->dwc_ep.sent_zlp = 0;
2255 CFI_INFO("%s: BUFFER_MODE=%d\n", __func__,
2256 ep->dwc_ep.buff_mode);
/* CFI non-standard buffer modes use the CFI residue calculation. */
2257 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2260 residue = cfi_calc_desc_residue(ep);
2264 byte_count = residue;
2267 for (i = 0; i < ep->dwc_ep.desc_cnt;
2269 desc_sts = dma_desc->status;
2270 byte_count += desc_sts.b.bytes;
2276 if (byte_count == 0) {
2277 ep->dwc_ep.xfer_count =
2278 ep->dwc_ep.total_len;
2281 DWC_WARN("Incomplete transfer\n");
/* Slave mode: done when DIEPTSIZ xfersize and pktcnt both reach zero. */
2285 if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) {
2286 DWC_DEBUGPL(DBG_PCDV,
2287 "%d-%s len=%d xfersize=%d pktcnt=%d\n",
2289 ep->dwc_ep.is_in ? "IN" : "OUT",
2290 ep->dwc_ep.xfer_len,
2294 /* Check if the whole transfer was completed,
2295 * if no, setup transfer for next portion of data
2297 if (ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
2298 dwc_otg_ep_start_transfer(core_if,
2300 } else if (ep->dwc_ep.sent_zlp) {
2302 * This fragment of code should initiate 0
2303 * length transfer in case if it is queued
2304 * a transfer with size divisible to EPs max
2305 * packet size and with usb_request zero field
2306 * is set, which means that after data is transfered,
2307 * it is also should be transfered
2308 * a 0 length packet at the end. For Slave and
2309 * Buffer DMA modes in this case SW has
2310 * to initiate 2 transfers one with transfer size,
2311 * and the second with 0 size. For Descriptor
2312 * DMA mode SW is able to initiate a transfer,
2313 * which will handle all the packets including
2316 ep->dwc_ep.sent_zlp = 0;
2317 dwc_otg_ep_start_zl_transfer(core_if,
2324 ("Incomplete transfer (%d-%s [siz=%d pkt=%d])\n",
2326 (ep->dwc_ep.is_in ? "IN" : "OUT"),
2327 deptsiz.b.xfersize, deptsiz.b.pktcnt);
/* ---------- OUT endpoint completion ---------- */
2331 dwc_otg_dev_out_ep_regs_t *out_ep_regs =
2332 dev_if->out_ep_regs[ep->dwc_ep.num];
2334 if (core_if->dma_enable) {
/* Descriptor DMA: accumulate residue over the descriptor chain. */
2335 if (core_if->dma_desc_enable) {
2336 dma_desc = ep->dwc_ep.desc_addr;
2338 ep->dwc_ep.sent_zlp = 0;
2341 CFI_INFO("%s: BUFFER_MODE=%d\n", __func__,
2342 ep->dwc_ep.buff_mode);
2343 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2345 residue = cfi_calc_desc_residue(ep);
2348 byte_count = residue;
2352 for (i = 0; i < ep->dwc_ep.desc_cnt;
2354 desc_sts = dma_desc->status;
2355 byte_count += desc_sts.b.bytes;
2362 /* Checking for interrupt Out transfers with not
2363 * dword aligned mps sizes
2365 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_INTR &&
2366 (ep->dwc_ep.maxpacket % 4)) {
2367 ep->dwc_ep.xfer_count =
2368 ep->dwc_ep.total_len - byte_count;
2369 if ((ep->dwc_ep.xfer_len %
2370 ep->dwc_ep.maxpacket)
2371 && (ep->dwc_ep.xfer_len /
2372 ep->dwc_ep.maxpacket <
2374 ep->dwc_ep.xfer_len -=
2375 (ep->dwc_ep.desc_cnt -
2376 1)*ep->dwc_ep.maxpacket +
2377 ep->dwc_ep.xfer_len %
2378 ep->dwc_ep.maxpacket;
2380 ep->dwc_ep.xfer_len -=
2381 ep->dwc_ep.desc_cnt *
2382 ep->dwc_ep.maxpacket;
2383 if (ep->dwc_ep.xfer_len > 0) {
2384 dwc_otg_ep_start_transfer
2385 (core_if, &ep->dwc_ep);
2390 ep->dwc_ep.xfer_count =
2391 ep->dwc_ep.total_len - byte_count +
2394 total_len & 0x3)) & 0x3);
/* Buffer DMA: derive the byte count from DOEPTSIZ residue. */
2400 DWC_READ_REG32(&out_ep_regs->doeptsiz);
2402 byte_count = (ep->dwc_ep.xfer_len -
2403 ep->dwc_ep.xfer_count -
2404 deptsiz.b.xfersize);
2405 ep->dwc_ep.xfer_buff += byte_count;
2406 ep->dwc_ep.dma_addr += byte_count;
2407 ep->dwc_ep.xfer_count += byte_count;
2409 /* Check if the whole transfer was completed,
2410 * if no, setup transfer for next portion of data
2412 if (ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
2413 dwc_otg_ep_start_transfer(core_if,
2415 } else if (ep->dwc_ep.sent_zlp) {
2417 * This fragment of code should initiate 0
2418 * length transfer in case if it is queued
2419 * a transfer with size divisible to EPs max
2420 * packet size and with usb_request zero field
2421 * is set, which means that after data is transfered,
2422 * it is also should be transfered
2423 * a 0 length packet at the end. For Slave and
2424 * Buffer DMA modes in this case SW has
2425 * to initiate 2 transfers one with transfer size,
2426 * and the second with 0 size. For Descriptor
2427 * DMA mode SW is able to initiate a transfer,
2428 * which will handle all the packets including
2431 ep->dwc_ep.sent_zlp = 0;
2432 dwc_otg_ep_start_zl_transfer(core_if,
/* Slave mode OUT: continue or finish the transfer. */
2439 /* Check if the whole transfer was completed,
2440 * if no, setup transfer for next portion of data
2442 if (ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
2443 dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
2444 } else if (ep->dwc_ep.sent_zlp) {
2446 * This fragment of code should initiate 0
2447 * length transfer in case if it is queued
2448 * a transfer with size divisible to EPs max
2449 * packet size and with usb_request zero field
2450 * is set, which means that after data is transfered,
2451 * it is also should be transfered
2452 * a 0 length packet at the end. For Slave and
2453 * Buffer DMA modes in this case SW has
2454 * to initiate 2 transfers one with transfer size,
2455 * and the second with 0 size. For Descriptor
2456 * DMA mode SW is able to initiate a transfer,
2457 * which will handle all the packets including
2458 * the last 0 length.
2460 ep->dwc_ep.sent_zlp = 0;
2461 dwc_otg_ep_start_zl_transfer(core_if,
2468 DWC_DEBUGPL(DBG_PCDV,
2469 "addr %p, %d-%s len=%d cnt=%d xsize=%d pktcnt=%d\n",
2470 &out_ep_regs->doeptsiz, ep->dwc_ep.num,
2471 ep->dwc_ep.is_in ? "IN" : "OUT",
2472 ep->dwc_ep.xfer_len, ep->dwc_ep.xfer_count,
2473 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2476 /* Complete the request */
/* CFI buffer modes report actual = requested length minus residue. */
2479 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2480 req->actual = ep->dwc_ep.cfi_req_len - byte_count;
2483 req->actual = ep->dwc_ep.xfer_count;
/* If a bounce buffer was used for alignment, copy back and free it. */
2487 if (req->dw_align_buf) {
2488 if (!ep->dwc_ep.is_in) {
2489 dwc_memcpy(req->buf, req->dw_align_buf,
2492 DWC_DEV_DMA_FREE(req->length, req->dw_align_buf,
2493 req->dw_align_buf_dma);
2496 dwc_otg_request_done(ep, req, 0);
2498 ep->dwc_ep.start_xfer_buff = 0;
2499 ep->dwc_ep.xfer_buff = 0;
2500 ep->dwc_ep.xfer_len = 0;
2502 /* If there is a request in the queue start it. */
2503 start_next_request(ep);
2510 * This function handles the BNA interrupt for Isochronous EPs
2513 static void dwc_otg_pcd_handle_iso_bna(dwc_otg_pcd_ep_t *ep)
2515 dwc_ep_t *dwc_ep = &ep->dwc_ep;
2516 volatile uint32_t *addr;
2517 depctl_data_t depctl = {
2519 dwc_otg_pcd_t *pcd = ep->pcd;
2520 dwc_otg_dev_dma_desc_t *dma_desc;
2524 dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * (dwc_ep->proc_buf_num);
2526 if (dwc_ep->is_in) {
2527 dev_dma_desc_sts_t sts = {
2529 for (i = 0; i < dwc_ep->desc_cnt; ++i, ++dma_desc) {
2530 sts.d32 = dma_desc->status.d32;
2531 sts.b_iso_in.bs = BS_HOST_READY;
2532 dma_desc->status.d32 = sts.d32;
2535 dev_dma_desc_sts_t sts = {
2537 for (i = 0; i < dwc_ep->desc_cnt; ++i, ++dma_desc) {
2538 sts.d32 = dma_desc->status.d32;
2539 sts.b_iso_out.bs = BS_HOST_READY;
2540 dma_desc->status.d32 = sts.d32;
2544 if (dwc_ep->is_in == 0) {
2546 &GET_CORE_IF(pcd)->dev_if->out_ep_regs[dwc_ep->
2550 &GET_CORE_IF(pcd)->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
2553 DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
2557 * This function sets latest iso packet information(non-PTI mode)
2559 * @param core_if Programming view of DWC_otg controller.
2560 * @param ep The EP to start the transfer on.
2563 void set_current_pkt_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2565 deptsiz_data_t deptsiz = {
2567 dma_addr_t dma_addr;
2570 if (ep->proc_buf_num)
2571 dma_addr = ep->dma_addr1;
2573 dma_addr = ep->dma_addr0;
2577 DWC_READ_REG32(&core_if->dev_if->
2578 in_ep_regs[ep->num]->dieptsiz);
2579 offset = ep->data_per_frame;
2582 DWC_READ_REG32(&core_if->dev_if->
2583 out_ep_regs[ep->num]->doeptsiz);
2585 ep->data_per_frame +
2586 (0x4 & (0x4 - (ep->data_per_frame & 0x3)));
2589 if (!deptsiz.b.xfersize) {
2590 ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame;
2591 ep->pkt_info[ep->cur_pkt].offset =
2592 ep->cur_pkt_dma_addr - dma_addr;
2593 ep->pkt_info[ep->cur_pkt].status = 0;
2595 ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame;
2596 ep->pkt_info[ep->cur_pkt].offset =
2597 ep->cur_pkt_dma_addr - dma_addr;
2598 ep->pkt_info[ep->cur_pkt].status = -DWC_E_NO_DATA;
2600 ep->cur_pkt_addr += offset;
2601 ep->cur_pkt_dma_addr += offset;
2606 * This function sets latest iso packet information(DDMA mode)
2608 * @param core_if Programming view of DWC_otg controller.
2609 * @param dwc_ep The EP to start the transfer on.
2612 static void set_ddma_iso_pkts_info(dwc_otg_core_if_t *core_if,
2614 dwc_otg_dev_dma_desc_t *dma_desc;
2615 dev_dma_desc_sts_t sts = {
2617 iso_pkt_info_t *iso_packet;
2618 uint32_t data_per_desc;
2622 iso_packet = dwc_ep->pkt_info;
2624 /** Reinit closed DMA Descriptors*/
2626 if (dwc_ep->is_in == 0) {
2628 dwc_ep->iso_desc_addr +
2629 dwc_ep->desc_cnt*dwc_ep->proc_buf_num;
2632 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
2633 i += dwc_ep->pkt_per_frm) {
2634 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
2636 ((j + 1)*dwc_ep->maxpacket >
2638 data_per_frame) ? dwc_ep->data_per_frame -
2639 j*dwc_ep->maxpacket : dwc_ep->maxpacket;
2641 (data_per_desc % 4) ? (4 -
2645 sts.d32 = dma_desc->status.d32;
2647 /* Write status in iso_packet_decsriptor */
2648 iso_packet->status =
2649 sts.b_iso_out.rxsts +
2650 (sts.b_iso_out.bs ^ BS_DMA_DONE);
2651 if (iso_packet->status) {
2652 iso_packet->status = -DWC_E_NO_DATA;
2655 /* Received data length */
2656 if (!sts.b_iso_out.rxbytes) {
2657 iso_packet->length =
2659 sts.b_iso_out.rxbytes;
2661 iso_packet->length =
2663 sts.b_iso_out.rxbytes + (4 -
2664 dwc_ep->data_per_frame
2668 iso_packet->offset = offset;
2670 offset += data_per_desc;
2676 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
2678 ((j + 1)*dwc_ep->maxpacket >
2679 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
2680 j*dwc_ep->maxpacket : dwc_ep->maxpacket;
2682 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
2684 sts.d32 = dma_desc->status.d32;
2686 /* Write status in iso_packet_decsriptor */
2687 iso_packet->status =
2688 sts.b_iso_out.rxsts +
2689 (sts.b_iso_out.bs ^ BS_DMA_DONE);
2690 if (iso_packet->status) {
2691 iso_packet->status = -DWC_E_NO_DATA;
2694 /* Received data length */
2695 iso_packet->length =
2696 dwc_ep->data_per_frame - sts.b_iso_out.rxbytes;
2698 iso_packet->offset = offset;
2700 offset += data_per_desc;
2705 sts.d32 = dma_desc->status.d32;
2707 /* Write status in iso_packet_decsriptor */
2708 iso_packet->status =
2709 sts.b_iso_out.rxsts + (sts.b_iso_out.bs ^ BS_DMA_DONE);
2710 if (iso_packet->status) {
2711 iso_packet->status = -DWC_E_NO_DATA;
2713 /* Received data length */
2714 if (!sts.b_iso_out.rxbytes) {
2715 iso_packet->length =
2716 dwc_ep->data_per_frame - sts.b_iso_out.rxbytes;
2718 iso_packet->length =
2719 dwc_ep->data_per_frame - sts.b_iso_out.rxbytes +
2720 (4 - dwc_ep->data_per_frame % 4);
2723 iso_packet->offset = offset;
2728 dwc_ep->iso_desc_addr +
2729 dwc_ep->desc_cnt*dwc_ep->proc_buf_num;
2731 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
2732 sts.d32 = dma_desc->status.d32;
2734 /* Write status in iso packet descriptor */
2735 iso_packet->status =
2736 sts.b_iso_in.txsts +
2737 (sts.b_iso_in.bs ^ BS_DMA_DONE);
2738 if (iso_packet->status != 0) {
2739 iso_packet->status = -DWC_E_NO_DATA;
2742 /* Bytes has been transfered */
2743 iso_packet->length =
2744 dwc_ep->data_per_frame - sts.b_iso_in.txbytes;
2750 sts.d32 = dma_desc->status.d32;
2751 while (sts.b_iso_in.bs == BS_DMA_BUSY) {
2752 sts.d32 = dma_desc->status.d32;
2755 /* Write status in iso packet descriptor ??? do be done with ERROR codes */
2756 iso_packet->status =
2757 sts.b_iso_in.txsts + (sts.b_iso_in.bs ^ BS_DMA_DONE);
2758 if (iso_packet->status != 0) {
2759 iso_packet->status = -DWC_E_NO_DATA;
2762 /* Bytes has been transfered */
2763 iso_packet->length =
2764 dwc_ep->data_per_frame - sts.b_iso_in.txbytes;
2769 * This function reinitializes the DMA Descriptors for an Isochronous transfer
2771 * @param core_if Programming view of DWC_otg controller.
2772 * @param dwc_ep The EP to start the transfer on.
2775 static void reinit_ddma_iso_xfer(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep)
2778 dwc_otg_dev_dma_desc_t *dma_desc;
2780 volatile uint32_t *addr;
2781 dev_dma_desc_sts_t sts = {
2783 uint32_t data_per_desc;
2785 if (dwc_ep->is_in == 0) {
2786 addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
2788 addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
2791 if (dwc_ep->proc_buf_num == 0) {
2792 /** Buffer 0 descriptors setup */
2793 dma_ad = dwc_ep->dma_addr0;
2795 /** Buffer 1 descriptors setup */
2796 dma_ad = dwc_ep->dma_addr1;
2799 /** Reinit closed DMA Descriptors*/
2801 if (dwc_ep->is_in == 0) {
2803 dwc_ep->iso_desc_addr +
2804 dwc_ep->desc_cnt*dwc_ep->proc_buf_num;
2806 sts.b_iso_out.bs = BS_HOST_READY;
2807 sts.b_iso_out.rxsts = 0;
2808 sts.b_iso_out.l = 0;
2809 sts.b_iso_out.sp = 0;
2810 sts.b_iso_out.ioc = 0;
2811 sts.b_iso_out.pid = 0;
2812 sts.b_iso_out.framenum = 0;
2814 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
2815 i += dwc_ep->pkt_per_frm) {
2816 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
2818 ((j + 1)*dwc_ep->maxpacket >
2820 data_per_frame) ? dwc_ep->data_per_frame -
2821 j*dwc_ep->maxpacket : dwc_ep->maxpacket;
2823 (data_per_desc % 4) ? (4 -
2826 sts.b_iso_out.rxbytes = data_per_desc;
2827 dma_desc->buf = dma_ad;
2828 dma_desc->status.d32 = sts.d32;
2830 dma_ad += data_per_desc;
2835 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
2838 ((j + 1)*dwc_ep->maxpacket >
2839 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
2840 j*dwc_ep->maxpacket : dwc_ep->maxpacket;
2842 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
2843 sts.b_iso_out.rxbytes = data_per_desc;
2845 dma_desc->buf = dma_ad;
2846 dma_desc->status.d32 = sts.d32;
2849 dma_ad += data_per_desc;
2852 sts.b_iso_out.ioc = 1;
2853 sts.b_iso_out.l = dwc_ep->proc_buf_num;
2856 ((j + 1)*dwc_ep->maxpacket >
2857 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
2858 j*dwc_ep->maxpacket : dwc_ep->maxpacket;
2860 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
2861 sts.b_iso_out.rxbytes = data_per_desc;
2863 dma_desc->buf = dma_ad;
2864 dma_desc->status.d32 = sts.d32;
2869 dwc_ep->iso_desc_addr +
2870 dwc_ep->desc_cnt*dwc_ep->proc_buf_num;
2872 sts.b_iso_in.bs = BS_HOST_READY;
2873 sts.b_iso_in.txsts = 0;
2874 sts.b_iso_in.sp = 0;
2875 sts.b_iso_in.ioc = 0;
2876 sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
2877 sts.b_iso_in.framenum = dwc_ep->next_frame;
2878 sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
2881 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
2882 dma_desc->buf = dma_ad;
2883 dma_desc->status.d32 = sts.d32;
2885 sts.b_iso_in.framenum += dwc_ep->bInterval;
2886 dma_ad += dwc_ep->data_per_frame;
2890 sts.b_iso_in.ioc = 1;
2891 sts.b_iso_in.l = dwc_ep->proc_buf_num;
2893 dma_desc->buf = dma_ad;
2894 dma_desc->status.d32 = sts.d32;
2896 dwc_ep->next_frame =
2897 sts.b_iso_in.framenum + dwc_ep->bInterval * 1;
2899 dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1;
2903 * This function is to handle Iso EP transfer complete interrupt
2904 * in case Iso out packet was dropped
2906 * @param core_if Programming view of DWC_otg controller.
2907 * @param dwc_ep The EP for which transfer complete was asserted
2910 static uint32_t handle_iso_out_pkt_dropped(dwc_otg_core_if_t *core_if,
2914 uint32_t drp_pkt_cnt;
2915 deptsiz_data_t deptsiz = {
2917 depctl_data_t depctl = {
2922 DWC_READ_REG32(&core_if->dev_if->
2923 out_ep_regs[dwc_ep->num]->doeptsiz);
2925 drp_pkt = dwc_ep->pkt_cnt - deptsiz.b.pktcnt;
2926 drp_pkt_cnt = dwc_ep->pkt_per_frm - (drp_pkt % dwc_ep->pkt_per_frm);
2928 /* Setting dropped packets status */
2929 for (i = 0; i < drp_pkt_cnt; ++i) {
2930 dwc_ep->pkt_info[drp_pkt].status = -DWC_E_NO_DATA;
2935 if (deptsiz.b.pktcnt > 0) {
2936 deptsiz.b.xfersize =
2937 dwc_ep->xfer_len - (dwc_ep->pkt_cnt -
2938 deptsiz.b.pktcnt)*dwc_ep->maxpacket;
2940 deptsiz.b.xfersize = 0;
2941 deptsiz.b.pktcnt = 0;
2944 DWC_WRITE_REG32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz,
2947 if (deptsiz.b.pktcnt > 0) {
2948 if (dwc_ep->proc_buf_num) {
2950 dwc_ep->dma_addr1 + dwc_ep->xfer_len -
2954 dwc_ep->dma_addr0 + dwc_ep->xfer_len -
2955 deptsiz.b.xfersize;;
2958 DWC_WRITE_REG32(&core_if->dev_if->
2959 out_ep_regs[dwc_ep->num]->doepdma, dma_addr);
2961 /** Re-enable endpoint, clear nak */
2966 DWC_MODIFY_REG32(&core_if->dev_if->
2967 out_ep_regs[dwc_ep->num]->doepctl, depctl.d32,
2976 * This function sets iso packets information(PTI mode)
2978 * @param core_if Programming view of DWC_otg controller.
2979 * @param ep The EP to start the transfer on.
2982 static uint32_t set_iso_pkts_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2986 iso_pkt_info_t *packet_info = ep->pkt_info;
2988 uint32_t frame_data;
2989 deptsiz_data_t deptsiz;
2991 if (ep->proc_buf_num == 0) {
2992 /** Buffer 0 descriptors setup */
2993 dma_ad = ep->dma_addr0;
2995 /** Buffer 1 descriptors setup */
2996 dma_ad = ep->dma_addr1;
3001 DWC_READ_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
3005 DWC_READ_REG32(&core_if->dev_if->out_ep_regs[ep->num]->
3009 if (!deptsiz.b.xfersize) {
3011 for (i = 0; i < ep->pkt_cnt; i += ep->pkt_per_frm) {
3012 frame_data = ep->data_per_frame;
3013 for (j = 0; j < ep->pkt_per_frm; ++j) {
3015 /* Packet status - is not set as initially
3016 * it is set to 0 and if packet was sent
3017 successfully, status field will remain 0*/
3019 /* Bytes has been transfered */
3020 packet_info->length =
3022 frame_data) ? ep->maxpacket : frame_data;
3024 /* Received packet offset */
3025 packet_info->offset = offset;
3026 offset += packet_info->length;
3027 frame_data -= packet_info->length;
3034 /* This is a workaround for in case of Transfer Complete with
3035 * PktDrpSts interrupts merging - in this case Transfer complete
3036 * interrupt for Isoc Out Endpoint is asserted without PktDrpSts
3037 * set and with DOEPTSIZ register non zero. Investigations showed,
3038 * that this happens when Out packet is dropped, but because of
3039 * interrupts merging during first interrupt handling PktDrpSts
3040 * bit is cleared and for next merged interrupts it is not reset.
3041 * In this case SW hadles the interrupt as if PktDrpSts bit is set.
3046 return handle_iso_out_pkt_dropped(core_if, ep);
3052 * This function is to handle Iso EP transfer complete interrupt
3054 * @param pcd The PCD
3055 * @param ep The EP for which transfer complete was asserted
3058 static void complete_iso_ep(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep)
3060 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
3061 dwc_ep_t *dwc_ep = &ep->dwc_ep;
3062 uint8_t is_last = 0;
3064 if (ep->dwc_ep.next_frame == 0xffffffff) {
3065 DWC_WARN("Next frame is not set!\n");
3069 if (core_if->dma_enable) {
3070 if (core_if->dma_desc_enable) {
3071 set_ddma_iso_pkts_info(core_if, dwc_ep);
3072 reinit_ddma_iso_xfer(core_if, dwc_ep);
3075 if (core_if->pti_enh_enable) {
3076 if (set_iso_pkts_info(core_if, dwc_ep)) {
3077 dwc_ep->proc_buf_num =
3078 (dwc_ep->proc_buf_num ^ 1) & 0x1;
3079 dwc_otg_iso_ep_start_buf_transfer
3084 set_current_pkt_info(core_if, dwc_ep);
3085 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
3087 dwc_ep->cur_pkt = 0;
3088 dwc_ep->proc_buf_num =
3089 (dwc_ep->proc_buf_num ^ 1) & 0x1;
3090 if (dwc_ep->proc_buf_num) {
3091 dwc_ep->cur_pkt_addr =
3093 dwc_ep->cur_pkt_dma_addr =
3096 dwc_ep->cur_pkt_addr =
3098 dwc_ep->cur_pkt_dma_addr =
3103 dwc_otg_iso_ep_start_frm_transfer(core_if,
3108 set_current_pkt_info(core_if, dwc_ep);
3109 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
3111 dwc_ep->cur_pkt = 0;
3112 dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1;
3113 if (dwc_ep->proc_buf_num) {
3114 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1;
3115 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1;
3117 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0;
3118 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0;
3122 dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep);
3125 dwc_otg_iso_buffer_done(pcd, ep, ep->iso_req_handle);
3127 #endif /* DWC_EN_ISOC */
3130 * This function handles the BNA interrupt for Non-Isochronous EPs
3133 static void dwc_otg_pcd_handle_noniso_bna(dwc_otg_pcd_ep_t *ep)
3135 dwc_ep_t *dwc_ep = &ep->dwc_ep;
3136 volatile uint32_t *addr;
3137 depctl_data_t depctl = {
3139 dwc_otg_pcd_t *pcd = ep->pcd;
3140 dwc_otg_dev_dma_desc_t *dma_desc;
3141 dev_dma_desc_sts_t sts = {
3143 dwc_otg_core_if_t *core_if = ep->pcd->core_if;
3146 if (!dwc_ep->desc_cnt)
3147 DWC_WARN("Ep%d %s Descriptor count = %d \n", dwc_ep->num,
3148 (dwc_ep->is_in ? "IN" : "OUT"), dwc_ep->desc_cnt);
3150 if (core_if->core_params->cont_on_bna && !dwc_ep->is_in
3151 && dwc_ep->type != DWC_OTG_EP_TYPE_CONTROL) {
3153 dwc_otg_dev_out_ep_regs_t *out_regs =
3154 core_if->dev_if->out_ep_regs[dwc_ep->num];
3155 doepdma = DWC_READ_REG32(&(out_regs->doepdma));
3158 dwc_ep->dma_desc_addr) / sizeof(dwc_otg_dev_dma_desc_t);
3159 dma_desc = &(dwc_ep->desc_addr[start]);
3162 dma_desc = dwc_ep->desc_addr;
3165 for (i = start; i < dwc_ep->desc_cnt; ++i, ++dma_desc) {
3166 sts.d32 = dma_desc->status.d32;
3167 sts.b.bs = BS_HOST_READY;
3168 dma_desc->status.d32 = sts.d32;
3171 if (dwc_ep->is_in == 0) {
3173 &GET_CORE_IF(pcd)->dev_if->out_ep_regs[dwc_ep->num]->
3177 &GET_CORE_IF(pcd)->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
3181 DWC_MODIFY_REG32(addr, 0, depctl.d32);
3185 * This function handles EP0 Control transfers.
3187 * The state of the control transfers are tracked in
3188 * <code>ep0state</code>.
3190 static void handle_ep0(dwc_otg_pcd_t *pcd)
3192 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3193 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
3194 dev_dma_desc_sts_t desc_sts;
3195 deptsiz0_data_t deptsiz;
3196 uint32_t byte_count;
3199 DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
3200 print_ep0_state(pcd);
3203 switch (pcd->ep0state) {
3204 case EP0_DISCONNECT:
3208 pcd->request_config = 0;
3213 case EP0_IN_DATA_PHASE:
3215 DWC_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n",
3216 ep0->dwc_ep.num, (ep0->dwc_ep.is_in ? "IN" : "OUT"),
3217 ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
3220 if (core_if->dma_enable != 0) {
3222 * For EP0 we can only program 1 packet at a time so we
3223 * need to do the make calculations after each complete.
3224 * Call write_packet to make the calculations, as in
3225 * slave mode, and use those values to determine if we
3228 if (core_if->dma_desc_enable == 0) {
3230 DWC_READ_REG32(&core_if->
3231 dev_if->in_ep_regs[0]->
3234 ep0->dwc_ep.xfer_len - deptsiz.b.xfersize;
3237 core_if->dev_if->in_desc_addr->status;
3239 ep0->dwc_ep.xfer_len - desc_sts.b.bytes;
3241 ep0->dwc_ep.xfer_count += byte_count;
3242 ep0->dwc_ep.xfer_buff += byte_count;
3243 ep0->dwc_ep.dma_addr += byte_count;
3245 if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) {
3246 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
3248 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
3249 } else if (ep0->dwc_ep.sent_zlp) {
3250 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
3252 ep0->dwc_ep.sent_zlp = 0;
3253 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER sent zlp\n");
3255 ep0_complete_request(ep0);
3256 DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
3259 case EP0_OUT_DATA_PHASE:
3261 DWC_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n",
3262 ep0->dwc_ep.num, (ep0->dwc_ep.is_in ? "IN" : "OUT"),
3263 ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
3265 if (core_if->dma_enable != 0) {
3266 if (core_if->dma_desc_enable == 0) {
3268 DWC_READ_REG32(&core_if->
3269 dev_if->out_ep_regs[0]->
3272 ep0->dwc_ep.maxpacket - deptsiz.b.xfersize;
3275 core_if->dev_if->out_desc_addr->status;
3277 ep0->dwc_ep.maxpacket - desc_sts.b.bytes;
3279 ep0->dwc_ep.xfer_count += byte_count;
3280 ep0->dwc_ep.xfer_buff += byte_count;
3281 ep0->dwc_ep.dma_addr += byte_count;
3283 if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) {
3284 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
3286 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
3287 } else if (ep0->dwc_ep.sent_zlp) {
3288 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
3290 ep0->dwc_ep.sent_zlp = 0;
3291 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER sent zlp\n");
3293 ep0_complete_request(ep0);
3294 DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
3298 case EP0_IN_STATUS_PHASE:
3299 case EP0_OUT_STATUS_PHASE:
3300 DWC_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n");
3301 ep0_complete_request(ep0);
3302 pcd->ep0state = EP0_IDLE;
3304 ep0->dwc_ep.is_in = 0; /* OUT for next SETUP */
3306 /* Prepare for more SETUP Packets */
3307 if (core_if->dma_enable) {
3308 ep0_out_start(core_if, pcd);
3313 DWC_ERROR("EP0 STALLed, should not get here pcd_setup()\n");
3317 print_ep0_state(pcd);
3324 static void restart_transfer(dwc_otg_pcd_t *pcd, const uint32_t epnum)
3326 dwc_otg_core_if_t *core_if;
3327 dwc_otg_dev_if_t *dev_if;
3328 deptsiz_data_t dieptsiz = {
3330 dwc_otg_pcd_ep_t *ep;
3332 ep = get_in_ep(pcd, epnum);
3335 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
3338 #endif /* DWC_EN_ISOC */
3340 core_if = GET_CORE_IF(pcd);
3341 dev_if = core_if->dev_if;
3343 dieptsiz.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->dieptsiz);
3345 DWC_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x xfer_len=%0x"
3346 " stopped=%d\n", ep->dwc_ep.xfer_buff,
3347 ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len, ep->stopped);
3349 * If xfersize is 0 and pktcnt in not 0, resend the last packet.
3351 if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0 &&
3352 ep->dwc_ep.start_xfer_buff != 0) {
3353 if (ep->dwc_ep.total_len <= ep->dwc_ep.maxpacket) {
3354 ep->dwc_ep.xfer_count = 0;
3355 ep->dwc_ep.xfer_buff = ep->dwc_ep.start_xfer_buff;
3356 ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count;
3358 ep->dwc_ep.xfer_count -= ep->dwc_ep.maxpacket;
3359 /* convert packet size to dwords. */
3360 ep->dwc_ep.xfer_buff -= ep->dwc_ep.maxpacket;
3361 ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count;
3364 DWC_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x "
3365 "xfer_len=%0x stopped=%d\n",
3366 ep->dwc_ep.xfer_buff,
3367 ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len,
3370 dwc_otg_ep0_start_transfer(core_if, &ep->dwc_ep);
3372 dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
3378 * This function creates a new nextep sequence based on the Learn Queue.
3380 * @param core_if Programming view of DWC_otg controller
3382 void predict_nextep_seq(dwc_otg_core_if_t *core_if)
3384 dwc_otg_device_global_regs_t *dev_global_regs =
3385 core_if->dev_if->dev_global_regs;
3386 const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth;
3387 /* Number of Token Queue Registers */
3388 const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8;
3389 dtknq1_data_t dtknqr1;
3390 uint32_t in_tkn_epnums[4];
3391 uint8_t seqnum[MAX_EPS_CHANNELS];
3392 uint8_t intkn_seq[TOKEN_Q_DEPTH];
3393 grstctl_t resetctl = {
3401 volatile uint32_t *addr = &dev_global_regs->dtknqr1;
3403 DWC_DEBUGPL(DBG_PCD, "dev_token_q_depth=%d\n", TOKEN_Q_DEPTH);
3405 /* Read the DTKNQ Registers */
3406 for (i = 0; i < DTKNQ_REG_CNT; i++) {
3407 in_tkn_epnums[i] = DWC_READ_REG32(addr);
3408 DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i + 1,
3410 if (addr == &dev_global_regs->dvbusdis) {
3411 addr = &dev_global_regs->dtknqr3_dthrctl;
3418 /* Copy the DTKNQR1 data to the bit field. */
3419 dtknqr1.d32 = in_tkn_epnums[0];
3420 if (dtknqr1.b.wrap_bit) {
3421 ndx = dtknqr1.b.intknwptr;
3424 end = TOKEN_Q_DEPTH - 1;
3427 end = dtknqr1.b.intknwptr - 1;
3433 /* Fill seqnum[] by initial values: EP number + 31 */
3434 for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
3438 /* Fill intkn_seq[] from in_tkn_epnums[0] */
3439 for (i = 0; i < 6; i++)
3440 intkn_seq[i] = (in_tkn_epnums[0] >> ((7 - i) * 4)) & 0xf;
3442 if (TOKEN_Q_DEPTH > 6) {
3443 /* Fill intkn_seq[] from in_tkn_epnums[1] */
3444 for (i = 6; i < 14; i++)
3446 (in_tkn_epnums[1] >> ((7 - (i - 6)) * 4)) & 0xf;
3449 if (TOKEN_Q_DEPTH > 14) {
3450 /* Fill intkn_seq[] from in_tkn_epnums[1] */
3451 for (i = 14; i < 22; i++)
3453 (in_tkn_epnums[2] >> ((7 - (i - 14)) * 4)) & 0xf;
3456 if (TOKEN_Q_DEPTH > 22) {
3457 /* Fill intkn_seq[] from in_tkn_epnums[1] */
3458 for (i = 22; i < 30; i++)
3460 (in_tkn_epnums[3] >> ((7 - (i - 22)) * 4)) & 0xf;
3463 DWC_DEBUGPL(DBG_PCDV, "%s start=%d end=%d intkn_seq[]:\n", __func__,
3465 for (i = 0; i < TOKEN_Q_DEPTH; i++)
3466 DWC_DEBUGPL(DBG_PCDV, "%d\n", intkn_seq[i]);
3468 /* Update seqnum based on intkn_seq[] */
3471 seqnum[intkn_seq[ndx]] = i;
3474 if (ndx == TOKEN_Q_DEPTH)
3476 } while (i < TOKEN_Q_DEPTH);
3478 /* Mark non active EP's in seqnum[] by 0xff */
3479 for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
3480 if (core_if->nextep_seq[i] == 0xff)
3486 while (!sort_done) {
3488 for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
3489 if (seqnum[i] > seqnum[i + 1]) {
3491 seqnum[i] = seqnum[i + 1];
3492 seqnum[i + 1] = temp;
3498 ndx = start + seqnum[0];
3499 if (ndx >= TOKEN_Q_DEPTH)
3500 ndx = ndx % TOKEN_Q_DEPTH;
3501 core_if->first_in_nextep_seq = intkn_seq[ndx];
3503 /* Update seqnum[] by EP numbers */
3504 for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
3506 if (seqnum[i] < 31) {
3507 ndx = start + seqnum[i];
3508 if (ndx >= TOKEN_Q_DEPTH)
3509 ndx = ndx % TOKEN_Q_DEPTH;
3510 seqnum[i] = intkn_seq[ndx];
3512 if (seqnum[i] < 0xff) {
3513 seqnum[i] = seqnum[i] - 31;
3520 /* Update nextep_seq[] based on seqnum[] */
3521 for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
3522 if (seqnum[i] != 0xff) {
3523 if (seqnum[i + 1] != 0xff) {
3524 core_if->nextep_seq[seqnum[i]] = seqnum[i + 1];
3526 core_if->nextep_seq[seqnum[i]] =
3527 core_if->first_in_nextep_seq;
3535 DWC_DEBUGPL(DBG_PCDV, "%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
3536 __func__, core_if->first_in_nextep_seq);
3537 for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
3538 DWC_DEBUGPL(DBG_PCDV, "%2d\n", core_if->nextep_seq[i]);
3541 /* Flush the Learning Queue */
3542 resetctl.d32 = DWC_READ_REG32(&core_if->core_global_regs->grstctl);
3543 resetctl.b.intknqflsh = 1;
3544 DWC_WRITE_REG32(&core_if->core_global_regs->grstctl, resetctl.d32);
3549 * handle the IN EP disable interrupt.
3551 static inline void handle_in_ep_disable_intr(dwc_otg_pcd_t *pcd,
3552 const uint32_t epnum)
3554 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3555 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3556 deptsiz_data_t dieptsiz = {
3558 dctl_data_t dctl = {
3560 dwc_otg_pcd_ep_t *ep;
3562 gintmsk_data_t gintmsk_data;
3563 depctl_data_t depctl;
3565 uint32_t remain_to_transfer = 0;
3569 ep = get_in_ep(pcd, epnum);
3570 dwc_ep = &ep->dwc_ep;
3572 if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
3573 dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num);
3578 DWC_DEBUGPL(DBG_PCD, "diepctl%d=%0x\n", epnum,
3579 DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl));
3580 dieptsiz.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->dieptsiz);
3581 depctl.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl);
3583 DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n",
3584 dieptsiz.b.pktcnt, dieptsiz.b.xfersize);
3586 if ((core_if->start_predict == 0) || (depctl.b.eptype & 1)) {
3588 if (core_if->en_multiple_tx_fifo)
3589 /* Flush the Tx FIFO */
3590 dwc_otg_flush_tx_fifo(core_if,
3591 dwc_ep->tx_fifo_num);
3592 /* Clear the Global IN NP NAK */
3594 dctl.b.cgnpinnak = 1;
3595 DWC_MODIFY_REG32(&dev_if->dev_global_regs->dctl,
3596 dctl.d32, dctl.d32);
3597 /* Restart the transaction */
3598 if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0) {
3599 restart_transfer(pcd, epnum);
3602 /* Restart the transaction */
3603 if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0) {
3604 restart_transfer(pcd, epnum);
3606 DWC_DEBUGPL(DBG_ANY, "STOPPED!!!\n");
3611 if (core_if->start_predict > 2) {
3613 core_if->start_predict--;
3617 core_if->start_predict--;
3619 if (core_if->start_predict == 1) {
3620 /* All NP IN Ep's disabled now */
3621 predict_nextep_seq(core_if);
3623 /* Update all active IN EP's NextEP field based of nextep_seq[] */
3624 for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
3626 DWC_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
3627 if (core_if->nextep_seq[i] != 0xff) {
3628 /* Active NP IN EP */
3629 depctl.b.nextep = core_if->nextep_seq[i];
3630 DWC_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl,
3634 /* Flush Shared NP TxFIFO */
3635 dwc_otg_flush_tx_fifo(core_if, 0);
3636 /* Rewind buffers */
3637 if (!core_if->dma_desc_enable) {
3638 i = core_if->first_in_nextep_seq;
3640 ep = get_in_ep(pcd, i);
3642 DWC_READ_REG32(&dev_if->
3643 in_ep_regs[i]->dieptsiz);
3645 ep->dwc_ep.total_len -
3646 ep->dwc_ep.xfer_count;
3647 if (xfer_size > ep->dwc_ep.maxxfer)
3648 xfer_size = ep->dwc_ep.maxxfer;
3650 DWC_READ_REG32(&dev_if->
3651 in_ep_regs[i]->diepctl);
3652 if (dieptsiz.b.pktcnt != 0) {
3653 if (xfer_size == 0) {
3654 remain_to_transfer = 0;
3657 ep->dwc_ep.maxpacket) ==
3659 remain_to_transfer =
3664 remain_to_transfer =
3677 DWC_READ_REG32(&dev_if->in_ep_regs
3679 dieptsiz.b.xfersize =
3681 DWC_WRITE_REG32(&dev_if->
3682 in_ep_regs[i]->dieptsiz,
3685 ep->dwc_ep.dma_addr + (xfer_size -
3686 remain_to_transfer);
3687 DWC_WRITE_REG32(&dev_if->
3688 in_ep_regs[i]->diepdma,
3691 i = core_if->nextep_seq[i];
3692 } while (i != core_if->first_in_nextep_seq);
3693 } else { /* dma_desc_enable */
3694 DWC_PRINTF("%s Learning Queue not supported in DDMA\n",
3698 /* Restart transfers in predicted sequences */
3699 i = core_if->first_in_nextep_seq;
3702 DWC_READ_REG32(&dev_if->in_ep_regs[i]->dieptsiz);
3704 DWC_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
3705 if (dieptsiz.b.pktcnt != 0) {
3707 DWC_READ_REG32(&dev_if->
3708 in_ep_regs[i]->diepctl);
3711 DWC_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl,
3714 i = core_if->nextep_seq[i];
3715 } while (i != core_if->first_in_nextep_seq);
3717 /* Clear the global non-periodic IN NAK handshake */
3719 dctl.b.cgnpinnak = 1;
3720 DWC_MODIFY_REG32(&dev_if->dev_global_regs->dctl, dctl.d32,
3723 /* Unmask EP Mismatch interrupt */
3724 gintmsk_data.d32 = 0;
3725 gintmsk_data.b.epmismatch = 1;
3726 DWC_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0,
3729 core_if->start_predict = 0;
3735 * Handler for the IN EP timeout handshake interrupt.
3737 static inline void handle_in_ep_timeout_intr(dwc_otg_pcd_t *pcd,
3738 const uint32_t epnum)
3740 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3741 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3744 deptsiz_data_t dieptsiz = {
3748 dctl_data_t dctl = {
3750 dwc_otg_pcd_ep_t *ep;
3752 gintmsk_data_t intr_mask = {
3755 ep = get_in_ep(pcd, epnum);
3757 /* Disable the NP Tx Fifo Empty Interrrupt */
3758 if (!core_if->dma_enable) {
3759 intr_mask.b.nptxfempty = 1;
3760 DWC_MODIFY_REG32(&core_if->core_global_regs->gintmsk,
3763 /** @todo NGS Check EP type.
3764 * Implement for Periodic EPs */
3768 /* Enable the Global IN NAK Effective Interrupt */
3769 intr_mask.b.ginnakeff = 1;
3770 DWC_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0, intr_mask.d32);
3772 /* Set Global IN NAK */
3773 dctl.b.sgnpinnak = 1;
3774 DWC_MODIFY_REG32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
3779 dieptsiz.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[num]->dieptsiz);
3780 DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n",
3781 dieptsiz.b.pktcnt, dieptsiz.b.xfersize);
3784 #ifdef DISABLE_PERIODIC_EP
3786 * Set the NAK bit for this EP to
3787 * start the disable process.
3791 DWC_MODIFY_REG32(&dev_if->in_ep_regs[num]->diepctl, diepctl.d32,
3799 * Handler for the IN EP NAK interrupt.
3801 static inline int32_t handle_in_ep_nak_intr(dwc_otg_pcd_t *pcd,
3802 const uint32_t epnum)
3804 /** @todo implement ISR */
3805 dwc_otg_core_if_t *core_if;
3806 diepmsk_data_t intr_mask = {
3809 DWC_PRINTF("INTERRUPT Handler not implemented for %s\n", "IN EP NAK");
3810 core_if = GET_CORE_IF(pcd);
3811 intr_mask.b.nak = 1;
3813 if (core_if->multiproc_int_enable) {
3814 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
3815 diepeachintmsk[epnum], intr_mask.d32, 0);
3817 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->diepmsk,
3825 * Handler for the OUT EP Babble interrupt.
3827 static inline int32_t handle_out_ep_babble_intr(dwc_otg_pcd_t *pcd,
3828 const uint32_t epnum)
3830 /** @todo implement ISR */
3831 dwc_otg_core_if_t *core_if;
3832 doepmsk_data_t intr_mask = {
3835 DWC_PRINTF("INTERRUPT Handler not implemented for %s\n",
3837 core_if = GET_CORE_IF(pcd);
3838 intr_mask.b.babble = 1;
3840 if (core_if->multiproc_int_enable) {
3841 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
3842 doepeachintmsk[epnum], intr_mask.d32, 0);
3844 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->doepmsk,
3852 * Handler for the OUT EP NAK interrupt.
3854 static inline int32_t handle_out_ep_nak_intr(dwc_otg_pcd_t *pcd,
3855 const uint32_t epnum)
3857 /** @todo implement ISR */
3858 dwc_otg_core_if_t *core_if;
3859 doepmsk_data_t intr_mask = {
3862 DWC_DEBUGPL(DBG_ANY, "INTERRUPT Handler not implemented for %s\n",
3864 core_if = GET_CORE_IF(pcd);
3865 intr_mask.b.nak = 1;
3867 if (core_if->multiproc_int_enable) {
3868 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
3869 doepeachintmsk[epnum], intr_mask.d32, 0);
3871 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->doepmsk,
3879 * Handler for the OUT EP NYET interrupt.
3881 static inline int32_t handle_out_ep_nyet_intr(dwc_otg_pcd_t *pcd,
3882 const uint32_t epnum)
3884 /** @todo implement ISR */
3885 dwc_otg_core_if_t *core_if;
3886 doepmsk_data_t intr_mask = {
3889 DWC_PRINTF("INTERRUPT Handler not implemented for %s\n", "OUT EP NYET");
3890 core_if = GET_CORE_IF(pcd);
3891 intr_mask.b.nyet = 1;
3893 if (core_if->multiproc_int_enable) {
3894 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
3895 doepeachintmsk[epnum], intr_mask.d32, 0);
3897 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->doepmsk,
3905 * This interrupt indicates that an IN EP has a pending Interrupt.
3906 * The sequence for handling the IN EP interrupt is shown below:
3907 * -# Read the Device All Endpoint Interrupt register
3908 * -# Repeat the following for each IN EP interrupt bit set (from
3910 * -# Read the Device Endpoint Interrupt (DIEPINTn) register
3911 * -# If "Transfer Complete" call the request complete function
3912 * -# If "Endpoint Disabled" complete the EP disable procedure.
3913 * -# If "AHB Error Interrupt" log error
3914 * -# If "Time-out Handshake" log error
3915 * -# If "IN Token Received when TxFIFO Empty" write packet to Tx
3917 * -# If "IN Token EP Mismatch" (disable, this is handled by EP
3918 * Mismatch Interrupt)
/*
 * Service routine for the Device IN-endpoint interrupt (GINTSTS.inepint).
 * Reads the all-IN-endpoint interrupt word, then for each flagged EP reads
 * DIEPINTn and dispatches the individual conditions: xfercompl, epdisabled,
 * ahberr, timeout, intktxfemp, intknepmis, inepnakeff, emptyintr, bna, nak.
 * Each condition is acknowledged via CLEAR_IN_EP_INTR (write-1-to-clear on
 * DIEPINTn).
 * NOTE(review): this listing has elided lines (original line numbers are
 * non-contiguous), so loop headers, else-arms and closing braces are missing
 * from view; do not treat the visible statements as the complete body.
 */
3920 static int32_t dwc_otg_pcd_handle_in_ep_intr(dwc_otg_pcd_t *pcd)
/* Write-1-to-clear the named bit in DIEPINTn for the given EP. */
3922 #define CLEAR_IN_EP_INTR(__core_if, __epnum, __intr) \
3924 diepint_data_t diepint = {.d32 = 0}; \
3925 diepint.b.__intr = 1; \
3926 DWC_WRITE_REG32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \
3930 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3931 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3932 diepint_data_t diepint = {
3934 depctl_data_t depctl = {
3938 dwc_otg_pcd_ep_t *ep;
3940 gintmsk_data_t intr_mask = {
3942 dctl_data_t dctl = {
3945 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd);
3947 /* Read in the device interrupt bits */
3948 ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if);
3950 /* Service the Device IN interrupts for each endpoint */
3952 if (ep_intr & 0x1) {
3954 /* Get EP pointer */
3955 ep = get_in_ep(pcd, epnum);
3956 dwc_ep = &ep->dwc_ep;
3959 DWC_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl);
3961 DWC_READ_REG32(&dev_if->
3962 dev_global_regs->dtknqr4_fifoemptymsk);
3964 DWC_DEBUGPL(DBG_PCDV,
3965 "IN EP INTERRUPT - %d\nepmty_msk - %8x diepctl - %8x\n",
3966 epnum, empty_msk, depctl.d32);
3968 DWC_DEBUGPL(DBG_PCD,
3969 "EP%d-%s: type=%d, mps=%d\n",
3970 dwc_ep->num, (dwc_ep->is_in ? "IN" : "OUT"),
3971 dwc_ep->type, dwc_ep->maxpacket);
3974 dwc_otg_read_dev_in_ep_intr(core_if, dwc_ep);
3976 DWC_DEBUGPL(DBG_PCDV,
3977 "EP %d Interrupt Register - 0x%x\n", epnum,
3979 /* Transfer complete */
3980 if (diepint.b.xfercompl) {
3981 /* Disable the NP Tx FIFO Empty
3983 if (core_if->en_multiple_tx_fifo == 0) {
3984 intr_mask.b.nptxfempty = 1;
3987 core_global_regs->gintmsk,
3990 /* Disable the Tx FIFO Empty Interrupt for this EP */
3991 uint32_t fifoemptymsk =
3993 DWC_MODIFY_REG32(&core_if->
3994 dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
3997 /* Clear the bit in DIEPINTn for this interrupt */
3998 CLEAR_IN_EP_INTR(core_if, epnum, xfercompl);
4000 /* Complete the transfer */
4005 else if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
4007 complete_iso_ep(pcd, ep);
4009 #endif /* DWC_EN_ISOC */
4010 #ifdef DWC_UTE_PER_IO
4011 else if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
4013 complete_xiso_ep(ep);
4015 #endif /* DWC_UTE_PER_IO */
/* Advance the ISOC frame counter; the frame number is 14 bits wide,
 * so going past 0x3FFF wraps and sets the frm_overrun flag. */
4017 if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC
4018 && dwc_ep->bInterval > 1) {
4019 dwc_ep->frame_num +=
4021 if (dwc_ep->frame_num > 0x3FFF) {
4022 dwc_ep->frm_overrun = 1;
4023 dwc_ep->frame_num &=
4026 dwc_ep->frm_overrun = 0;
4030 CLEAR_IN_EP_INTR(core_if, epnum,
4034 /* Endpoint disable */
4035 if (diepint.b.epdisabled) {
4036 DWC_DEBUGPL(DBG_ANY, "EP%d IN disabled\n",
4038 handle_in_ep_disable_intr(pcd, epnum);
4040 /* Clear the bit in DIEPINTn for this interrupt */
4041 CLEAR_IN_EP_INTR(core_if, epnum, epdisabled);
/* AHB DMA error: log, clear, then force a soft disconnect and stop
 * the PCD (lock is dropped around cil_pcd_stop). */
4044 if (diepint.b.ahberr) {
4045 DWC_ERROR("EP%d IN AHB Error\n", epnum);
4046 /* Clear the bit in DIEPINTn for this interrupt */
4047 DWC_ERROR("EP%d DEPDMA=0x%08x \n",
4050 in_ep_regs[epnum]->diepdma);
4051 CLEAR_IN_EP_INTR(core_if, epnum, ahberr);
4053 DWC_READ_REG32(&core_if->
4054 dev_if->dev_global_regs->
4056 dctl.b.sftdiscon = 1;
4057 DWC_WRITE_REG32(&core_if->
4058 dev_if->dev_global_regs->dctl,
4060 dwc_otg_disable_global_interrupts(core_if);
4061 ep->pcd->vbus_status = 0;
4062 if (ep->pcd->conn_status) {
4063 ep->pcd->conn_status = 0;
4065 DWC_SPINUNLOCK(pcd->lock);
4066 cil_pcd_stop(core_if);
4067 DWC_SPINLOCK(pcd->lock);
4069 /* TimeOUT Handshake (non-ISOC IN EPs) */
4070 if (diepint.b.timeout) {
4071 DWC_ERROR("EP%d IN Time-out\n", epnum);
4072 handle_in_ep_timeout_intr(pcd, epnum);
4074 CLEAR_IN_EP_INTR(core_if, epnum, timeout);
4076 /** IN Token received with TxF Empty */
4077 if (diepint.b.intktxfemp) {
4078 DWC_DEBUGPL(DBG_ANY,
4079 "EP%d IN TKN TxFifo Empty\n",
4081 if (!ep->stopped && epnum != 0) {
4083 diepmsk_data_t diepmsk = {
4085 diepmsk.b.intktxfemp = 1;
4087 if (core_if->multiproc_int_enable) {
4090 dev_global_regs->diepeachintmsk
4091 [epnum], diepmsk.d32, 0);
4095 dev_global_regs->diepmsk,
4098 } else if (core_if->dma_desc_enable
4101 EP0_OUT_STATUS_PHASE) {
4102 /* EP0 IN set STALL */
4104 DWC_READ_REG32(&dev_if->in_ep_regs
4107 /* set the disable and stall bits */
4108 if (depctl.b.epena) {
4112 DWC_WRITE_REG32(&dev_if->in_ep_regs
4116 CLEAR_IN_EP_INTR(core_if, epnum, intktxfemp);
4118 /** IN Token Received with EP mismatch */
4119 if (diepint.b.intknepmis) {
4120 DWC_DEBUGPL(DBG_ANY,
4121 "EP%d IN TKN EP Mismatch\n", epnum);
4122 CLEAR_IN_EP_INTR(core_if, epnum, intknepmis);
4124 /** IN Endpoint NAK Effective */
4125 if (diepint.b.inepnakeff) {
4126 DWC_DEBUGPL(DBG_ANY,
4127 "EP%d IN EP NAK Effective\n",
4130 if (ep->disabling) {
4134 DWC_MODIFY_REG32(&dev_if->in_ep_regs
4139 CLEAR_IN_EP_INTR(core_if, epnum, inepnakeff);
4143 /** IN EP Tx FIFO Empty Intr */
4144 if (diepint.b.emptyintr) {
4145 DWC_DEBUGPL(DBG_ANY,
4146 "EP%d Tx FIFO Empty Intr \n",
4148 write_empty_tx_fifo(pcd, epnum);
4150 CLEAR_IN_EP_INTR(core_if, epnum, emptyintr);
4154 /** IN EP BNA Intr */
4155 if (diepint.b.bna) {
4156 CLEAR_IN_EP_INTR(core_if, epnum, bna);
4157 if (core_if->dma_desc_enable) {
4160 DWC_OTG_EP_TYPE_ISOC) {
4162 * This checking is performed to prevent first "false" BNA
4163 * handling occurring right after reconnect
4165 if (dwc_ep->next_frame !=
4167 dwc_otg_pcd_handle_iso_bna
4170 #endif /* DWC_EN_ISOC */
4172 dwc_otg_pcd_handle_noniso_bna
/* NAK on an ISOC IN EP: resync frame_num with the current (SOF) frame,
 * restart the next queued request, then advance by bInterval with the
 * same 14-bit wrap handling as in the xfercompl path above. */
4178 if (diepint.b.nak) {
4179 DWC_DEBUGPL(DBG_ANY, "EP%d IN NAK Interrupt\n",
4181 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
4182 depctl_data_t depctl;
4183 if (ep->dwc_ep.frame_num == 0xFFFFFFFF) {
4184 ep->dwc_ep.frame_num =
4186 if (ep->dwc_ep.bInterval > 1) {
4190 (&dev_if->in_ep_regs
4210 (&dev_if->in_ep_regs
4214 start_next_request(ep);
4216 ep->dwc_ep.frame_num +=
4217 ep->dwc_ep.bInterval;
4218 if (dwc_ep->frame_num > 0x3FFF) {
4219 dwc_ep->frm_overrun = 1;
4220 dwc_ep->frame_num &= 0x3FFF;
4222 dwc_ep->frm_overrun = 0;
4225 CLEAR_IN_EP_INTR(core_if, epnum, nak);
4233 #undef CLEAR_IN_EP_INTR
4237 * This interrupt indicates that an OUT EP has a pending Interrupt.
4238 * The sequence for handling the OUT EP interrupt is shown below:
4239 * -# Read the Device All Endpoint Interrupt register
4240 * -# Repeat the following for each OUT EP interrupt bit set (from
4242 * -# Read the Device Endpoint Interrupt (DOEPINTn) register
4243 * -# If "Transfer Complete" call the request complete function
4244 * -# If "Endpoint Disabled" complete the EP disable procedure.
4245 * -# If "AHB Error Interrupt" log error
4246 * -# If "Setup Phase Done" process Setup Packet (See Standard USB
4247 * Command Processing)
/*
 * Service routine for the Device OUT-endpoint interrupt (GINTSTS.outepintr).
 * Reads the all-OUT-endpoint interrupt word, then for each flagged EP reads
 * DOEPINTn and dispatches: xfercompl (with extensive EP0 setup/data/status
 * phase state-machine handling and core-revision workarounds), stsphsercvd,
 * epdisabled, ahberr, setup, bna, babble, outtknepdis, nak, nyet.  Each
 * condition is acknowledged via CLEAR_OUT_EP_INTR (write-1-to-clear on
 * DOEPINTn).
 * NOTE(review): this listing has elided lines (original line numbers are
 * non-contiguous) — many conditions, else-arms and closing braces are
 * missing from view; do not treat the visible statements as the complete
 * body, especially in the dense EP0 xfercompl state machine.
 */
4249 static int32_t dwc_otg_pcd_handle_out_ep_intr(dwc_otg_pcd_t *pcd)
/* Write-1-to-clear the named bit in DOEPINTn for the given EP. */
4251 #define CLEAR_OUT_EP_INTR(__core_if, __epnum, __intr) \
4253 doepint_data_t doepint = {.d32 = 0}; \
4254 doepint.b.__intr = 1; \
4255 DWC_WRITE_REG32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \
4259 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
4261 doepint_data_t doepint = {
4264 dwc_otg_pcd_ep_t *ep;
4266 dctl_data_t dctl = {
4268 gintmsk_data_t gintmsk = {
4271 DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
4273 /* Read in the device interrupt bits */
4274 ep_intr = dwc_otg_read_dev_all_out_ep_intr(core_if);
4277 if (ep_intr & 0x1) {
4278 /* Get EP pointer */
4279 ep = get_out_ep(pcd, epnum);
4280 dwc_ep = &ep->dwc_ep;
4283 DWC_DEBUGPL(DBG_PCDV,
4284 "EP%d-%s: type=%d, mps=%d\n",
4285 dwc_ep->num, (dwc_ep->is_in ? "IN" : "OUT"),
4286 dwc_ep->type, dwc_ep->maxpacket);
4289 dwc_otg_read_dev_out_ep_intr(core_if, dwc_ep);
4290 /* Transfer complete */
4291 if (doepint.b.xfercompl) {
4294 /* Clear the bit in DOEPINTn for this interrupt */
4295 CLEAR_OUT_EP_INTR(core_if, epnum,
/* Core revisions >= 3.00a need special EP0 handling: the setup (SR)
 * bit and status-phase-received races are resolved here. */
4297 if (core_if->snpsid >=
4298 OTG_CORE_REV_3_00a) {
4299 DWC_DEBUGPL(DBG_PCDV,
4300 "in xfer xomplete DOEPINT=%x doepint=%x\n",
4306 DWC_DEBUGPL(DBG_PCDV,
4313 if (core_if->snpsid >=
4315 && core_if->dma_enable ==
4317 doepint_data_t doepint;
4323 if (pcd->ep0state ==
4329 goto exit_xfercompl;
4332 /* In case of DDMA look at SR bit to go to the Data Stage */
4333 if (core_if->dma_desc_enable) {
4337 if (pcd->ep0state ==
4340 core_if->dev_if->setup_desc_addr
4342 dev_if->setup_desc_index]->status.
4344 if (pcd->data_terminated) {
4345 pcd->data_terminated
4349 core_if->dev_if->out_desc_addr->status.d32;
4351 (&pcd->setup_pkt->req,
4356 if (doepint.b.setup) {
4359 "DMA DESC EP0_IDLE SR=1 setup=1\n");
4360 /* Already started data stage, clear setup */
4370 /* Prepare for more setup packets */
4371 if (pcd->ep0state == EP0_IN_STATUS_PHASE || pcd->ep0state == EP0_IN_DATA_PHASE) {
4377 goto exit_xfercompl;
4379 /* Prepare for more setup packets */
4382 "EP0_IDLE SR=1 setup=0 new setup comes\n");
4389 dwc_otg_pcd_request_t
4402 if (pcd->ep0state == EP0_STALL || pcd->ep0state == EP0_DISCONNECT) {
4404 ("EP0 is stalled/disconnected\n");
4407 /* Clear IN xfercompl if set */
4417 EP0_IN_DATA_PHASE)) {
4426 core_if->dev_if->setup_desc_addr
4428 dev_if->setup_desc_index]->status.
4439 EP0_OUT_DATA_PHASE))
4442 core_if->dev_if->out_desc_addr->status.d32;
4443 if (pcd->ep0state == EP0_OUT_STATUS_PHASE)
4447 out_desc_addr->status.d32;
4450 if (DWC_CIRCLEQ_EMPTY(&ep->queue)) {
4453 "Request queue empty!!\n");
4457 "complete req!!\n");
4458 req = DWC_CIRCLEQ_FIRST(&ep->queue);
/* Short OUT data-phase transfer: salvage the already-received bytes
 * into the setup packet buffer and finish the request early. */
4459 if (ep->dwc_ep.xfer_count != ep->dwc_ep.total_len && pcd->ep0state == EP0_OUT_DATA_PHASE) {
4460 /* Read arrived setup packet from req->buf */
4462 (&pcd->setup_pkt->req,
4465 ep->dwc_ep.xfer_count,
4470 ep->dwc_ep.xfer_count;
4471 dwc_otg_request_done
4475 ep->dwc_ep.start_xfer_buff = 0;
4476 ep->dwc_ep.xfer_buff = 0;
4477 ep->dwc_ep.xfer_len = 0;
4482 if (doepint.b.setup) {
4485 "EP0_IDLE SR=1 setup=1\n");
4486 /* Data stage started, clear setup */
4496 /* Prepare for setup packets if ep0in was enabled */
4497 if (pcd->ep0state == EP0_IN_STATUS_PHASE) {
4503 goto exit_xfercompl;
4505 /* Prepare for more setup packets */
4508 "EP0_IDLE SR=1 setup=0 new setup comes 2\n");
4516 if (core_if->snpsid >=
4518 && core_if->dma_enable
4519 && core_if->dma_desc_enable
4554 EP0_IN_STATUS_PHASE)){
4561 "WA for xfercompl along with stsphs \n");
4568 goto exit_xfercompl;
4571 if (pcd->ep0state ==
4573 if (doepint_temp.b.sr) {
4579 /* Delay is needed for core to update setup
4580 * packet count from 3 to 2 after receiving
4588 if (doeptsize0.b.supcnt == 3) {
4591 "Rolling over!!!!!!!\n");
4592 ep->dwc_ep.stp_rollover = 1;
4597 /* Already started data stage, clear setup */
4598 CLEAR_OUT_EP_INTR(core_if, epnum, setup);
4603 ep->dwc_ep.stp_rollover = 0;
4604 /* Prepare for more setup packets */
4605 if (pcd->ep0state == EP0_IN_STATUS_PHASE || pcd->ep0state == EP0_IN_DATA_PHASE) {
4616 /* Core not updating setup packet count
4617 * in case of PET testing - @TODO vahrama
4618 * to check with HW team further */
4619 if (!core_if->otg_ver) {
4628 goto exit_xfercompl;
4630 /* Prepare for more setup packets */
4633 "EP0_IDLE SR=1 setup=0 new setup comes\n");
4640 if (doepint.b.setup)
4647 dwc_otg_pcd_request_t
/* Workaround path: a new SETUP arrived while EP0 IN was still active —
 * force the IN side idle (NAK then EP disable, both polled) before
 * restarting setup processing. */
4669 if (pcd->ep0state == EP0_IN_DATA_PHASE || pcd->ep0state == EP0_IN_STATUS_PHASE) {
4670 if (diepint0.b.xfercompl) {
4677 if (diepctl0.b.epena) {
4699 } while (!diepint.b.inepnakeff);
4700 diepint.b.inepnakeff
4715 (&core_if->dev_if->in_ep_regs
4727 } while (!diepint.b.epdisabled);
4728 diepint.b.epdisabled
4732 (&core_if->dev_if->in_ep_regs
4742 [ep->dwc_ep.num]->doepint);
4743 if (doepint_temp.b.sr) {
4748 if (DWC_CIRCLEQ_EMPTY(&ep->queue)) {
4751 "Request queue empty!!\n");
4755 "complete req!!\n");
4756 req = DWC_CIRCLEQ_FIRST(&ep->queue);
4757 if (ep->dwc_ep.xfer_count != ep->dwc_ep.total_len && pcd->ep0state == EP0_OUT_DATA_PHASE) {
4758 /* Read arrived setup packet from req->buf */
4760 (&pcd->setup_pkt->req,
4763 ep->dwc_ep.xfer_count,
4768 ep->dwc_ep.xfer_count;
4769 dwc_otg_request_done
4773 ep->dwc_ep.start_xfer_buff = 0;
4774 ep->dwc_ep.xfer_buff = 0;
4775 ep->dwc_ep.xfer_len = 0;
4780 if (doepint.b.setup) {
4783 "EP0_IDLE SR=1 setup=1\n");
4784 /* Data stage started, clear setup */
4794 /* Prepare for setup packets if ep0in was enabled */
4795 if (pcd->ep0state == EP0_IN_STATUS_PHASE) {
4806 /* Core not updating setup packet count
4807 * in case of PET testing - @TODO vahrama
4808 * to check with HW team further */
4809 if (!core_if->otg_ver) {
4818 goto exit_xfercompl;
4820 /* Prepare for more setup packets */
4823 "EP0_IDLE SR=1 setup=0 new setup comes 2\n");
4831 if (core_if->dma_enable == 0
4836 DWC_DEBUGPL(DBG_PCDV,
4837 "after DOEPINT=%x doepint=%x\n",
4838 dwc_otg_read_dev_out_ep_intr
4842 if (core_if->dma_desc_enable ==
/* Non-EP0: ISOC completion paths (periodic transfer / per-IO) and the
 * generic OUT completion with the dev_out_nak timeout timer cancel. */
4849 } else if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
4850 if (doepint.b.pktdrpsts == 0) {
4851 /* Clear the bit in DOEPINTn for this interrupt */
4852 CLEAR_OUT_EP_INTR(core_if,
4855 complete_iso_ep(pcd, ep);
4858 doepint_data_t doepint = {
4860 doepint.b.xfercompl = 1;
4861 doepint.b.pktdrpsts = 1;
4867 if (handle_iso_out_pkt_dropped
4868 (core_if, dwc_ep)) {
4869 complete_iso_ep(pcd,
4873 #endif /* DWC_EN_ISOC */
4874 #ifdef DWC_UTE_PER_IO
4875 } else if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
4876 CLEAR_OUT_EP_INTR(core_if, epnum,
4879 complete_xiso_ep(ep);
4880 #endif /* DWC_UTE_PER_IO */
4882 /* Clear the bit in DOEPINTn for this interrupt */
4883 CLEAR_OUT_EP_INTR(core_if, epnum,
4886 if (core_if->core_params->dev_out_nak) {
4887 DWC_TIMER_CANCEL(pcd->
4888 core_if->ep_xfer_timer
4891 core_if->ep_xfer_info
4894 print_memory_payload(pcd,
/* Status-phase-received (EP0): start the IN status phase when in DDMA
 * or in buffer-DMA on cores >= 3.00a. */
4903 if (doepint.b.stsphsercvd) {
4904 deptsiz0_data_t deptsiz;
4905 CLEAR_OUT_EP_INTR(core_if, epnum, stsphsercvd);
4907 DWC_READ_REG32(&core_if->dev_if->
4908 out_ep_regs[0]->doeptsiz);
4909 if ((core_if->dma_desc_enable)
4910 || (core_if->dma_enable
4911 && core_if->snpsid >=
4912 OTG_CORE_REV_3_00a)) {
4913 do_setup_in_status_phase(pcd);
4917 /* Endpoint disable */
4918 if (doepint.b.epdisabled) {
4920 /* Clear the bit in DOEPINTn for this interrupt */
4921 CLEAR_OUT_EP_INTR(core_if, epnum, epdisabled);
4922 if (core_if->core_params->dev_out_nak) {
4924 print_memory_payload(pcd, dwc_ep);
4926 /* In case of timeout condition */
4928 ep_xfer_info[epnum].state == 2) {
4932 dev_global_regs->dctl);
4933 dctl.b.cgoutnak = 1;
4935 (&core_if->dev_if->dev_global_regs->dctl,
4937 /* Unmask goutnakeff interrupt which was masked
4938 * during handle nak out interrupt */
4939 gintmsk.b.goutnakeff = 1;
4941 (&core_if->core_global_regs->gintmsk,
4947 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
4949 gintmsk_data_t intr_mask = {
4951 dwc_otg_pcd_request_t *req = 0;
4954 DWC_READ_REG32(&core_if->dev_if->
4955 dev_global_regs->dctl);
4956 dctl.b.cgoutnak = 1;
4957 DWC_WRITE_REG32(&core_if->
4958 dev_if->dev_global_regs->
4962 intr_mask.b.incomplisoout = 1;
4964 /* Get any pending requests */
4965 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
4971 ("complete_ep 0x%p, req = NULL!\n",
4974 dwc_otg_request_done(ep,
4977 start_next_request(ep);
4981 ("complete_ep 0x%p, ep->queue empty!\n",
4987 if (doepint.b.ahberr) {
4988 DWC_ERROR("EP%d OUT AHB Error\n", epnum);
4989 DWC_ERROR("EP%d DEPDMA=0x%08x \n",
4992 out_ep_regs[epnum]->doepdma);
4993 CLEAR_OUT_EP_INTR(core_if, epnum, ahberr);
4995 /* Setup Phase Done (control EPs) */
4996 if (doepint.b.setup) {
4998 DWC_DEBUGPL(DBG_PCD, "EP%d SETUP Done\n",
5001 CLEAR_OUT_EP_INTR(core_if, epnum, setup);
5006 /** OUT EP BNA Intr */
5007 if (doepint.b.bna) {
5008 CLEAR_OUT_EP_INTR(core_if, epnum, bna);
5009 if (core_if->dma_desc_enable) {
5012 DWC_OTG_EP_TYPE_ISOC) {
5014 * This checking is performed to prevent first "false" BNA
5015 * handling occurring right after reconnect
5017 if (dwc_ep->next_frame !=
5019 dwc_otg_pcd_handle_iso_bna
5022 #endif /* DWC_EN_ISOC */
5024 dwc_otg_pcd_handle_noniso_bna
5029 /* Babble Interrupt */
5030 if (doepint.b.babble) {
5031 DWC_DEBUGPL(DBG_ANY, "EP%d OUT Babble\n",
5033 handle_out_ep_babble_intr(pcd, epnum);
5035 CLEAR_OUT_EP_INTR(core_if, epnum, babble);
/* OUT token while EP disabled: for ISOC, resync frame number, program
 * DATA0/DATA1 PID by frame parity, restart the next request, then mask
 * further outtknepdis interrupts. */
5037 if (doepint.b.outtknepdis) {
5038 DWC_DEBUGPL(DBG_ANY, "EP%d OUT Token received when EP is \
5041 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
5042 doepmsk_data_t doepmsk = {
5044 ep->dwc_ep.frame_num =
5046 if (ep->dwc_ep.bInterval > 1) {
5047 depctl_data_t depctl;
5053 if (ep->dwc_ep.frame_num & 0x1) {
5054 depctl.b.setd1pid = 1;
5055 depctl.b.setd0pid = 0;
5057 depctl.b.setd0pid = 1;
5058 depctl.b.setd1pid = 0;
5066 start_next_request(ep);
5067 doepmsk.b.outtknepdis = 1;
5068 DWC_MODIFY_REG32(&core_if->
5069 dev_if->dev_global_regs->doepmsk,
5072 CLEAR_OUT_EP_INTR(core_if, epnum, outtknepdis);
5076 if (doepint.b.nak) {
5077 DWC_DEBUGPL(DBG_ANY, "EP%d OUT NAK\n", epnum);
5078 handle_out_ep_nak_intr(pcd, epnum);
5080 CLEAR_OUT_EP_INTR(core_if, epnum, nak);
5082 /* NYET Interrupt */
5083 if (doepint.b.nyet) {
5084 DWC_DEBUGPL(DBG_ANY, "EP%d OUT NYET\n", epnum);
5085 handle_out_ep_nyet_intr(pcd, epnum);
5087 CLEAR_OUT_EP_INTR(core_if, epnum, nyet);
5097 #undef CLEAR_OUT_EP_INTR
/*
 * Decide whether a pending ISOC transfer should be dropped, given the
 * target frame number (trgt_fr), the current frame number (curr_fr) and
 * whether the 14-bit frame counter has wrapped (frm_overrun).  The
 * 0x3FFF / 2 window distinguishes a genuine overrun from a stale target
 * after wrap.
 * NOTE(review): the return statements are elided from this listing
 * (original line numbers are non-contiguous); presumably the matched
 * conditions return non-zero ("drop") and the fall-through returns 0 —
 * confirm against the full source.
 */
5100 static int drop_transfer(uint32_t trgt_fr, uint32_t curr_fr,
5101 uint8_t frm_overrun)
5104 if (!frm_overrun && curr_fr >= trgt_fr)
5106 else if (frm_overrun
5107 && (curr_fr >= trgt_fr && ((curr_fr - trgt_fr) < 0x3FFF / 2)))
5113 * Incomplete ISO IN Transfer Interrupt.
5114 * This interrupt indicates one of the following conditions occurred
5115 * while transmitting an ISOC transaction.
5116 * - Corrupted IN Token for ISOC EP.
5117 * - Packet not complete in FIFO.
5118 * The following actions will be taken:
5119 * -# Determine the EP
5120 * -# Set incomplete flag in dwc_ep structure
5121 * -# Disable EP; when "Endpoint Disabled" interrupt is received
/*
 * Handler for the Incomplete ISO IN interrupt (GINTSTS.incomplisoin).
 * DWC_EN_ISOC build: for each active ISOC IN EP whose transfer did not
 * complete (EPDIS set with a non-zero DIEPTSIZ), update the current packet
 * bookkeeping, flip the processing buffer, resync next_frame from
 * DSTS.soffn and restart the frame transfer.
 * Non-DWC_EN_ISOC build: disable any still-enabled ISOC IN EP whose
 * transfer should be dropped (see drop_transfer()).
 * Finally acknowledges GINTSTS.incomplisoin (write-1-to-clear).
 * NOTE(review): this listing has elided lines (original line numbers are
 * non-contiguous); the #ifdef structure and closing braces are partly
 * missing from view.
 * NOTE(review): the first loop indexes pcd->in_ep[i] with i starting at 1
 * while the second loop uses pcd->in_ep[i - 1] — looks inconsistent;
 * verify the intended indexing against the full source.
 */
5124 int32_t dwc_otg_pcd_handle_incomplete_isoc_in_intr(dwc_otg_pcd_t *pcd)
5126 gintsts_data_t gintsts;
5129 dwc_otg_dev_if_t *dev_if;
5130 deptsiz_data_t deptsiz = {
5132 depctl_data_t depctl = {
5134 dsts_data_t dsts = {
5139 dev_if = GET_CORE_IF(pcd)->dev_if;
5141 for (i = 1; i <= dev_if->num_in_eps; ++i) {
5142 dwc_ep = &pcd->in_ep[i].dwc_ep;
5143 if (dwc_ep->active && dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
5145 DWC_READ_REG32(&dev_if->in_ep_regs[i]->dieptsiz);
5147 DWC_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
5149 if (depctl.b.epdis && deptsiz.d32) {
5150 set_current_pkt_info(GET_CORE_IF(pcd), dwc_ep);
5151 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
5152 dwc_ep->cur_pkt = 0;
/* Ping-pong between the two processing buffers */
5153 dwc_ep->proc_buf_num =
5154 (dwc_ep->proc_buf_num ^ 1) & 0x1;
5156 if (dwc_ep->proc_buf_num) {
5157 dwc_ep->cur_pkt_addr =
5159 dwc_ep->cur_pkt_dma_addr =
5162 dwc_ep->cur_pkt_addr =
5164 dwc_ep->cur_pkt_dma_addr =
/* Resync to the current (micro)frame and restart */
5171 DWC_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
5172 dev_global_regs->dsts);
5173 dwc_ep->next_frame = dsts.b.soffn;
5175 dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF
5183 depctl_data_t depctl = {
5186 dwc_otg_dev_if_t *dev_if;
5188 dev_if = GET_CORE_IF(pcd)->dev_if;
5190 DWC_DEBUGPL(DBG_PCD, "Incomplete ISO IN \n");
5192 for (i = 1; i <= dev_if->num_in_eps; ++i) {
5193 dwc_ep = &pcd->in_ep[i - 1].dwc_ep;
5194 depctl.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
5195 if (depctl.b.epena && dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
5197 (dwc_ep->frame_num, GET_CORE_IF(pcd)->frame_num,
5198 dwc_ep->frm_overrun)) {
5200 DWC_READ_REG32(&dev_if->
5201 in_ep_regs[i]->diepctl);
5204 DWC_MODIFY_REG32(&dev_if->
5205 in_ep_regs[i]->diepctl,
5206 depctl.d32, depctl.d32);
5211 /*intr_mask.b.incomplisoin = 1;
5212 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
5213 intr_mask.d32, 0); */
5214 #endif /* DWC_EN_ISOC */
5216 /* Clear interrupt */
5218 gintsts.b.incomplisoin = 1;
5219 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
5226 * Incomplete ISO OUT Transfer Interrupt.
5228 * This interrupt indicates that the core has dropped an ISO OUT
5229 * packet. The following conditions can be the cause:
5230 * - FIFO Full, the entire packet would not fit in the FIFO.
5233 * The following actions will be taken:
5234 * -# Determine the EP
5235 * -# Set incomplete flag in dwc_ep structure
5236 * -# Read any data from the FIFO
5237 * -# Disable EP. When "Endpoint Disabled" interrupt is received
/*
 * Handler for the Incomplete ISO OUT interrupt (GINTSTS.incomplisoout).
 * DWC_EN_ISOC build: for each active ISOC OUT EP whose transfer did not
 * complete (EPDIS set with a non-zero DOEPTSIZ), update the current packet
 * bookkeeping, flip the processing buffer, resync next_frame from
 * DSTS.soffn and restart the frame transfer.
 * Non-DWC_EN_ISOC build: remember the affected ISOC OUT EP in
 * dev_if->isoc_ep, set global OUT NAK (SGOUTNAK) so the EP can be disabled
 * from the goutnakeff handler, and mask further incomplisoout interrupts.
 * Finally acknowledges GINTSTS.incomplisoout (write-1-to-clear).
 *
 * FIX(review): the DWC_EN_ISOC loop iterates the OUT endpoints
 * (num_out_eps, pcd->out_ep[i].dwc_ep.active/.type, out_ep_regs[i]) but the
 * original code took dwc_ep from the IN array (&pcd->in_ep[i].dwc_ep), so
 * all subsequent cur_pkt/proc_buf_num/next_frame updates were applied to
 * the wrong endpoint.  Changed to &pcd->out_ep[i].dwc_ep, matching the
 * set_current_pkt_info(..., &pcd->out_ep[i].dwc_ep) call just below.
 *
 * NOTE(review): this listing has elided lines (original line numbers are
 * non-contiguous); #ifdef structure and closing braces are partly missing
 * from view.
 */
5240 int32_t dwc_otg_pcd_handle_incomplete_isoc_out_intr(dwc_otg_pcd_t *pcd)
5243 gintsts_data_t gintsts;
5246 dwc_otg_dev_if_t *dev_if;
5247 deptsiz_data_t deptsiz = {
5249 depctl_data_t depctl = {
5251 dsts_data_t dsts = {
5256 dev_if = GET_CORE_IF(pcd)->dev_if;
5258 for (i = 1; i <= dev_if->num_out_eps; ++i) {
5259 dwc_ep = &pcd->out_ep[i].dwc_ep;
5260 if (pcd->out_ep[i].dwc_ep.active &&
5261 pcd->out_ep[i].dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
5263 DWC_READ_REG32(&dev_if->out_ep_regs[i]->doeptsiz);
5265 DWC_READ_REG32(&dev_if->out_ep_regs[i]->doepctl);
5267 if (depctl.b.epdis && deptsiz.d32) {
5268 set_current_pkt_info(GET_CORE_IF(pcd),
5269 &pcd->out_ep[i].dwc_ep);
5270 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
5271 dwc_ep->cur_pkt = 0;
/* Ping-pong between the two processing buffers */
5272 dwc_ep->proc_buf_num =
5273 (dwc_ep->proc_buf_num ^ 1) & 0x1;
5275 if (dwc_ep->proc_buf_num) {
5276 dwc_ep->cur_pkt_addr =
5278 dwc_ep->cur_pkt_dma_addr =
5281 dwc_ep->cur_pkt_addr =
5283 dwc_ep->cur_pkt_dma_addr =
/* Resync to the current (micro)frame and restart */
5290 DWC_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
5291 dev_global_regs->dsts);
5292 dwc_ep->next_frame = dsts.b.soffn;
5294 dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF
5301 /** @todo implement ISR */
5302 gintmsk_data_t intr_mask = {
5304 dwc_otg_core_if_t *core_if;
5305 deptsiz_data_t deptsiz = {
5307 depctl_data_t depctl = {
5309 dctl_data_t dctl = {
5311 dwc_ep_t *dwc_ep = NULL;
5313 core_if = GET_CORE_IF(pcd);
/* Find the ISOC OUT EP whose DATA PID matches the current frame parity;
 * stash it so the goutnakeff handler can disable it later. */
5315 for (i = 0; i < core_if->dev_if->num_out_eps; ++i) {
5316 dwc_ep = &pcd->out_ep[i].dwc_ep;
5318 DWC_READ_REG32(&core_if->dev_if->
5319 out_ep_regs[dwc_ep->num]->doepctl);
5321 && depctl.b.dpid == (core_if->frame_num & 0x1)) {
5322 core_if->dev_if->isoc_ep = dwc_ep;
5324 DWC_READ_REG32(&core_if->
5325 dev_if->out_ep_regs[dwc_ep->num]->
5330 dctl.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
5331 gintsts.d32 = DWC_READ_REG32(&core_if->core_global_regs->gintsts);
5332 intr_mask.d32 = DWC_READ_REG32(&core_if->core_global_regs->gintmsk);
5334 if (!intr_mask.b.goutnakeff) {
5336 intr_mask.b.goutnakeff = 1;
5337 DWC_WRITE_REG32(&core_if->core_global_regs->gintmsk,
5340 if (!gintsts.b.goutnakeff) {
5341 dctl.b.sgoutnak = 1;
5343 DWC_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
5346 DWC_READ_REG32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl);
5347 if (depctl.b.epena) {
5351 DWC_WRITE_REG32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl,
5355 intr_mask.b.incomplisoout = 1;
5357 #endif /* DWC_EN_ISOC */
5359 /* Clear interrupt */
5361 gintsts.b.incomplisoout = 1;
5362 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
5369 * This function handles the Global IN NAK Effective interrupt.
/*
 * Handler for the Global IN NAK Effective interrupt (GINTSTS.ginnakeff).
 * With IN NAK in effect it is safe to disable endpoints: sets EPDIS on
 * every enabled non-ISOC IN EP (the eptype low-bit test excludes
 * ISOC/INTERRUPT types — see the databook eptype encoding), bumps
 * start_predict when in use, then masks ginnakeff in GINTMSK and
 * acknowledges it in GINTSTS (write-1-to-clear).
 * NOTE(review): this listing has elided lines (original line numbers are
 * non-contiguous); some statements (e.g. snak setting, closing braces,
 * return) are missing from view.
 */
5372 int32_t dwc_otg_pcd_handle_in_nak_effective(dwc_otg_pcd_t *pcd)
5374 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
5375 depctl_data_t diepctl = {
5377 gintmsk_data_t intr_mask = {
5379 gintsts_data_t gintsts;
5380 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
5383 DWC_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n");
5385 /* Disable all active IN EPs */
5386 for (i = 0; i <= dev_if->num_in_eps; i++) {
5387 diepctl.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
5388 if (!(diepctl.b.eptype & 1) && diepctl.b.epena) {
5389 if (core_if->start_predict > 0)
5390 core_if->start_predict++;
5391 diepctl.b.epdis = 1;
5393 DWC_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl,
5398 /* Disable the Global IN NAK Effective Interrupt */
5399 intr_mask.b.ginnakeff = 1;
5400 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
5403 /* Clear interrupt */
5405 gintsts.b.ginnakeff = 1;
5406 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
5413 * OUT NAK Effective.
/*
 * Handler for the Global OUT NAK Effective interrupt (GINTSTS.goutnakeff).
 * First masks goutnakeff in GINTMSK.  Then:
 *  - dev_out_nak mode: finds the OUT EP whose transfer-timeout state
 *    machine fired (ep_xfer_info[i].state == 2) and disables it; if none
 *    is found, just clears global OUT NAK (CGOUTNAK).
 *  - Otherwise, if dev_if->isoc_ep was stashed by the Incomplete ISO OUT
 *    handler, disables that EP; else logs that the condition is
 *    unhandled.
 * Finally acknowledges GINTSTS.goutnakeff (write-1-to-clear).
 * NOTE(review): this listing has elided lines (original line numbers are
 * non-contiguous); loop bodies, else-arms and the return are partly
 * missing from view.
 */
5416 int32_t dwc_otg_pcd_handle_out_nak_effective(dwc_otg_pcd_t *pcd)
5418 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
5419 gintmsk_data_t intr_mask = {
5421 gintsts_data_t gintsts;
5422 depctl_data_t doepctl;
5425 /* Disable the Global OUT NAK Effective Interrupt */
5426 intr_mask.b.goutnakeff = 1;
5427 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
5430 /* If DEV OUT NAK enabled */
5431 if (pcd->core_if->core_params->dev_out_nak) {
5432 /* Run over all out endpoints to determine the ep number on
5433 * which the timeout has happened
5435 for (i = 0; i <= dev_if->num_out_eps; i++) {
5436 if (pcd->core_if->ep_xfer_info[i].state == 2)
/* No timed-out EP found: just clear global OUT NAK and bail */
5439 if (i > dev_if->num_out_eps) {
5442 DWC_READ_REG32(&dev_if->dev_global_regs->dctl);
5443 dctl.b.cgoutnak = 1;
5444 DWC_WRITE_REG32(&dev_if->dev_global_regs->dctl,
5449 /* Disable the endpoint */
5450 doepctl.d32 = DWC_READ_REG32(&dev_if->out_ep_regs[i]->doepctl);
5451 if (doepctl.b.epena) {
5452 doepctl.b.epdis = 1;
5455 DWC_WRITE_REG32(&dev_if->out_ep_regs[i]->doepctl, doepctl.d32);
5458 /* We come here from Incomplete ISO OUT handler */
5459 if (dev_if->isoc_ep) {
5460 dwc_ep_t *dwc_ep = (dwc_ep_t *) dev_if->isoc_ep;
5461 uint32_t epnum = dwc_ep->num;
5462 doepint_data_t doepint;
5464 DWC_READ_REG32(&dev_if->out_ep_regs[dwc_ep->num]->doepint);
5465 dev_if->isoc_ep = NULL;
5467 DWC_READ_REG32(&dev_if->out_ep_regs[epnum]->doepctl);
5468 DWC_PRINTF("Before disable DOEPCTL = %08x\n", doepctl.d32);
5469 if (doepctl.b.epena) {
5470 doepctl.b.epdis = 1;
5473 DWC_WRITE_REG32(&dev_if->out_ep_regs[epnum]->doepctl,
5477 DWC_PRINTF("INTERRUPT Handler not implemented for %s\n",
5478 "Global OUT NAK Effective\n");
5481 /* Clear interrupt */
5483 gintsts.b.goutnakeff = 1;
5484 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
5491 * PCD interrupt handler.
5493 * The PCD handles the device interrupts. Many conditions can cause a
5494 * device interrupt. When an interrupt occurs, the device interrupt
5495 * service routine determines the cause of the interrupt and
5496 * dispatches handling to the appropriate function. These interrupt
5497 * handling functions are described below.
5499 * All interrupt registers are processed from LSB to MSB.
/*
 * Top-level PCD (device-mode) interrupt dispatcher.
 * Bails out early on HAPS disconnect or when the core is hibernated.
 * In device mode, takes pcd->lock, reads GINTSTS&GINTMSK via
 * dwc_otg_read_core_intr(), and dispatches each asserted cause to its
 * handler, OR-ing the handler results into retval.  In Multiprocessor
 * Interrupt (MPI) mode the per-EP handlers are invoked unconditionally,
 * since the core asserts EP interrupts without setting
 * inepint/outepintr (see comment below).
 * NOTE(review): this listing has elided lines (original line numbers are
 * non-contiguous); early-return bodies, closing braces and the final
 * return are missing from view.
 */
5502 int32_t dwc_otg_pcd_handle_intr(dwc_otg_pcd_t *pcd)
5504 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
5506 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
5508 gintsts_data_t gintr_status;
5511 if (dwc_otg_check_haps_status(core_if) == -1) {
5512 DWC_WARN("HAPS is disconnected");
5516 /* Exit from ISR if core is hibernated */
5517 if (core_if->hibernation_suspend == 1) {
5521 DWC_DEBUGPL(DBG_ANY, "%s() gintsts=%08x gintmsk=%08x\n",
5523 DWC_READ_REG32(&global_regs->gintsts),
5524 DWC_READ_REG32(&global_regs->gintmsk));
5527 if (dwc_otg_is_device_mode(core_if)) {
5528 DWC_SPINLOCK(pcd->lock);
5530 DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%08x gintmsk=%08x\n",
5532 DWC_READ_REG32(&global_regs->gintsts),
5533 DWC_READ_REG32(&global_regs->gintmsk));
/* Masked status: only causes that are both set and unmasked */
5536 gintr_status.d32 = dwc_otg_read_core_intr(core_if);
5538 DWC_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n",
5539 __func__, gintr_status.d32);
5541 if (gintr_status.b.sofintr) {
5542 retval |= dwc_otg_pcd_handle_sof_intr(pcd);
5544 if (gintr_status.b.rxstsqlvl) {
5546 dwc_otg_pcd_handle_rx_status_q_level_intr(pcd);
5548 if (gintr_status.b.nptxfempty) {
5549 retval |= dwc_otg_pcd_handle_np_tx_fifo_empty_intr(pcd);
5551 if (gintr_status.b.goutnakeff) {
5552 retval |= dwc_otg_pcd_handle_out_nak_effective(pcd);
5554 if (gintr_status.b.i2cintr) {
5555 retval |= dwc_otg_pcd_handle_i2c_intr(pcd);
5557 if (gintr_status.b.erlysuspend) {
5558 retval |= dwc_otg_pcd_handle_early_suspend_intr(pcd);
5560 if (gintr_status.b.usbreset) {
5561 retval |= dwc_otg_pcd_handle_usb_reset_intr(pcd);
5562 pcd->conn_status = -1;
5564 if (gintr_status.b.enumdone) {
5565 retval |= dwc_otg_pcd_handle_enum_done_intr(pcd);
5567 if (gintr_status.b.isooutdrop) {
5569 dwc_otg_pcd_handle_isoc_out_packet_dropped_intr
5572 if (gintr_status.b.eopframe) {
5574 dwc_otg_pcd_handle_end_periodic_frame_intr(pcd);
5576 if (gintr_status.b.inepint) {
5577 if (!core_if->multiproc_int_enable) {
5578 retval |= dwc_otg_pcd_handle_in_ep_intr(pcd);
5581 if (gintr_status.b.outepintr) {
5582 if (!core_if->multiproc_int_enable) {
5583 retval |= dwc_otg_pcd_handle_out_ep_intr(pcd);
5586 if (gintr_status.b.epmismatch) {
5587 retval |= dwc_otg_pcd_handle_ep_mismatch_intr(pcd);
5589 if (gintr_status.b.fetsusp) {
5590 retval |= dwc_otg_pcd_handle_ep_fetsusp_intr(pcd);
5592 if (gintr_status.b.ginnakeff) {
5593 retval |= dwc_otg_pcd_handle_in_nak_effective(pcd);
5595 if (gintr_status.b.incomplisoin) {
5597 dwc_otg_pcd_handle_incomplete_isoc_in_intr(pcd);
5599 if (gintr_status.b.incomplisoout) {
5601 dwc_otg_pcd_handle_incomplete_isoc_out_intr(pcd);
5604 /* In MPI mode Device Endpoints interrupts are asserted
5605 * without setting outepintr and inepint bits set, so these
5606 * Interrupt handlers are called without checking these bit-fields
5608 if (core_if->multiproc_int_enable) {
5609 retval |= dwc_otg_pcd_handle_in_ep_intr(pcd);
5610 retval |= dwc_otg_pcd_handle_out_ep_intr(pcd);
5613 DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%0x\n", __func__,
5614 DWC_READ_REG32(&global_regs->gintsts));
5616 DWC_SPINUNLOCK(pcd->lock);
5621 #endif /* DWC_HOST_ONLY */