/* drivers/staging/dwc2/hcd_intr.c */
1 /*
2  * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
3  *
4  * Copyright (C) 2004-2013 Synopsys, Inc.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions, and the following disclaimer,
11  *    without modification.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The names of the above-listed copyright holders may not be used
16  *    to endorse or promote products derived from this software without
17  *    specific prior written permission.
18  *
19  * ALTERNATIVELY, this software may be distributed under the terms of the
20  * GNU General Public License ("GPL") as published by the Free Software
21  * Foundation; either version 2 of the License, or (at your option) any
22  * later version.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 /*
38  * This file contains the interrupt handlers for Host mode
39  */
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/spinlock.h>
43 #include <linux/interrupt.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/io.h>
46 #include <linux/slab.h>
47 #include <linux/usb.h>
48
49 #include <linux/usb/hcd.h>
50 #include <linux/usb/ch11.h>
51
52 #include "core.h"
53 #include "hcd.h"
54
55 /* This function is for debug only */
static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
#warning Compiling code to track missed SOFs

	u16 frame = hsotg->frame_number;

	if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
		u16 expected = (hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM;

		/* Record only SOFs that did not follow their predecessor */
		if (expected != frame) {
			hsotg->frame_num_array[hsotg->frame_num_idx] = frame;
			hsotg->last_frame_num_array[hsotg->frame_num_idx] =
					hsotg->last_frame_num;
			hsotg->frame_num_idx++;
		}
	} else if (!hsotg->dumped_frame_num_array) {
		int i;

		/* Tracking array is full: dump it once, then stop logging */
		dev_info(hsotg->dev, "Frame     Last Frame\n");
		dev_info(hsotg->dev, "-----     ----------\n");
		for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++)
			dev_info(hsotg->dev, "0x%04x    0x%04x\n",
				 hsotg->frame_num_array[i],
				 hsotg->last_frame_num_array[i]);
		hsotg->dumped_frame_num_array = 1;
	}
	hsotg->last_frame_num = frame;
#endif
}
87
88 static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
89                                     struct dwc2_host_chan *chan,
90                                     struct dwc2_qtd *qtd)
91 {
92         struct urb *usb_urb;
93
94         if (!chan->qh || !qtd->urb)
95                 return;
96
97         usb_urb = qtd->urb->priv;
98         if (!usb_urb || !usb_urb->dev)
99                 return;
100
101         if (chan->qh->dev_speed != USB_SPEED_HIGH &&
102             qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
103                 chan->qh->tt_buffer_dirty = 1;
104                 if (usb_hub_clear_tt_buffer(usb_urb))
105                         /* Clear failed; let's hope things work anyway */
106                         chan->qh->tt_buffer_dirty = 0;
107         }
108 }
109
110 /*
111  * Handles the start-of-frame interrupt in host mode. Non-periodic
112  * transactions may be queued to the DWC_otg controller for the current
113  * (micro)frame. Periodic transactions may be queued to the controller
114  * for the next (micro)frame.
115  */
116 static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
117 {
118         struct list_head *qh_entry;
119         struct dwc2_qh *qh;
120         u32 hfnum;
121         enum dwc2_transaction_type tr_type;
122
123 #ifdef DEBUG_SOF
124         dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
125 #endif
126
127         hfnum = readl(hsotg->regs + HFNUM);
128         hsotg->frame_number = hfnum >> HFNUM_FRNUM_SHIFT &
129                             HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT;
130
131         dwc2_track_missed_sofs(hsotg);
132
133         /* Determine whether any periodic QHs should be executed */
134         qh_entry = hsotg->periodic_sched_inactive.next;
135         while (qh_entry != &hsotg->periodic_sched_inactive) {
136                 qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
137                 qh_entry = qh_entry->next;
138                 if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number))
139                         /*
140                          * Move QH to the ready list to be executed next
141                          * (micro)frame
142                          */
143                         list_move(&qh->qh_list_entry,
144                                   &hsotg->periodic_sched_ready);
145         }
146         tr_type = dwc2_hcd_select_transactions(hsotg);
147         if (tr_type != DWC2_TRANSACTION_NONE)
148                 dwc2_hcd_queue_transactions(hsotg, tr_type);
149
150         /* Clear interrupt */
151         writel(GINTSTS_SOF, hsotg->regs + GINTSTS);
152 }
153
154 /*
155  * Handles the Rx FIFO Level Interrupt, which indicates that there is
156  * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
157  * memory if the DWC_otg controller is operating in Slave mode.
158  */
159 static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
160 {
161         u32 grxsts, chnum, bcnt, dpid, pktsts;
162         struct dwc2_host_chan *chan;
163
164         if (dbg_perio())
165                 dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");
166
167         grxsts = readl(hsotg->regs + GRXSTSP);
168         chnum = grxsts >> GRXSTS_HCHNUM_SHIFT &
169                 GRXSTS_HCHNUM_MASK >> GRXSTS_HCHNUM_SHIFT;
170         chan = hsotg->hc_ptr_array[chnum];
171         if (!chan) {
172                 dev_err(hsotg->dev, "Unable to get corresponding channel\n");
173                 return;
174         }
175
176         bcnt = grxsts >> GRXSTS_BYTECNT_SHIFT &
177                GRXSTS_BYTECNT_MASK >> GRXSTS_BYTECNT_SHIFT;
178         dpid = grxsts >> GRXSTS_DPID_SHIFT &
179                GRXSTS_DPID_MASK >> GRXSTS_DPID_SHIFT;
180         pktsts = grxsts & GRXSTS_PKTSTS_MASK;
181
182         /* Packet Status */
183         if (dbg_perio()) {
184                 dev_vdbg(hsotg->dev, "    Ch num = %d\n", chnum);
185                 dev_vdbg(hsotg->dev, "    Count = %d\n", bcnt);
186                 dev_vdbg(hsotg->dev, "    DPID = %d, chan.dpid = %d\n", dpid,
187                          chan->data_pid_start);
188                 dev_vdbg(hsotg->dev, "    PStatus = %d\n",
189                          pktsts >> GRXSTS_PKTSTS_SHIFT &
190                          GRXSTS_PKTSTS_MASK >> GRXSTS_PKTSTS_SHIFT);
191         }
192
193         switch (pktsts) {
194         case GRXSTS_PKTSTS_HCHIN:
195                 /* Read the data into the host buffer */
196                 if (bcnt > 0) {
197                         dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);
198
199                         /* Update the HC fields for the next packet received */
200                         chan->xfer_count += bcnt;
201                         chan->xfer_buf += bcnt;
202                 }
203                 break;
204         case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
205         case GRXSTS_PKTSTS_DATATOGGLEERR:
206         case GRXSTS_PKTSTS_HCHHALTED:
207                 /* Handled in interrupt, just ignore data */
208                 break;
209         default:
210                 dev_err(hsotg->dev,
211                         "RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
212                 break;
213         }
214 }
215
216 /*
217  * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
218  * data packets may be written to the FIFO for OUT transfers. More requests
219  * may be written to the non-periodic request queue for IN transfers. This
220  * interrupt is enabled only in Slave mode.
221  */
static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
	/* Queue more non-periodic transactions now that FIFO space exists */
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
}
227
228 /*
229  * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
230  * packets may be written to the FIFO for OUT transfers. More requests may be
231  * written to the periodic request queue for IN transfers. This interrupt is
232  * enabled only in Slave mode.
233  */
static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
	/* Queue more periodic transactions now that FIFO space exists */
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
}
240
/*
 * Handles the port transitioning to the enabled state. Recomputes
 * HFIR.FrInterval, then (only when host_support_fs_ls_low_power is set)
 * adjusts the PHY clock selection for the detected port speed. When the
 * clock configuration changes, a port reset is requested via *hprt0_modify
 * and delayed work; otherwise the port_reset_change flag is set.
 */
static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
			      u32 *hprt0_modify)
{
	struct dwc2_core_params *params = hsotg->core_params;
	int do_reset = 0;
	u32 usbcfg;
	u32 prtspd;
	u32 hcfg;
	u32 hfir;

	dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Every time when port enables calculate HFIR.FrInterval */
	hfir = readl(hsotg->regs + HFIR);
	hfir &= ~HFIR_FRINT_MASK;
	hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
		HFIR_FRINT_MASK;
	writel(hfir, hsotg->regs + HFIR);

	/* Check if we need to adjust the PHY clock speed for low power */
	if (!params->host_support_fs_ls_low_power) {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;
		return;
	}

	usbcfg = readl(hsotg->regs + GUSBCFG);
	prtspd = hprt0 & HPRT0_SPD_MASK;

	if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
		/* Low power */
		if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
			/* Set PHY low power clock select for FS/LS devices */
			usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
			writel(usbcfg, hsotg->regs + GUSBCFG);
			do_reset = 1;
		}

		hcfg = readl(hsotg->regs + HCFG);

		/* Select 6 MHz only for LS devices when the param asks for it */
		if (prtspd == HPRT0_SPD_LOW_SPEED &&
		    params->host_ls_low_power_phy_clk ==
		    DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
			/* 6 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 6 MHz\n");
			if ((hcfg & HCFG_FSLSPCLKSEL_MASK) !=
			    HCFG_FSLSPCLKSEL_6_MHZ) {
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= HCFG_FSLSPCLKSEL_6_MHZ;
				writel(hcfg, hsotg->regs + HCFG);
				do_reset = 1;
			}
		} else {
			/* 48 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 48 MHz\n");
			if ((hcfg & HCFG_FSLSPCLKSEL_MASK) !=
			    HCFG_FSLSPCLKSEL_48_MHZ) {
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= HCFG_FSLSPCLKSEL_48_MHZ;
				writel(hcfg, hsotg->regs + HCFG);
				do_reset = 1;
			}
		}
	} else {
		/* Not low power */
		if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
			usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
			writel(usbcfg, hsotg->regs + GUSBCFG);
			do_reset = 1;
		}
	}

	if (do_reset) {
		/* Clock selection changed: request a delayed port reset */
		*hprt0_modify |= HPRT0_RST;
		queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
				   msecs_to_jiffies(60));
	} else {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;
	}
}
324
325 /*
326  * There are multiple conditions that can cause a port interrupt. This function
327  * determines which interrupt conditions have occurred and handles them
328  * appropriately.
329  */
330 static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
331 {
332         u32 hprt0;
333         u32 hprt0_modify;
334
335         dev_vdbg(hsotg->dev, "--Port Interrupt--\n");
336
337         hprt0 = readl(hsotg->regs + HPRT0);
338         hprt0_modify = hprt0;
339
340         /*
341          * Clear appropriate bits in HPRT0 to clear the interrupt bit in
342          * GINTSTS
343          */
344         hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
345                           HPRT0_OVRCURRCHG);
346
347         /*
348          * Port Connect Detected
349          * Set flag and clear if detected
350          */
351         if (hprt0 & HPRT0_CONNDET) {
352                 dev_vdbg(hsotg->dev,
353                          "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
354                          hprt0);
355                 hsotg->flags.b.port_connect_status_change = 1;
356                 hsotg->flags.b.port_connect_status = 1;
357                 hprt0_modify |= HPRT0_CONNDET;
358
359                 /*
360                  * The Hub driver asserts a reset when it sees port connect
361                  * status change flag
362                  */
363         }
364
365         /*
366          * Port Enable Changed
367          * Clear if detected - Set internal flag if disabled
368          */
369         if (hprt0 & HPRT0_ENACHG) {
370                 dev_vdbg(hsotg->dev,
371                          "  --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
372                          hprt0, !!(hprt0 & HPRT0_ENA));
373                 hprt0_modify |= HPRT0_ENACHG;
374                 if (hprt0 & HPRT0_ENA)
375                         dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
376                 else
377                         hsotg->flags.b.port_enable_change = 1;
378         }
379
380         /* Overcurrent Change Interrupt */
381         if (hprt0 & HPRT0_OVRCURRCHG) {
382                 dev_vdbg(hsotg->dev,
383                          "  --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
384                          hprt0);
385                 hsotg->flags.b.port_over_current_change = 1;
386                 hprt0_modify |= HPRT0_OVRCURRCHG;
387         }
388
389         /* Clear Port Interrupts */
390         writel(hprt0_modify, hsotg->regs + HPRT0);
391 }
392
393 /*
394  * Gets the actual length of a transfer after the transfer halts. halt_status
395  * holds the reason for the halt.
396  *
397  * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
398  * is set to 1 upon return if less than the requested number of bytes were
399  * transferred. short_read may also be NULL on entry, in which case it remains
400  * unchanged.
401  */
402 static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
403                                        struct dwc2_host_chan *chan, int chnum,
404                                        struct dwc2_qtd *qtd,
405                                        enum dwc2_halt_status halt_status,
406                                        int *short_read)
407 {
408         u32 hctsiz, count, length;
409
410         hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
411
412         if (halt_status == DWC2_HC_XFER_COMPLETE) {
413                 if (chan->ep_is_in) {
414                         count = hctsiz >> TSIZ_XFERSIZE_SHIFT &
415                                 TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT;
416                         length = chan->xfer_len - count;
417                         if (short_read != NULL)
418                                 *short_read = (count != 0);
419                 } else if (chan->qh->do_split) {
420                         length = qtd->ssplit_out_xfer_count;
421                 } else {
422                         length = chan->xfer_len;
423                 }
424         } else {
425                 /*
426                  * Must use the hctsiz.pktcnt field to determine how much data
427                  * has been transferred. This field reflects the number of
428                  * packets that have been transferred via the USB. This is
429                  * always an integral number of packets if the transfer was
430                  * halted before its normal completion. (Can't use the
431                  * hctsiz.xfersize field because that reflects the number of
432                  * bytes transferred via the AHB, not the USB).
433                  */
434                 count = hctsiz >> TSIZ_PKTCNT_SHIFT &
435                         TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT;
436                 length = (chan->start_pkt_count - count) * chan->max_packet;
437         }
438
439         return length;
440 }
441
442 /**
443  * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
444  * Complete interrupt on the host channel. Updates the actual_length field
445  * of the URB based on the number of bytes transferred via the host channel.
446  * Sets the URB status if the data transfer is finished.
447  *
448  * Return: 1 if the data transfer specified by the URB is completely finished,
449  * 0 otherwise
450  */
451 static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
452                                  struct dwc2_host_chan *chan, int chnum,
453                                  struct dwc2_hcd_urb *urb,
454                                  struct dwc2_qtd *qtd)
455 {
456         u32 hctsiz;
457         int xfer_done = 0;
458         int short_read = 0;
459         int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
460                                                       DWC2_HC_XFER_COMPLETE,
461                                                       &short_read);
462
463         if (urb->actual_length + xfer_length > urb->length) {
464                 dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
465                 xfer_length = urb->length - urb->actual_length;
466         }
467
468         /* Non DWORD-aligned buffer case handling */
469         if (chan->align_buf && xfer_length && chan->ep_is_in) {
470                 dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
471                 dma_sync_single_for_cpu(hsotg->dev, urb->dma, urb->length,
472                                         DMA_FROM_DEVICE);
473                 memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
474                        xfer_length);
475                 dma_sync_single_for_device(hsotg->dev, urb->dma, urb->length,
476                                            DMA_FROM_DEVICE);
477         }
478
479         dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
480                  urb->actual_length, xfer_length);
481         urb->actual_length += xfer_length;
482
483         if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
484             (urb->flags & URB_SEND_ZERO_PACKET) &&
485             urb->actual_length >= urb->length &&
486             !(urb->length % chan->max_packet)) {
487                 xfer_done = 0;
488         } else if (short_read || urb->actual_length >= urb->length) {
489                 xfer_done = 1;
490                 urb->status = 0;
491         }
492
493         hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
494         dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
495                  __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
496         dev_vdbg(hsotg->dev, "  chan->xfer_len %d\n", chan->xfer_len);
497         dev_vdbg(hsotg->dev, "  hctsiz.xfersize %d\n",
498                  hctsiz >> TSIZ_XFERSIZE_SHIFT &
499                  TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT);
500         dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n", urb->length);
501         dev_vdbg(hsotg->dev, "  urb->actual_length %d\n", urb->actual_length);
502         dev_vdbg(hsotg->dev, "  short_read %d, xfer_done %d\n", short_read,
503                  xfer_done);
504
505         return xfer_done;
506 }
507
508 /*
509  * Save the starting data toggle for the next transfer. The data toggle is
510  * saved in the QH for non-control transfers and it's saved in the QTD for
511  * control transfers.
512  */
513 void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
514                                struct dwc2_host_chan *chan, int chnum,
515                                struct dwc2_qtd *qtd)
516 {
517         u32 hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
518         u32 pid = hctsiz & TSIZ_SC_MC_PID_MASK;
519
520         if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
521                 if (pid == TSIZ_SC_MC_PID_DATA0)
522                         chan->qh->data_toggle = DWC2_HC_PID_DATA0;
523                 else
524                         chan->qh->data_toggle = DWC2_HC_PID_DATA1;
525         } else {
526                 if (pid == TSIZ_SC_MC_PID_DATA0)
527                         qtd->data_toggle = DWC2_HC_PID_DATA0;
528                 else
529                         qtd->data_toggle = DWC2_HC_PID_DATA1;
530         }
531 }
532
533 /**
534  * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
535  * the transfer is stopped for any reason. The fields of the current entry in
536  * the frame descriptor array are set based on the transfer state and the input
537  * halt_status. Completes the Isochronous URB if all the URB frames have been
538  * completed.
539  *
540  * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
541  * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
542  */
543 static enum dwc2_halt_status dwc2_update_isoc_urb_state(
544                 struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
545                 int chnum, struct dwc2_qtd *qtd,
546                 enum dwc2_halt_status halt_status)
547 {
548         struct dwc2_hcd_iso_packet_desc *frame_desc;
549         struct dwc2_hcd_urb *urb = qtd->urb;
550
551         if (!urb)
552                 return DWC2_HC_XFER_NO_HALT_STATUS;
553
554         frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
555
556         switch (halt_status) {
557         case DWC2_HC_XFER_COMPLETE:
558                 frame_desc->status = 0;
559                 frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
560                                         chan, chnum, qtd, halt_status, NULL);
561
562                 /* Non DWORD-aligned buffer case handling */
563                 if (chan->align_buf && frame_desc->actual_length &&
564                     chan->ep_is_in) {
565                         dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n",
566                                 __func__);
567                         dma_sync_single_for_cpu(hsotg->dev, urb->dma,
568                                                 urb->length, DMA_FROM_DEVICE);
569                         memcpy(urb->buf + frame_desc->offset +
570                                qtd->isoc_split_offset, chan->qh->dw_align_buf,
571                                frame_desc->actual_length);
572                         dma_sync_single_for_device(hsotg->dev, urb->dma,
573                                                    urb->length,
574                                                    DMA_FROM_DEVICE);
575                 }
576                 break;
577         case DWC2_HC_XFER_FRAME_OVERRUN:
578                 urb->error_count++;
579                 if (chan->ep_is_in)
580                         frame_desc->status = -ENOSR;
581                 else
582                         frame_desc->status = -ECOMM;
583                 frame_desc->actual_length = 0;
584                 break;
585         case DWC2_HC_XFER_BABBLE_ERR:
586                 urb->error_count++;
587                 frame_desc->status = -EOVERFLOW;
588                 /* Don't need to update actual_length in this case */
589                 break;
590         case DWC2_HC_XFER_XACT_ERR:
591                 urb->error_count++;
592                 frame_desc->status = -EPROTO;
593                 frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
594                                         chan, chnum, qtd, halt_status, NULL);
595
596                 /* Non DWORD-aligned buffer case handling */
597                 if (chan->align_buf && frame_desc->actual_length &&
598                     chan->ep_is_in) {
599                         dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n",
600                                 __func__);
601                         dma_sync_single_for_cpu(hsotg->dev, urb->dma,
602                                                 urb->length, DMA_FROM_DEVICE);
603                         memcpy(urb->buf + frame_desc->offset +
604                                qtd->isoc_split_offset, chan->qh->dw_align_buf,
605                                frame_desc->actual_length);
606                         dma_sync_single_for_device(hsotg->dev, urb->dma,
607                                                    urb->length,
608                                                    DMA_FROM_DEVICE);
609                 }
610
611                 /* Skip whole frame */
612                 if (chan->qh->do_split &&
613                     chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
614                     hsotg->core_params->dma_enable > 0) {
615                         qtd->complete_split = 0;
616                         qtd->isoc_split_offset = 0;
617                 }
618
619                 break;
620         default:
621                 dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
622                         halt_status);
623                 break;
624         }
625
626         if (++qtd->isoc_frame_index == urb->packet_count) {
627                 /*
628                  * urb->status is not used for isoc transfers. The individual
629                  * frame_desc statuses are used instead.
630                  */
631                 dwc2_host_complete(hsotg, urb->priv, urb, 0);
632                 halt_status = DWC2_HC_XFER_URB_COMPLETE;
633         } else {
634                 halt_status = DWC2_HC_XFER_COMPLETE;
635         }
636
637         return halt_status;
638 }
639
640 /*
641  * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
642  * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
643  * still linked to the QH, the QH is added to the end of the inactive
644  * non-periodic schedule. For periodic QHs, removes the QH from the periodic
645  * schedule if no more QTDs are linked to the QH.
646  */
647 static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
648                                int free_qtd)
649 {
650         int continue_split = 0;
651         struct dwc2_qtd *qtd;
652
653         if (dbg_qh(qh))
654                 dev_vdbg(hsotg->dev, "  %s(%p,%p,%d)\n", __func__,
655                          hsotg, qh, free_qtd);
656
657         if (list_empty(&qh->qtd_list)) {
658                 dev_dbg(hsotg->dev, "## QTD list empty ##\n");
659                 goto no_qtd;
660         }
661
662         qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
663
664         if (qtd->complete_split)
665                 continue_split = 1;
666         else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
667                  qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
668                 continue_split = 1;
669
670         if (free_qtd) {
671                 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
672                 continue_split = 0;
673         }
674
675 no_qtd:
676         if (qh->channel)
677                 qh->channel->align_buf = 0;
678         qh->channel = NULL;
679         dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
680 }
681
682 /**
683  * dwc2_release_channel() - Releases a host channel for use by other transfers
684  *
685  * @hsotg:       The HCD state structure
686  * @chan:        The host channel to release
687  * @qtd:         The QTD associated with the host channel. This QTD may be
688  *               freed if the transfer is complete or an error has occurred.
689  * @halt_status: Reason the channel is being released. This status
690  *               determines the actions taken by this function.
691  *
692  * Also attempts to select and queue more transactions since at least one host
693  * channel is available.
694  */
695 static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
696                                  struct dwc2_host_chan *chan,
697                                  struct dwc2_qtd *qtd,
698                                  enum dwc2_halt_status halt_status)
699 {
700         enum dwc2_transaction_type tr_type;
701         u32 haintmsk;
702         int free_qtd = 0;
703
704         if (dbg_hc(chan))
705                 dev_vdbg(hsotg->dev, "  %s: channel %d, halt_status %d\n",
706                          __func__, chan->hc_num, halt_status);
707
708         switch (halt_status) {
709         case DWC2_HC_XFER_URB_COMPLETE:
710                 free_qtd = 1;
711                 break;
712         case DWC2_HC_XFER_AHB_ERR:
713         case DWC2_HC_XFER_STALL:
714         case DWC2_HC_XFER_BABBLE_ERR:
715                 free_qtd = 1;
716                 break;
717         case DWC2_HC_XFER_XACT_ERR:
718                 if (qtd && qtd->error_count >= 3) {
719                         dev_vdbg(hsotg->dev,
720                                  "  Complete URB with transaction error\n");
721                         free_qtd = 1;
722                         if (qtd->urb) {
723                                 qtd->urb->status = -EPROTO;
724                                 dwc2_host_complete(hsotg, qtd->urb->priv,
725                                                    qtd->urb, -EPROTO);
726                         }
727                 }
728                 break;
729         case DWC2_HC_XFER_URB_DEQUEUE:
730                 /*
731                  * The QTD has already been removed and the QH has been
732                  * deactivated. Don't want to do anything except release the
733                  * host channel and try to queue more transfers.
734                  */
735                 goto cleanup;
736         case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
737                 dev_vdbg(hsotg->dev, "  Complete URB with I/O error\n");
738                 free_qtd = 1;
739                 if (qtd && qtd->urb) {
740                         qtd->urb->status = -EIO;
741                         dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb,
742                                            -EIO);
743                 }
744                 break;
745         case DWC2_HC_XFER_NO_HALT_STATUS:
746         default:
747                 break;
748         }
749
750         dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);
751
752 cleanup:
753         /*
754          * Release the host channel for use by other transfers. The cleanup
755          * function clears the channel interrupt enables and conditions, so
756          * there's no need to clear the Channel Halted interrupt separately.
757          */
758         if (!list_empty(&chan->hc_list_entry))
759                 list_del(&chan->hc_list_entry);
760         dwc2_hc_cleanup(hsotg, chan);
761         list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
762
763         switch (chan->ep_type) {
764         case USB_ENDPOINT_XFER_CONTROL:
765         case USB_ENDPOINT_XFER_BULK:
766                 hsotg->non_periodic_channels--;
767                 break;
768         default:
769                 /*
770                  * Don't release reservations for periodic channels here.
771                  * That's done when a periodic transfer is descheduled (i.e.
772                  * when the QH is removed from the periodic schedule).
773                  */
774                 break;
775         }
776
777         haintmsk = readl(hsotg->regs + HAINTMSK);
778         haintmsk &= ~(1 << chan->hc_num);
779         writel(haintmsk, hsotg->regs + HAINTMSK);
780
781         /* Try to queue more transfers now that there's a free channel */
782         tr_type = dwc2_hcd_select_transactions(hsotg);
783         if (tr_type != DWC2_TRANSACTION_NONE)
784                 dwc2_hcd_queue_transactions(hsotg, tr_type);
785 }
786
787 /*
788  * Halts a host channel. If the channel cannot be halted immediately because
789  * the request queue is full, this function ensures that the FIFO empty
790  * interrupt for the appropriate queue is enabled so that the halt request can
791  * be queued when there is space in the request queue.
792  *
793  * This function may also be called in DMA mode. In that case, the channel is
794  * simply released since the core always halts the channel automatically in
795  * DMA mode.
796  */
797 static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
798                               struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
799                               enum dwc2_halt_status halt_status)
800 {
801         if (dbg_hc(chan))
802                 dev_vdbg(hsotg->dev, "%s()\n", __func__);
803
804         if (hsotg->core_params->dma_enable > 0) {
805                 if (dbg_hc(chan))
806                         dev_vdbg(hsotg->dev, "DMA enabled\n");
807                 dwc2_release_channel(hsotg, chan, qtd, halt_status);
808                 return;
809         }
810
811         /* Slave mode processing */
812         dwc2_hc_halt(hsotg, chan, halt_status);
813
814         if (chan->halt_on_queue) {
815                 u32 gintmsk;
816
817                 dev_vdbg(hsotg->dev, "Halt on queue\n");
818                 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
819                     chan->ep_type == USB_ENDPOINT_XFER_BULK) {
820                         dev_vdbg(hsotg->dev, "control/bulk\n");
821                         /*
822                          * Make sure the Non-periodic Tx FIFO empty interrupt
823                          * is enabled so that the non-periodic schedule will
824                          * be processed
825                          */
826                         gintmsk = readl(hsotg->regs + GINTMSK);
827                         gintmsk |= GINTSTS_NPTXFEMP;
828                         writel(gintmsk, hsotg->regs + GINTMSK);
829                 } else {
830                         dev_vdbg(hsotg->dev, "isoc/intr\n");
831                         /*
832                          * Move the QH from the periodic queued schedule to
833                          * the periodic assigned schedule. This allows the
834                          * halt to be queued when the periodic schedule is
835                          * processed.
836                          */
837                         list_move(&chan->qh->qh_list_entry,
838                                   &hsotg->periodic_sched_assigned);
839
840                         /*
841                          * Make sure the Periodic Tx FIFO Empty interrupt is
842                          * enabled so that the periodic schedule will be
843                          * processed
844                          */
845                         gintmsk = readl(hsotg->regs + GINTMSK);
846                         gintmsk |= GINTSTS_PTXFEMP;
847                         writel(gintmsk, hsotg->regs + GINTMSK);
848                 }
849         }
850 }
851
852 /*
853  * Performs common cleanup for non-periodic transfers after a Transfer
854  * Complete interrupt. This function should be called after any endpoint type
855  * specific handling is finished to release the host channel.
856  */
857 static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
858                                             struct dwc2_host_chan *chan,
859                                             int chnum, struct dwc2_qtd *qtd,
860                                             enum dwc2_halt_status halt_status)
861 {
862         dev_vdbg(hsotg->dev, "%s()\n", __func__);
863
864         qtd->error_count = 0;
865
866         if (chan->hcint & HCINTMSK_NYET) {
867                 /*
868                  * Got a NYET on the last transaction of the transfer. This
869                  * means that the endpoint should be in the PING state at the
870                  * beginning of the next transfer.
871                  */
872                 dev_vdbg(hsotg->dev, "got NYET\n");
873                 chan->qh->ping_state = 1;
874         }
875
876         /*
877          * Always halt and release the host channel to make it available for
878          * more transfers. There may still be more phases for a control
879          * transfer or more data packets for a bulk transfer at this point,
880          * but the host channel is still halted. A channel will be reassigned
881          * to the transfer when the non-periodic schedule is processed after
882          * the channel is released. This allows transactions to be queued
883          * properly via dwc2_hcd_queue_transactions, which also enables the
884          * Tx FIFO Empty interrupt if necessary.
885          */
886         if (chan->ep_is_in) {
887                 /*
888                  * IN transfers in Slave mode require an explicit disable to
889                  * halt the channel. (In DMA mode, this call simply releases
890                  * the channel.)
891                  */
892                 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
893         } else {
894                 /*
895                  * The channel is automatically disabled by the core for OUT
896                  * transfers in Slave mode
897                  */
898                 dwc2_release_channel(hsotg, chan, qtd, halt_status);
899         }
900 }
901
902 /*
903  * Performs common cleanup for periodic transfers after a Transfer Complete
904  * interrupt. This function should be called after any endpoint type specific
905  * handling is finished to release the host channel.
906  */
907 static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
908                                         struct dwc2_host_chan *chan, int chnum,
909                                         struct dwc2_qtd *qtd,
910                                         enum dwc2_halt_status halt_status)
911 {
912         u32 hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
913
914         qtd->error_count = 0;
915
916         if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
917                 /* Core halts channel in these cases */
918                 dwc2_release_channel(hsotg, chan, qtd, halt_status);
919         else
920                 /* Flush any outstanding requests from the Tx queue */
921                 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
922 }
923
/*
 * Handles Transfer Complete for an isochronous IN split transaction.
 * Folds the bytes received in this CSPLIT into the current isochronous
 * frame descriptor and advances the split/frame bookkeeping in the QTD.
 *
 * Returns 1 if the host channel was released, 0 otherwise (caller must
 * continue its own handling).
 */
static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, int chnum,
				       struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u32 len;

	if (!qtd->urb)
		return 0;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
	len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
					  DWC2_HC_XFER_COMPLETE, NULL);
	if (!len) {
		/* Nothing received: restart the split from the beginning */
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
		return 0;
	}

	frame_desc->actual_length += len;

	/* Non DWORD-aligned buffer: copy out of the DMA bounce buffer */
	if (chan->align_buf && len) {
		dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		dma_sync_single_for_cpu(hsotg->dev, qtd->urb->dma,
					qtd->urb->length, DMA_FROM_DEVICE);
		memcpy(qtd->urb->buf + frame_desc->offset +
		       qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
		dma_sync_single_for_device(hsotg->dev, qtd->urb->dma,
					   qtd->urb->length, DMA_FROM_DEVICE);
	}

	qtd->isoc_split_offset += len;

	/* Frame descriptor filled: mark it done and move to the next frame */
	if (frame_desc->actual_length >= frame_desc->length) {
		frame_desc->status = 0;
		qtd->isoc_frame_index++;
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
	}

	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
		/* All frames transferred: complete the URB */
		dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb, 0);
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_URB_COMPLETE);
	} else {
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_NO_HALT_STATUS);
	}

	return 1;	/* Indicates that channel released */
}
975
976 /*
977  * Handles a host channel Transfer Complete interrupt. This handler may be
978  * called in either DMA mode or Slave mode.
979  */
980 static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
981                                   struct dwc2_host_chan *chan, int chnum,
982                                   struct dwc2_qtd *qtd)
983 {
984         struct dwc2_hcd_urb *urb = qtd->urb;
985         int pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
986         enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
987         int urb_xfer_done;
988
989         if (dbg_hc(chan))
990                 dev_vdbg(hsotg->dev,
991                          "--Host Channel %d Interrupt: Transfer Complete--\n",
992                          chnum);
993
994         if (hsotg->core_params->dma_desc_enable > 0) {
995                 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
996                 if (pipe_type == USB_ENDPOINT_XFER_ISOC)
997                         /* Do not disable the interrupt, just clear it */
998                         return;
999                 goto handle_xfercomp_done;
1000         }
1001
1002         /* Handle xfer complete on CSPLIT */
1003         if (chan->qh->do_split) {
1004                 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
1005                     hsotg->core_params->dma_enable > 0) {
1006                         if (qtd->complete_split &&
1007                             dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
1008                                                         qtd))
1009                                 goto handle_xfercomp_done;
1010                 } else {
1011                         qtd->complete_split = 0;
1012                 }
1013         }
1014
1015         if (!urb)
1016                 goto handle_xfercomp_done;
1017
1018         /* Update the QTD and URB states */
1019         switch (pipe_type) {
1020         case USB_ENDPOINT_XFER_CONTROL:
1021                 switch (qtd->control_phase) {
1022                 case DWC2_CONTROL_SETUP:
1023                         if (urb->length > 0)
1024                                 qtd->control_phase = DWC2_CONTROL_DATA;
1025                         else
1026                                 qtd->control_phase = DWC2_CONTROL_STATUS;
1027                         dev_vdbg(hsotg->dev,
1028                                  "  Control setup transaction done\n");
1029                         halt_status = DWC2_HC_XFER_COMPLETE;
1030                         break;
1031                 case DWC2_CONTROL_DATA:
1032                         urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
1033                                                               chnum, urb, qtd);
1034                         if (urb_xfer_done) {
1035                                 qtd->control_phase = DWC2_CONTROL_STATUS;
1036                                 dev_vdbg(hsotg->dev,
1037                                          "  Control data transfer done\n");
1038                         } else {
1039                                 dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
1040                                                           qtd);
1041                         }
1042                         halt_status = DWC2_HC_XFER_COMPLETE;
1043                         break;
1044                 case DWC2_CONTROL_STATUS:
1045                         dev_vdbg(hsotg->dev, "  Control transfer complete\n");
1046                         if (urb->status == -EINPROGRESS)
1047                                 urb->status = 0;
1048                         dwc2_host_complete(hsotg, urb->priv, urb, urb->status);
1049                         halt_status = DWC2_HC_XFER_URB_COMPLETE;
1050                         break;
1051                 }
1052
1053                 dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1054                                                 halt_status);
1055                 break;
1056         case USB_ENDPOINT_XFER_BULK:
1057                 dev_vdbg(hsotg->dev, "  Bulk transfer complete\n");
1058                 urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1059                                                       qtd);
1060                 if (urb_xfer_done) {
1061                         dwc2_host_complete(hsotg, urb->priv, urb, urb->status);
1062                         halt_status = DWC2_HC_XFER_URB_COMPLETE;
1063                 } else {
1064                         halt_status = DWC2_HC_XFER_COMPLETE;
1065                 }
1066
1067                 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1068                 dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1069                                                 halt_status);
1070                 break;
1071         case USB_ENDPOINT_XFER_INT:
1072                 dev_vdbg(hsotg->dev, "  Interrupt transfer complete\n");
1073                 urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1074                                                       qtd);
1075
1076                 /*
1077                  * Interrupt URB is done on the first transfer complete
1078                  * interrupt
1079                  */
1080                 if (urb_xfer_done) {
1081                                 dwc2_host_complete(hsotg, urb->priv, urb,
1082                                                    urb->status);
1083                                 halt_status = DWC2_HC_XFER_URB_COMPLETE;
1084                 } else {
1085                                 halt_status = DWC2_HC_XFER_COMPLETE;
1086                 }
1087
1088                 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1089                 dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1090                                             halt_status);
1091                 break;
1092         case USB_ENDPOINT_XFER_ISOC:
1093                 if (dbg_perio())
1094                         dev_vdbg(hsotg->dev, "  Isochronous transfer complete\n");
1095                 if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
1096                         halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1097                                         chnum, qtd, DWC2_HC_XFER_COMPLETE);
1098                 dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1099                                             halt_status);
1100                 break;
1101         }
1102
1103 handle_xfercomp_done:
1104         disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
1105 }
1106
1107 /*
1108  * Handles a host channel STALL interrupt. This handler may be called in
1109  * either DMA mode or Slave mode.
1110  */
1111 static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
1112                                struct dwc2_host_chan *chan, int chnum,
1113                                struct dwc2_qtd *qtd)
1114 {
1115         struct dwc2_hcd_urb *urb = qtd->urb;
1116         int pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1117
1118         dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
1119                 chnum);
1120
1121         if (hsotg->core_params->dma_desc_enable > 0) {
1122                 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1123                                             DWC2_HC_XFER_STALL);
1124                 goto handle_stall_done;
1125         }
1126
1127         if (!urb)
1128                 goto handle_stall_halt;
1129
1130         if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
1131                 dwc2_host_complete(hsotg, urb->priv, urb, -EPIPE);
1132
1133         if (pipe_type == USB_ENDPOINT_XFER_BULK ||
1134             pipe_type == USB_ENDPOINT_XFER_INT) {
1135                 dwc2_host_complete(hsotg, urb->priv, urb, -EPIPE);
1136                 /*
1137                  * USB protocol requires resetting the data toggle for bulk
1138                  * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1139                  * setup command is issued to the endpoint. Anticipate the
1140                  * CLEAR_FEATURE command since a STALL has occurred and reset
1141                  * the data toggle now.
1142                  */
1143                 chan->qh->data_toggle = 0;
1144         }
1145
1146 handle_stall_halt:
1147         dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
1148
1149 handle_stall_done:
1150         disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
1151 }
1152
1153 /*
1154  * Updates the state of the URB when a transfer has been stopped due to an
1155  * abnormal condition before the transfer completes. Modifies the
1156  * actual_length field of the URB to reflect the number of bytes that have
1157  * actually been transferred via the host channel.
1158  */
1159 static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
1160                                       struct dwc2_host_chan *chan, int chnum,
1161                                       struct dwc2_hcd_urb *urb,
1162                                       struct dwc2_qtd *qtd,
1163                                       enum dwc2_halt_status halt_status)
1164 {
1165         u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
1166                                                       qtd, halt_status, NULL);
1167         u32 hctsiz;
1168
1169         if (urb->actual_length + xfer_length > urb->length) {
1170                 dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
1171                 xfer_length = urb->length - urb->actual_length;
1172         }
1173
1174         /* Non DWORD-aligned buffer case handling */
1175         if (chan->align_buf && xfer_length && chan->ep_is_in) {
1176                 dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
1177                 dma_sync_single_for_cpu(hsotg->dev, urb->dma, urb->length,
1178                                         DMA_FROM_DEVICE);
1179                 memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
1180                        xfer_length);
1181                 dma_sync_single_for_device(hsotg->dev, urb->dma, urb->length,
1182                                            DMA_FROM_DEVICE);
1183         }
1184
1185         urb->actual_length += xfer_length;
1186
1187         hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
1188         dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
1189                  __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
1190         dev_vdbg(hsotg->dev, "  chan->start_pkt_count %d\n",
1191                  chan->start_pkt_count);
1192         dev_vdbg(hsotg->dev, "  hctsiz.pktcnt %d\n",
1193                  hctsiz >> TSIZ_PKTCNT_SHIFT &
1194                  TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT);
1195         dev_vdbg(hsotg->dev, "  chan->max_packet %d\n", chan->max_packet);
1196         dev_vdbg(hsotg->dev, "  bytes_transferred %d\n",
1197                  xfer_length);
1198         dev_vdbg(hsotg->dev, "  urb->actual_length %d\n",
1199                  urb->actual_length);
1200         dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n",
1201                  urb->length);
1202 }
1203
1204 /*
1205  * Handles a host channel NAK interrupt. This handler may be called in either
1206  * DMA mode or Slave mode.
1207  */
1208 static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1209                              struct dwc2_host_chan *chan, int chnum,
1210                              struct dwc2_qtd *qtd)
1211 {
1212         if (dbg_hc(chan))
1213                 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
1214                          chnum);
1215
1216         /*
1217          * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
1218          * interrupt. Re-start the SSPLIT transfer.
1219          */
1220         if (chan->do_split) {
1221                 if (chan->complete_split)
1222                         qtd->error_count = 0;
1223                 qtd->complete_split = 0;
1224                 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1225                 goto handle_nak_done;
1226         }
1227
1228         switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1229         case USB_ENDPOINT_XFER_CONTROL:
1230         case USB_ENDPOINT_XFER_BULK:
1231                 if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
1232                         /*
1233                          * NAK interrupts are enabled on bulk/control IN
1234                          * transfers in DMA mode for the sole purpose of
1235                          * resetting the error count after a transaction error
1236                          * occurs. The core will continue transferring data.
1237                          */
1238                         qtd->error_count = 0;
1239                         break;
1240                 }
1241
1242                 /*
1243                  * NAK interrupts normally occur during OUT transfers in DMA
1244                  * or Slave mode. For IN transfers, more requests will be
1245                  * queued as request queue space is available.
1246                  */
1247                 qtd->error_count = 0;
1248
1249                 if (!chan->qh->ping_state) {
1250                         dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1251                                                   qtd, DWC2_HC_XFER_NAK);
1252                         dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1253
1254                         if (chan->speed == USB_SPEED_HIGH)
1255                                 chan->qh->ping_state = 1;
1256                 }
1257
1258                 /*
1259                  * Halt the channel so the transfer can be re-started from
1260                  * the appropriate point or the PING protocol will
1261                  * start/continue
1262                  */
1263                 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1264                 break;
1265         case USB_ENDPOINT_XFER_INT:
1266                 qtd->error_count = 0;
1267                 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1268                 break;
1269         case USB_ENDPOINT_XFER_ISOC:
1270                 /* Should never get called for isochronous transfers */
1271                 dev_err(hsotg->dev, "NACK interrupt for ISOC transfer\n");
1272                 break;
1273         }
1274
1275 handle_nak_done:
1276         disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
1277 }
1278
1279 /*
1280  * Handles a host channel ACK interrupt. This interrupt is enabled when
1281  * performing the PING protocol in Slave mode, when errors occur during
1282  * either Slave mode or DMA mode, and during Start Split transactions.
1283  */
1284 static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
1285                              struct dwc2_host_chan *chan, int chnum,
1286                              struct dwc2_qtd *qtd)
1287 {
1288         struct dwc2_hcd_iso_packet_desc *frame_desc;
1289
1290         if (dbg_hc(chan))
1291                 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
1292                          chnum);
1293
1294         if (chan->do_split) {
1295                 /* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
1296                 if (!chan->ep_is_in &&
1297                     chan->data_pid_start != DWC2_HC_PID_SETUP)
1298                         qtd->ssplit_out_xfer_count = chan->xfer_len;
1299
1300                 if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
1301                         qtd->complete_split = 1;
1302                         dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1303                 } else {
1304                         /* ISOC OUT */
1305                         switch (chan->xact_pos) {
1306                         case DWC2_HCSPLT_XACTPOS_ALL:
1307                                 break;
1308                         case DWC2_HCSPLT_XACTPOS_END:
1309                                 qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
1310                                 qtd->isoc_split_offset = 0;
1311                                 break;
1312                         case DWC2_HCSPLT_XACTPOS_BEGIN:
1313                         case DWC2_HCSPLT_XACTPOS_MID:
1314                                 /*
1315                                  * For BEGIN or MID, calculate the length for
1316                                  * the next microframe to determine the correct
1317                                  * SSPLIT token, either MID or END
1318                                  */
1319                                 frame_desc = &qtd->urb->iso_descs[
1320                                                 qtd->isoc_frame_index];
1321                                 qtd->isoc_split_offset += 188;
1322
1323                                 if (frame_desc->length - qtd->isoc_split_offset
1324                                                         <= 188)
1325                                         qtd->isoc_split_pos =
1326                                                         DWC2_HCSPLT_XACTPOS_END;
1327                                 else
1328                                         qtd->isoc_split_pos =
1329                                                         DWC2_HCSPLT_XACTPOS_MID;
1330                                 break;
1331                         }
1332                 }
1333         } else {
1334                 qtd->error_count = 0;
1335
1336                 if (chan->qh->ping_state) {
1337                         chan->qh->ping_state = 0;
1338                         /*
1339                          * Halt the channel so the transfer can be re-started
1340                          * from the appropriate point. This only happens in
1341                          * Slave mode. In DMA mode, the ping_state is cleared
1342                          * when the transfer is started because the core
1343                          * automatically executes the PING, then the transfer.
1344                          */
1345                         dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1346                 }
1347         }
1348
1349         /*
1350          * If the ACK occurred when _not_ in the PING state, let the channel
1351          * continue transferring data after clearing the error count
1352          */
1353         disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
1354 }
1355
1356 /*
1357  * Handles a host channel NYET interrupt. This interrupt should only occur on
1358  * Bulk and Control OUT endpoints and for complete split transactions. If a
1359  * NYET occurs at the same time as a Transfer Complete interrupt, it is
1360  * handled in the xfercomp interrupt handler, not here. This handler may be
1361  * called in either DMA mode or Slave mode.
1362  */
static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan, int chnum,
			      struct dwc2_qtd *qtd)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
			 chnum);

	/*
	 * NYET on CSPLIT
	 * re-do the CSPLIT immediately on non-periodic
	 */
	if (chan->do_split && chan->complete_split) {
		/*
		 * ISOC IN complete split in DMA mode: restart the split
		 * transaction from the beginning (complete_split and
		 * isoc_split_offset reset to 0). If this was the last frame
		 * of the URB (isoc_frame_index reaches packet_count), report
		 * the URB complete and free the channel; otherwise release
		 * the channel so the transfer can be rescheduled.
		 */
		if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
		    hsotg->core_params->dma_enable > 0) {
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
			if (qtd->urb &&
			    ++qtd->isoc_frame_index == qtd->urb->packet_count) {
				dwc2_host_complete(hsotg, qtd->urb->priv,
						   qtd->urb, 0);
				dwc2_release_channel(hsotg, chan, qtd,
						     DWC2_HC_XFER_URB_COMPLETE);
			} else {
				dwc2_release_channel(hsotg, chan, qtd,
						DWC2_HC_XFER_NO_HALT_STATUS);
			}
			goto handle_nyet_done;
		}

		/*
		 * Periodic complete splits must finish within the same full
		 * speed frame they were scheduled in; compare the current
		 * frame number against the QH's scheduled frame.
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			int frnum = dwc2_hcd_get_frame_number(hsotg);

			if (dwc2_full_frame_num(frnum) !=
			    dwc2_full_frame_num(chan->qh->sched_frame)) {
				/*
				 * No longer in the same full speed frame.
				 * Treat this as a transaction error.
				 */
#if 0
				/*
				 * Todo: Fix system performance so this can
				 * be treated as an error. Right now complete
				 * splits cannot be scheduled precisely enough
				 * due to other system activity, so this error
				 * occurs regularly in Slave mode.
				 */
				qtd->error_count++;
#endif
				qtd->complete_split = 0;
				dwc2_halt_channel(hsotg, chan, qtd,
						  DWC2_HC_XFER_XACT_ERR);
				/* Todo: add support for isoc release */
				goto handle_nyet_done;
			}
		}

		/* Still in the scheduled frame: halt and retry the CSPLIT */
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
		goto handle_nyet_done;
	}

	/*
	 * Non-split NYET (high-speed BULK/CONTROL OUT): enter PING state
	 * and clear the error count since the device responded.
	 */
	chan->qh->ping_state = 1;
	qtd->error_count = 0;

	dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
				  DWC2_HC_XFER_NYET);
	dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);

	/*
	 * Halt the channel and re-start the transfer so the PING protocol
	 * will start
	 */
	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);

handle_nyet_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
}
1441
1442 /*
1443  * Handles a host channel babble interrupt. This handler may be called in
1444  * either DMA mode or Slave mode.
1445  */
1446 static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
1447                                 struct dwc2_host_chan *chan, int chnum,
1448                                 struct dwc2_qtd *qtd)
1449 {
1450         dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
1451                 chnum);
1452
1453         if (hsotg->core_params->dma_desc_enable > 0) {
1454                 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1455                                             DWC2_HC_XFER_BABBLE_ERR);
1456                 goto handle_babble_done;
1457         }
1458
1459         if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1460                 if (qtd->urb)
1461                         dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb,
1462                                            -EOVERFLOW);
1463                 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
1464         } else {
1465                 enum dwc2_halt_status halt_status;
1466
1467                 halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1468                                                 qtd, DWC2_HC_XFER_BABBLE_ERR);
1469                 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1470         }
1471
1472 handle_babble_done:
1473         dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1474         disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
1475 }
1476
1477 /*
1478  * Handles a host channel AHB error interrupt. This handler is only called in
1479  * DMA mode.
1480  */
1481 static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
1482                                 struct dwc2_host_chan *chan, int chnum,
1483                                 struct dwc2_qtd *qtd)
1484 {
1485         struct dwc2_hcd_urb *urb = qtd->urb;
1486         char *pipetype, *speed;
1487         u32 hcchar;
1488         u32 hcsplt;
1489         u32 hctsiz;
1490         u32 hc_dma;
1491
1492         dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
1493                 chnum);
1494
1495         if (!urb)
1496                 goto handle_ahberr_halt;
1497
1498         hcchar = readl(hsotg->regs + HCCHAR(chnum));
1499         hcsplt = readl(hsotg->regs + HCSPLT(chnum));
1500         hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
1501         hc_dma = readl(hsotg->regs + HCDMA(chnum));
1502
1503         dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
1504         dev_err(hsotg->dev, "  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
1505         dev_err(hsotg->dev, "  hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
1506         dev_err(hsotg->dev, "  Device address: %d\n",
1507                 dwc2_hcd_get_dev_addr(&urb->pipe_info));
1508         dev_err(hsotg->dev, "  Endpoint: %d, %s\n",
1509                 dwc2_hcd_get_ep_num(&urb->pipe_info),
1510                 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
1511
1512         switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
1513         case USB_ENDPOINT_XFER_CONTROL:
1514                 pipetype = "CONTROL";
1515                 break;
1516         case USB_ENDPOINT_XFER_BULK:
1517                 pipetype = "BULK";
1518                 break;
1519         case USB_ENDPOINT_XFER_INT:
1520                 pipetype = "INTERRUPT";
1521                 break;
1522         case USB_ENDPOINT_XFER_ISOC:
1523                 pipetype = "ISOCHRONOUS";
1524                 break;
1525         default:
1526                 pipetype = "UNKNOWN";
1527                 break;
1528         }
1529
1530         dev_err(hsotg->dev, "  Endpoint type: %s\n", pipetype);
1531
1532         switch (chan->speed) {
1533         case USB_SPEED_HIGH:
1534                 speed = "HIGH";
1535                 break;
1536         case USB_SPEED_FULL:
1537                 speed = "FULL";
1538                 break;
1539         case USB_SPEED_LOW:
1540                 speed = "LOW";
1541                 break;
1542         default:
1543                 speed = "UNKNOWN";
1544                 break;
1545         }
1546
1547         dev_err(hsotg->dev, "  Speed: %s\n", speed);
1548
1549         dev_err(hsotg->dev, "  Max packet size: %d\n",
1550                 dwc2_hcd_get_mps(&urb->pipe_info));
1551         dev_err(hsotg->dev, "  Data buffer length: %d\n", urb->length);
1552         dev_err(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %08lx\n",
1553                 urb->buf, (unsigned long)urb->dma);
1554         dev_err(hsotg->dev, "  Setup buffer: %p, Setup DMA: %08lx\n",
1555                 urb->setup_packet, (unsigned long)urb->setup_dma);
1556         dev_err(hsotg->dev, "  Interval: %d\n", urb->interval);
1557
1558         /* Core halts the channel for Descriptor DMA mode */
1559         if (hsotg->core_params->dma_desc_enable > 0) {
1560                 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1561                                             DWC2_HC_XFER_AHB_ERR);
1562                 goto handle_ahberr_done;
1563         }
1564
1565         dwc2_host_complete(hsotg, urb->priv, urb, -EIO);
1566
1567 handle_ahberr_halt:
1568         /*
1569          * Force a channel halt. Don't call dwc2_halt_channel because that won't
1570          * write to the HCCHARn register in DMA mode to force the halt.
1571          */
1572         dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
1573
1574 handle_ahberr_done:
1575         dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1576         disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
1577 }
1578
1579 /*
1580  * Handles a host channel transaction error interrupt. This handler may be
1581  * called in either DMA mode or Slave mode.
1582  */
1583 static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
1584                                  struct dwc2_host_chan *chan, int chnum,
1585                                  struct dwc2_qtd *qtd)
1586 {
1587         dev_dbg(hsotg->dev,
1588                 "--Host Channel %d Interrupt: Transaction Error--\n", chnum);
1589
1590         if (hsotg->core_params->dma_desc_enable > 0) {
1591                 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1592                                             DWC2_HC_XFER_XACT_ERR);
1593                 goto handle_xacterr_done;
1594         }
1595
1596         switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1597         case USB_ENDPOINT_XFER_CONTROL:
1598         case USB_ENDPOINT_XFER_BULK:
1599                 qtd->error_count++;
1600                 if (!chan->qh->ping_state) {
1601
1602                         dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1603                                                   qtd, DWC2_HC_XFER_XACT_ERR);
1604                         dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1605                         if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
1606                                 chan->qh->ping_state = 1;
1607                 }
1608
1609                 /*
1610                  * Halt the channel so the transfer can be re-started from
1611                  * the appropriate point or the PING protocol will start
1612                  */
1613                 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1614                 break;
1615         case USB_ENDPOINT_XFER_INT:
1616                 qtd->error_count++;
1617                 if (chan->do_split && chan->complete_split)
1618                         qtd->complete_split = 0;
1619                 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1620                 break;
1621         case USB_ENDPOINT_XFER_ISOC:
1622                 {
1623                         enum dwc2_halt_status halt_status;
1624
1625                         halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1626                                         chnum, qtd, DWC2_HC_XFER_XACT_ERR);
1627                         dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1628                 }
1629                 break;
1630         }
1631
1632 handle_xacterr_done:
1633         dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1634         disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
1635 }
1636
1637 /*
1638  * Handles a host channel frame overrun interrupt. This handler may be called
1639  * in either DMA mode or Slave mode.
1640  */
1641 static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
1642                                   struct dwc2_host_chan *chan, int chnum,
1643                                   struct dwc2_qtd *qtd)
1644 {
1645         enum dwc2_halt_status halt_status;
1646
1647         if (dbg_hc(chan))
1648                 dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
1649                         chnum);
1650
1651         switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1652         case USB_ENDPOINT_XFER_CONTROL:
1653         case USB_ENDPOINT_XFER_BULK:
1654                 break;
1655         case USB_ENDPOINT_XFER_INT:
1656                 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1657                 break;
1658         case USB_ENDPOINT_XFER_ISOC:
1659                 halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1660                                         qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1661                 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1662                 break;
1663         }
1664
1665         dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1666         disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
1667 }
1668
1669 /*
1670  * Handles a host channel data toggle error interrupt. This handler may be
1671  * called in either DMA mode or Slave mode.
1672  */
1673 static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
1674                                     struct dwc2_host_chan *chan, int chnum,
1675                                     struct dwc2_qtd *qtd)
1676 {
1677         dev_dbg(hsotg->dev,
1678                 "--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
1679
1680         if (chan->ep_is_in)
1681                 qtd->error_count = 0;
1682         else
1683                 dev_err(hsotg->dev,
1684                         "Data Toggle Error on OUT transfer, channel %d\n",
1685                         chnum);
1686
1687         dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1688         disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
1689 }
1690
1691 /*
1692  * For debug only. It checks that a valid halt status is set and that
1693  * HCCHARn.chdis is clear. If there's a problem, corrective action is
1694  * taken and a warning is issued.
1695  *
1696  * Return: true if halt status is ok, false otherwise
1697  */
1698 static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
1699                                 struct dwc2_host_chan *chan, int chnum,
1700                                 struct dwc2_qtd *qtd)
1701 {
1702 #ifdef DEBUG
1703         u32 hcchar;
1704         u32 hctsiz;
1705         u32 hcintmsk;
1706         u32 hcsplt;
1707
1708         if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
1709                 /*
1710                  * This code is here only as a check. This condition should
1711                  * never happen. Ignore the halt if it does occur.
1712                  */
1713                 hcchar = readl(hsotg->regs + HCCHAR(chnum));
1714                 hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
1715                 hcintmsk = readl(hsotg->regs + HCINTMSK(chnum));
1716                 hcsplt = readl(hsotg->regs + HCSPLT(chnum));
1717                 dev_dbg(hsotg->dev,
1718                         "%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
1719                          __func__);
1720                 dev_dbg(hsotg->dev,
1721                         "channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
1722                         chnum, hcchar, hctsiz);
1723                 dev_dbg(hsotg->dev,
1724                         "hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
1725                         chan->hcint, hcintmsk, hcsplt);
1726                 if (qtd)
1727                         dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
1728                                 qtd->complete_split);
1729                 dev_warn(hsotg->dev,
1730                          "%s: no halt status, channel %d, ignoring interrupt\n",
1731                          __func__, chnum);
1732                 return false;
1733         }
1734
1735         /*
1736          * This code is here only as a check. hcchar.chdis should never be set
1737          * when the halt interrupt occurs. Halt the channel again if it does
1738          * occur.
1739          */
1740         hcchar = readl(hsotg->regs + HCCHAR(chnum));
1741         if (hcchar & HCCHAR_CHDIS) {
1742                 dev_warn(hsotg->dev,
1743                          "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
1744                          __func__, hcchar);
1745                 chan->halt_pending = 0;
1746                 dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
1747                 return false;
1748         }
1749 #endif
1750
1751         return true;
1752 }
1753
/*
 * Handles a host Channel Halted interrupt in DMA mode. This handler
 * determines the reason the channel halted and proceeds accordingly.
 *
 * The order of the hcint checks below is significant: xacterr must be
 * handled before nak/ack, and nyet before nak/ack, since several of these
 * bits can be latched at the same time (see comments inline).
 */
static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan, int chnum,
				    struct dwc2_qtd *qtd)
{
	u32 hcintmsk;
	int out_nak_enh = 0;	/* core has the OUT NAK enhancement */

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev,
			 "--Host Channel %d Interrupt: DMA Channel Halted--\n",
			 chnum);

	/*
	 * For core with OUT NAK enhancement, the flow for high-speed
	 * CONTROL/BULK OUT is handled a little differently
	 */
	if (hsotg->snpsid >= DWC2_CORE_REV_2_71a) {
		if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
		    (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		     chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
			out_nak_enh = 1;
		}
	}

	/* Dequeue, and AHB error in non-descriptor-DMA mode, short-circuit */
	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
	     hsotg->core_params->dma_desc_enable <= 0)) {
		if (hsotg->core_params->dma_desc_enable > 0)
			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
						    chan->halt_status);
		else
			/*
			 * Just release the channel. A dequeue can happen on a
			 * transfer timeout. In the case of an AHB Error, the
			 * channel was forced to halt because there's no way to
			 * gracefully recover.
			 */
			dwc2_release_channel(hsotg, chan, qtd,
					     chan->halt_status);
		return;
	}

	hcintmsk = readl(hsotg->regs + HCINTMSK(chnum));

	if (chan->hcint & HCINTMSK_XFERCOMPL) {
		/*
		 * Todo: This is here because of a possible hardware bug. Spec
		 * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
		 * interrupt w/ACK bit set should occur, but I only see the
		 * XFERCOMP bit, even with it masked out. This is a workaround
		 * for that behavior. Should fix this when hardware is fixed.
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
	} else if (chan->hcint & HCINTMSK_STALL) {
		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
	} else if ((chan->hcint & HCINTMSK_XACTERR) &&
		   hsotg->core_params->dma_desc_enable <= 0) {
		if (out_nak_enh) {
			/*
			 * With OUT NAK enhancement, a handshake alongside the
			 * XactErr means the device did respond, so the error
			 * count is reset; otherwise leave it to accumulate.
			 */
			if (chan->hcint &
			    (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
				dev_vdbg(hsotg->dev,
					 "XactErr with NYET/NAK/ACK\n");
				qtd->error_count = 0;
			} else {
				dev_vdbg(hsotg->dev,
					 "XactErr without NYET/NAK/ACK\n");
			}
		}

		/*
		 * Must handle xacterr before nak or ack. Could get a xacterr
		 * at the same time as either of these on a BULK/CONTROL OUT
		 * that started with a PING. The xacterr takes precedence.
		 */
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
	} else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
		   hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
	} else if ((chan->hcint & HCINTMSK_AHBERR) &&
		   hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
	} else if (chan->hcint & HCINTMSK_BBLERR) {
		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
	} else if (chan->hcint & HCINTMSK_FRMOVRUN) {
		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
	} else if (!out_nak_enh) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Must handle nyet before nak or ack. Could get a nyet
			 * at the same time as either of those on a BULK/CONTROL
			 * OUT that started with a PING. The nyet takes
			 * precedence.
			 */
			dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
		} else if ((chan->hcint & HCINTMSK_NAK) &&
			   !(hcintmsk & HCINTMSK_NAK)) {
			/*
			 * If nak is not masked, it's because a non-split IN
			 * transfer is in an error state. In that case, the nak
			 * is handled by the nak interrupt handler, not here.
			 * Handle nak here for BULK/CONTROL OUT transfers, which
			 * halt on a NAK to allow rewinding the buffer pointer.
			 */
			dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
		} else if ((chan->hcint & HCINTMSK_ACK) &&
			   !(hcintmsk & HCINTMSK_ACK)) {
			/*
			 * If ack is not masked, it's because a non-split IN
			 * transfer is in an error state. In that case, the ack
			 * is handled by the ack interrupt handler, not here.
			 * Handle ack here for split transfers. Start splits
			 * halt on ACK.
			 */
			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
		} else {
			if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
			    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
				/*
				 * A periodic transfer halted with no other
				 * channel interrupts set. Assume it was halted
				 * by the core because it could not be completed
				 * in its scheduled (micro)frame.
				 */
				dev_dbg(hsotg->dev,
					"%s: Halt channel %d (assume incomplete periodic transfer)\n",
					__func__, chnum);
				dwc2_halt_channel(hsotg, chan, qtd,
					DWC2_HC_XFER_PERIODIC_INCOMPLETE);
			} else {
				dev_err(hsotg->dev,
					"%s: Channel %d - ChHltd set, but reason is unknown\n",
					__func__, chnum);
				dev_err(hsotg->dev,
					"hcint 0x%08x, intsts 0x%08x\n",
					chan->hcint,
					readl(hsotg->regs + GINTSTS));
			}
		}
	} else {
		dev_info(hsotg->dev,
			 "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
			 chan->hcint);
	}
}
1904
1905 /*
1906  * Handles a host channel Channel Halted interrupt
1907  *
1908  * In slave mode, this handler is called only when the driver specifically
1909  * requests a halt. This occurs during handling other host channel interrupts
1910  * (e.g. nak, xacterr, stall, nyet, etc.).
1911  *
1912  * In DMA mode, this is the interrupt that occurs when the core has finished
1913  * processing a transfer on a channel. Other host channel interrupts (except
1914  * ahberr) are disabled in DMA mode.
1915  */
1916 static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
1917                                 struct dwc2_host_chan *chan, int chnum,
1918                                 struct dwc2_qtd *qtd)
1919 {
1920         if (dbg_hc(chan))
1921                 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
1922                          chnum);
1923
1924         if (hsotg->core_params->dma_enable > 0) {
1925                 dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
1926         } else {
1927                 if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
1928                         return;
1929                 dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
1930         }
1931 }
1932
1933 /* Handles interrupt for a specific Host Channel */
1934 static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
1935 {
1936         struct dwc2_qtd *qtd;
1937         struct dwc2_host_chan *chan;
1938         u32 hcint, hcintmsk;
1939
1940         chan = hsotg->hc_ptr_array[chnum];
1941
1942         if (dbg_hc(chan))
1943                 dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
1944                          chnum);
1945
1946         hcint = readl(hsotg->regs + HCINT(chnum));
1947         hcintmsk = readl(hsotg->regs + HCINTMSK(chnum));
1948         if (dbg_hc(chan))
1949                 dev_vdbg(hsotg->dev,
1950                          "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
1951                          hcint, hcintmsk, hcint & hcintmsk);
1952
1953         if (!chan) {
1954                 dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
1955                 writel(hcint, hsotg->regs + HCINT(chnum));
1956                 return;
1957         }
1958
1959         writel(hcint, hsotg->regs + HCINT(chnum));
1960         chan->hcint = hcint;
1961         hcint &= hcintmsk;
1962
1963         /*
1964          * If the channel was halted due to a dequeue, the qtd list might
1965          * be empty or at least the first entry will not be the active qtd.
1966          * In this case, take a shortcut and just release the channel.
1967          */
1968         if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
1969                 /*
1970                  * If the channel was halted, this should be the only
1971                  * interrupt unmasked
1972                  */
1973                 WARN_ON(hcint != HCINTMSK_CHHLTD);
1974                 if (hsotg->core_params->dma_desc_enable > 0)
1975                         dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1976                                                     chan->halt_status);
1977                 else
1978                         dwc2_release_channel(hsotg, chan, NULL,
1979                                              chan->halt_status);
1980                 return;
1981         }
1982
1983         if (list_empty(&chan->qh->qtd_list)) {
1984                 /*
1985                  * TODO: Will this ever happen with the
1986                  * DWC2_HC_XFER_URB_DEQUEUE handling above?
1987                  */
1988                 dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
1989                         chnum);
1990                 dev_dbg(hsotg->dev,
1991                         "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
1992                         chan->hcint, hcintmsk, hcint);
1993                 chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
1994                 disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
1995                 chan->hcint = 0;
1996                 return;
1997         }
1998
1999         qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
2000                                qtd_list_entry);
2001
2002         if (hsotg->core_params->dma_enable <= 0) {
2003                 if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
2004                         hcint &= ~HCINTMSK_CHHLTD;
2005         }
2006
2007         if (hcint & HCINTMSK_XFERCOMPL) {
2008                 dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
2009                 /*
2010                  * If NYET occurred at same time as Xfer Complete, the NYET is
2011                  * handled by the Xfer Complete interrupt handler. Don't want
2012                  * to call the NYET interrupt handler in this case.
2013                  */
2014                 hcint &= ~HCINTMSK_NYET;
2015         }
2016         if (hcint & HCINTMSK_CHHLTD)
2017                 dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
2018         if (hcint & HCINTMSK_AHBERR)
2019                 dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
2020         if (hcint & HCINTMSK_STALL)
2021                 dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
2022         if (hcint & HCINTMSK_NAK)
2023                 dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
2024         if (hcint & HCINTMSK_ACK)
2025                 dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
2026         if (hcint & HCINTMSK_NYET)
2027                 dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
2028         if (hcint & HCINTMSK_XACTERR)
2029                 dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
2030         if (hcint & HCINTMSK_BBLERR)
2031                 dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
2032         if (hcint & HCINTMSK_FRMOVRUN)
2033                 dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
2034         if (hcint & HCINTMSK_DATATGLERR)
2035                 dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
2036
2037         chan->hcint = 0;
2038 }
2039
2040 /*
2041  * This interrupt indicates that one or more host channels has a pending
2042  * interrupt. There are multiple conditions that can cause each host channel
2043  * interrupt. This function determines which conditions have occurred for each
2044  * host channel interrupt and handles them appropriately.
2045  */
2046 static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2047 {
2048         u32 haint;
2049         int i;
2050
2051         haint = readl(hsotg->regs + HAINT);
2052         if (dbg_perio()) {
2053                 dev_vdbg(hsotg->dev, "%s()\n", __func__);
2054
2055                 dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
2056         }
2057
2058         for (i = 0; i < hsotg->core_params->host_channels; i++) {
2059                 if (haint & (1 << i))
2060                         dwc2_hc_n_intr(hsotg, i);
2061         }
2062 }
2063
/*
 * This function handles interrupts for the HCD
 *
 * Return: 1 if at least one interrupt was serviced, 0 otherwise
 */
int dwc2_hcd_intr(struct dwc2_hsotg *hsotg)
{
	u32 gintsts, dbg_gintsts;
	int retval = 0;

	/* Bail out if the core reports it is no longer usable */
	if (dwc2_check_core_status(hsotg) < 0) {
		dev_warn(hsotg->dev, "Controller is disconnected\n");
		return 0;
	}

	spin_lock(&hsotg->lock);

	/* Check if HOST Mode */
	if (dwc2_is_host_mode(hsotg)) {
		/* Read the pending, unmasked core interrupt bits */
		gintsts = dwc2_read_core_intr(hsotg);
		if (!gintsts) {
			spin_unlock(&hsotg->lock);
			return 0;
		}

		retval = 1;

		/*
		 * Build a copy of the status with the noisy periodic bits
		 * stripped, used only to decide whether to emit debug output.
		 */
		dbg_gintsts = gintsts;
#ifndef DEBUG_SOF
		dbg_gintsts &= ~GINTSTS_SOF;
#endif
		if (!dbg_perio())
			dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
					 GINTSTS_PTXFEMP);

		/* Only print if there are any non-suppressed interrupts left */
		if (dbg_gintsts)
			dev_vdbg(hsotg->dev,
				 "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
				 gintsts);

		/* Dispatch each pending top-level condition to its handler */
		if (gintsts & GINTSTS_SOF)
			dwc2_sof_intr(hsotg);
		if (gintsts & GINTSTS_RXFLVL)
			dwc2_rx_fifo_level_intr(hsotg);
		if (gintsts & GINTSTS_NPTXFEMP)
			dwc2_np_tx_fifo_empty_intr(hsotg);
		if (gintsts & GINTSTS_I2CINT)
			/* Todo: Implement i2cintr handler */
			writel(GINTSTS_I2CINT, hsotg->regs + GINTSTS);
		if (gintsts & GINTSTS_PRTINT)
			dwc2_port_intr(hsotg);
		if (gintsts & GINTSTS_HCHINT)
			dwc2_hc_intr(hsotg);
		if (gintsts & GINTSTS_PTXFEMP)
			dwc2_perio_tx_fifo_empty_intr(hsotg);

		if (dbg_gintsts) {
			dev_vdbg(hsotg->dev,
				 "DWC OTG HCD Finished Servicing Interrupts\n");
			dev_vdbg(hsotg->dev,
				 "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
				 readl(hsotg->regs + GINTSTS),
				 readl(hsotg->regs + GINTMSK));
		}
	}

	spin_unlock(&hsotg->lock);

	return retval;
}