[firefly-linux-kernel-4.4.55.git] drivers/usb/dwc2/hcd_ddma.c (commit 89db47a1ffed4ec0eab6c54724a5f71c4c7c6245)
1 /*
2  * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
3  *
4  * Copyright (C) 2004-2013 Synopsys, Inc.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions, and the following disclaimer,
11  *    without modification.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The names of the above-listed copyright holders may not be used
16  *    to endorse or promote products derived from this software without
17  *    specific prior written permission.
18  *
19  * ALTERNATIVELY, this software may be distributed under the terms of the
20  * GNU General Public License ("GPL") as published by the Free Software
21  * Foundation; either version 2 of the License, or (at your option) any
22  * later version.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 /*
38  * This file contains the Descriptor DMA implementation for Host mode
39  */
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/spinlock.h>
43 #include <linux/interrupt.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/io.h>
46 #include <linux/slab.h>
47 #include <linux/usb.h>
48
49 #include <linux/usb/hcd.h>
50 #include <linux/usb/ch11.h>
51
52 #include "core.h"
53 #include "hcd.h"
54
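/* Map a frame number to its slot in the 64-entry periodic frame list */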
55 static u16 dwc2_frame_list_idx(u16 frame)
56 {
57         return frame & (FRLISTEN_64_SIZE - 1);
58 }
59
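/*
 * Advance (or, in the _dec variant below, rewind) a descriptor list index by
 * 'inc' entries, wrapping at the speed-dependent descriptor list size.
 */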
60 static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
61 {
62         return (idx + inc) &
63                 ((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
64                   MAX_DMA_DESC_NUM_GENERIC) - 1);
65 }
66
67 static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
68 {
69         return (idx - inc) &
70                 ((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
71                   MAX_DMA_DESC_NUM_GENERIC) - 1);
72 }
73
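/* Number of DMA descriptors allocated in this QH's descriptor list */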
74 static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
75 {
76         return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
77                 qh->dev_speed == USB_SPEED_HIGH) ?
78                 MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
79 }
80
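/*
 * Frame list index increment between successive service intervals of this QH.
 * A high-speed interval is given in microframes, so it is divided by 8
 * (microframes per frame), rounding up.
 */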
81 static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
82 {
83         return qh->dev_speed == USB_SPEED_HIGH ?
84                (qh->interval + 8 - 1) / 8 : qh->interval;
85 }
86
87 static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
88                                 gfp_t flags)
89 {
90         struct kmem_cache *desc_cache;
91
92         if (qh->ep_type == USB_ENDPOINT_XFER_ISOC
93             && qh->dev_speed == USB_SPEED_HIGH)
94                 desc_cache = hsotg->desc_hsisoc_cache;
95         else
96                 desc_cache = hsotg->desc_gen_cache;
97
98         qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
99                                                 dwc2_max_desc_num(qh);
100
101         qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
102         if (!qh->desc_list)
103                 return -ENOMEM;
104
105         qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
106                                            qh->desc_list_sz,
107                                            DMA_TO_DEVICE);
108
109         qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags);
110         if (!qh->n_bytes) {
111                 dma_unmap_single(hsotg->dev, qh->desc_list_dma,
112                                  qh->desc_list_sz,
113                                  DMA_FROM_DEVICE);
114                 kfree(qh->desc_list);
115                 qh->desc_list = NULL;
116                 return -ENOMEM;
117         }
118
119         return 0;
120 }
121
122 static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
123 {
124         struct kmem_cache *desc_cache;
125
126         if (qh->ep_type == USB_ENDPOINT_XFER_ISOC
127             && qh->dev_speed == USB_SPEED_HIGH)
128                 desc_cache = hsotg->desc_hsisoc_cache;
129         else
130                 desc_cache = hsotg->desc_gen_cache;
131
132         if (qh->desc_list) {
133                 dma_unmap_single(hsotg->dev, qh->desc_list_dma,
134                                  qh->desc_list_sz, DMA_FROM_DEVICE);
135                 kmem_cache_free(desc_cache, qh->desc_list);
136                 qh->desc_list = NULL;
137         }
138
139         kfree(qh->n_bytes);
140         qh->n_bytes = NULL;
141 }
142
143 static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
144 {
145         if (hsotg->frame_list)
146                 return 0;
147
148         hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
149         hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
150         if (!hsotg->frame_list)
151                 return -ENOMEM;
152
153         hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
154                                                hsotg->frame_list_sz,
155                                                DMA_TO_DEVICE);
156
157         return 0;
158 }
159
160 static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
161 {
162         unsigned long flags;
163
164         spin_lock_irqsave(&hsotg->lock, flags);
165
166         if (!hsotg->frame_list) {
167                 spin_unlock_irqrestore(&hsotg->lock, flags);
168                 return;
169         }
170
171         dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
172                          hsotg->frame_list_sz, DMA_FROM_DEVICE);
173
174         kfree(hsotg->frame_list);
175         hsotg->frame_list = NULL;
176
177         spin_unlock_irqrestore(&hsotg->lock, flags);
178
179 }
180
181 static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
182 {
183         u32 hcfg;
184         unsigned long flags;
185
186         spin_lock_irqsave(&hsotg->lock, flags);
187
188         hcfg = dwc2_readl(hsotg->regs + HCFG);
189         if (hcfg & HCFG_PERSCHEDENA) {
190                 /* already enabled */
191                 spin_unlock_irqrestore(&hsotg->lock, flags);
192                 return;
193         }
194
195         dwc2_writel(hsotg->frame_list_dma, hsotg->regs + HFLBADDR);
196
197         hcfg &= ~HCFG_FRLISTEN_MASK;
198         hcfg |= fr_list_en | HCFG_PERSCHEDENA;
199         dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
200         dwc2_writel(hcfg, hsotg->regs + HCFG);
201
202         spin_unlock_irqrestore(&hsotg->lock, flags);
203 }
204
205 static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
206 {
207         u32 hcfg;
208         unsigned long flags;
209
210         spin_lock_irqsave(&hsotg->lock, flags);
211
212         hcfg = dwc2_readl(hsotg->regs + HCFG);
213         if (!(hcfg & HCFG_PERSCHEDENA)) {
214                 /* already disabled */
215                 spin_unlock_irqrestore(&hsotg->lock, flags);
216                 return;
217         }
218
219         hcfg &= ~HCFG_PERSCHEDENA;
220         dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
221         dwc2_writel(hcfg, hsotg->regs + HCFG);
222
223         spin_unlock_irqrestore(&hsotg->lock, flags);
224 }
225
226 /*
227  * Activates/Deactivates FrameList entries for the channel based on endpoint
228  * servicing period
229  */
230 static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
231                                    int enable)
232 {
233         struct dwc2_host_chan *chan;
234         u16 i, j, inc;
235
236         if (!hsotg) {
237                 pr_err("hsotg = %p\n", hsotg);
238                 return;
239         }
240
241         if (!qh->channel) {
242                 dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
243                 return;
244         }
245
246         if (!hsotg->frame_list) {
247                 dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
248                         hsotg->frame_list);
249                 return;
250         }
251
252         chan = qh->channel;
253         inc = dwc2_frame_incr_val(qh);
254         if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
255                 i = dwc2_frame_list_idx(qh->sched_frame);
256         else
257                 i = 0;
258
259         j = i;
260         do {
261                 if (enable)
262                         hsotg->frame_list[j] |= 1 << chan->hc_num;
263                 else
264                         hsotg->frame_list[j] &= ~(1 << chan->hc_num);
265                 j = (j + inc) & (FRLISTEN_64_SIZE - 1);
266         } while (j != i);
267
268         /*
269          * Sync the frame list since the controller will access it if a
270          * periodic channel is currently enabled.
271          */
272         dma_sync_single_for_device(hsotg->dev,
273                                    hsotg->frame_list_dma,
274                                    hsotg->frame_list_sz,
275                                    DMA_TO_DEVICE);
276
277         if (!enable)
278                 return;
279
280         chan->schinfo = 0;
281         if (chan->speed == USB_SPEED_HIGH && qh->interval) {
282                 j = 1;
283                 /* TODO - check this */
284                 inc = (8 + qh->interval - 1) / qh->interval;
285                 for (i = 0; i < inc; i++) {
286                         chan->schinfo |= j;
287                         j = j << qh->interval;
288                 }
289         } else {
290                 chan->schinfo = 0xff;
291         }
292 }
293
294 static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
295                                       struct dwc2_qh *qh)
296 {
297         struct dwc2_host_chan *chan = qh->channel;
298
299         if (dwc2_qh_is_non_per(qh)) {
300                 if (hsotg->core_params->uframe_sched > 0)
301                         hsotg->available_host_channels++;
302                 else
303                         hsotg->non_periodic_channels--;
304         } else {
305                 dwc2_update_frame_list(hsotg, qh, 0);
306                 hsotg->available_host_channels++;
307         }
308
309         /*
310          * The condition is added to prevent a double cleanup attempt in case of
311          * device disconnect. See channel cleanup in dwc2_hcd_disconnect().
312          */
313         if (chan->qh) {
314                 if (!list_empty(&chan->hc_list_entry))
315                         list_del(&chan->hc_list_entry);
316                 dwc2_hc_cleanup(hsotg, chan);
317                 list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
318                 chan->qh = NULL;
319         }
320
321         qh->channel = NULL;
322         qh->ntd = 0;
323
324         if (qh->desc_list)
325                 memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
326                        dwc2_max_desc_num(qh));
327 }
328
329 /**
330  * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
331  * related members
332  *
333  * @hsotg: The HCD state structure for the DWC OTG controller
334  * @qh:    The QH to init
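 * @mem_flags: GFP flags for the allocations made while initializing the QH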
335  *
336  * Return: 0 if successful, negative error code otherwise
337  *
338  * Allocates memory for the descriptor list. For the first periodic QH,
339  * allocates memory for the FrameList and enables periodic scheduling.
340  */
341 int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
342                           gfp_t mem_flags)
343 {
344         int retval;
345
346         if (qh->do_split) {
347                 dev_err(hsotg->dev,
348                         "SPLIT Transfers are not supported in Descriptor DMA mode.\n");
349                 retval = -EINVAL;
350                 goto err0;
351         }
352
353         retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
354         if (retval)
355                 goto err0;
356
357         if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
358             qh->ep_type == USB_ENDPOINT_XFER_INT) {
359                 if (!hsotg->frame_list) {
360                         retval = dwc2_frame_list_alloc(hsotg, mem_flags);
361                         if (retval)
362                                 goto err1;
363                         /* Enable periodic schedule on first periodic QH */
364                         dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
365                 }
366         }
367
368         qh->ntd = 0;
369         return 0;
370
371 err1:
372         dwc2_desc_list_free(hsotg, qh);
373 err0:
374         return retval;
375 }
376
377 /**
378  * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
379  * members
380  *
381  * @hsotg: The HCD state structure for the DWC OTG controller
382  * @qh:    The QH to free
383  *
384  * Frees descriptor list memory associated with the QH. If the QH is periodic
385  * and the last one, frees the FrameList memory and disables periodic scheduling.
386  */
387 void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
388 {
389         unsigned long flags;
390
391         dwc2_desc_list_free(hsotg, qh);
392
393         /*
394          * The channel may still be assigned for some reason.
395          * This is seen on Isoc URB dequeue: the channel is halted but no
396          * subsequent ChHalted interrupt arrives to release it. Afterwards,
397          * when this point is reached from the endpoint disable routine,
398          * the channel remains assigned.
399          */
400         spin_lock_irqsave(&hsotg->lock, flags);
401         if (qh->channel)
402                 dwc2_release_channel_ddma(hsotg, qh);
403         spin_unlock_irqrestore(&hsotg->lock, flags);
404
405         if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
406              qh->ep_type == USB_ENDPOINT_XFER_INT) &&
407             (hsotg->core_params->uframe_sched > 0 ||
408              !hsotg->periodic_channels) && hsotg->frame_list) {
409                 dwc2_per_sched_disable(hsotg);
410                 dwc2_frame_list_free(hsotg);
411         }
412 }
413
414 static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
415 {
416         if (qh->dev_speed == USB_SPEED_HIGH)
417                 /* Descriptor set (8 descriptors) index which is 8-aligned */
418                 return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
419         else
420                 return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
421 }
422
423 /*
424  * Determine starting frame for Isochronous transfer.
425  * A few frames are skipped to prevent a race condition with the HC.
426  */
427 static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
428                                     struct dwc2_qh *qh, u16 *skip_frames)
429 {
430         u16 frame;
431
432         hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
433
434         /* sched_frame is always frame number (not uFrame) both in FS and HS! */
435
436         /*
437          * skip_frames is used to limit the number of activated descriptors, to
438          * avoid the situation where the HC services the last activated
439          * descriptor first.
440          * Example for FS:
441          * Current frame is 1, scheduled frame is 3. Since the HC always fetches
442          * the descriptor corresponding to curr_frame+1, the descriptor
443          * corresponding to frame 2 will be fetched. If the number of
444          * descriptors is max=64 (or greater) the list will be fully programmed
445          * with Active descriptors and it is possible (though rare) that the
446          * latest descriptor (considering rollback) corresponding to frame 2 will
447          * be serviced first. The HS case is more likely because, in fact, up to
448          * 11 uframes (16 in the code) may be skipped.
449          */
450         if (qh->dev_speed == USB_SPEED_HIGH) {
451                 /*
452                  * Consider the uframe counter as well, to start the transfer as
453                  * soon as possible. If half of the frame has elapsed, skip 2
454                  * frames, otherwise just 1 frame. The starting descriptor index
455                  * must be 8-aligned, so if the current frame is nearly complete,
456                  * the next one is skipped as well.
457                  */
458                 if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
459                         *skip_frames = 2 * 8;
460                         frame = dwc2_frame_num_inc(hsotg->frame_number,
461                                                    *skip_frames);
462                 } else {
463                         *skip_frames = 1 * 8;
464                         frame = dwc2_frame_num_inc(hsotg->frame_number,
465                                                    *skip_frames);
466                 }
467
468                 frame = dwc2_full_frame_num(frame);
469         } else {
470                 /*
471                  * Two frames are skipped for FS - the current and the next.
472                  * But for descriptor programming, 1 frame (descriptor) is
473                  * enough, see example above.
474                  */
475                 *skip_frames = 1;
476                 frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
477         }
478
479         return frame;
480 }
481
482 /*
483  * Calculate initial descriptor index for isochronous transfer based on
484  * scheduled frame
485  */
486 static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
487                                         struct dwc2_qh *qh)
488 {
489         u16 frame, fr_idx, fr_idx_tmp, skip_frames;
490
491         /*
492          * With the current ISOC processing algorithm the channel is released
493          * when there are no more QTDs in the list (qh->ntd == 0). Thus this
494          * function is called only when qh->ntd == 0 and qh->channel == NULL.
495          *
496          * So the qh->channel != NULL branch is not used; it has simply not been
497          * removed from the source file. It is required for another possible
498          * approach: do not disable and release the channel when the ISOC session
499          * completes, just move the QH to the inactive schedule until a new QTD
500          * arrives. On a new QTD, the QH is moved back to the 'ready' schedule,
501          * and the starting frame, and therefore the starting desc_index, are
502          * recalculated. In this case the channel is released only on ep_disable.
503          */
504
505         /*
506          * Calculate starting descriptor index. For INTERRUPT endpoint it is
507          * always 0.
508          */
509         if (qh->channel) {
510                 frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
511                 /*
512                  * Calculate initial descriptor index based on FrameList current
513                  * bitmap and servicing period
514                  */
515                 fr_idx_tmp = dwc2_frame_list_idx(frame);
516                 fr_idx = (FRLISTEN_64_SIZE +
517                           dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
518                          % dwc2_frame_incr_val(qh);
519                 fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
520         } else {
521                 qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
522                                                            &skip_frames);
523                 fr_idx = dwc2_frame_list_idx(qh->sched_frame);
524         }
525
526         qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);
527
528         return skip_frames;
529 }
530
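/*
 * With ISOC_URB_GIVEBACK_ASAP defined, an IOC is requested on the descriptor
 * for the last frame of each URB, so completed URBs are given back as soon as
 * possible. MAX_ISOC_XFER_SIZE_FS/HS are the largest isochronous payloads per
 * (micro)frame for each speed. DESCNUM_THRESHOLD is only used by the
 * alternative (non-ASAP) IOC placement below.
 */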
531 #define ISOC_URB_GIVEBACK_ASAP
532
533 #define MAX_ISOC_XFER_SIZE_FS   1023
534 #define MAX_ISOC_XFER_SIZE_HS   3072
535 #define DESCNUM_THRESHOLD       4
536
537 static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
538                                          struct dwc2_qtd *qtd,
539                                          struct dwc2_qh *qh, u32 max_xfer_size,
540                                          u16 idx)
541 {
542         struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
543         struct dwc2_hcd_iso_packet_desc *frame_desc;
544
545         memset(dma_desc, 0, sizeof(*dma_desc));
546         frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
547
548         if (frame_desc->length > max_xfer_size)
549                 qh->n_bytes[idx] = max_xfer_size;
550         else
551                 qh->n_bytes[idx] = frame_desc->length;
552
553         dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
554         dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
555                            HOST_DMA_ISOC_NBYTES_MASK;
556
557         /* Set active bit */
558         dma_desc->status |= HOST_DMA_A;
559
560         qh->ntd++;
561         qtd->isoc_frame_index_last++;
562
563 #ifdef ISOC_URB_GIVEBACK_ASAP
564         /* Set IOC for each descriptor corresponding to last frame of URB */
565         if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
566                 dma_desc->status |= HOST_DMA_IOC;
567 #endif
568
569         dma_sync_single_for_device(hsotg->dev,
570                         qh->desc_list_dma +
571                         (idx * sizeof(struct dwc2_hcd_dma_desc)),
572                         sizeof(struct dwc2_hcd_dma_desc),
573                         DMA_TO_DEVICE);
574 }
575
576 static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
577                                     struct dwc2_qh *qh, u16 skip_frames)
578 {
579         struct dwc2_qtd *qtd;
580         u32 max_xfer_size;
581         u16 idx, inc, n_desc = 0, ntd_max = 0;
582         u16 cur_idx;
583         u16 next_idx;
584
585         idx = qh->td_last;
586         inc = qh->interval;
587         hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
588         cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
589         next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
590
591         /*
592          * Ensure that the current frame number has not overstepped the last
593          * scheduled descriptor. If it has, the only way to recover is to move
594          * qh->td_last to the current frame number + 1, so that the next isoc
595          * descriptor will be scheduled on frame number + 1 and not on a past
596          * frame.
597          */
598         if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
599                 if (inc < 32) {
600                         dev_vdbg(hsotg->dev,
601                                  "current frame number overstep last descriptor\n");
602                         qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
603                                                             qh->dev_speed);
604                         idx = qh->td_last;
605                 }
606         }
607
608         if (qh->interval) {
609                 ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
610                                 qh->interval;
611                 if (skip_frames && !qh->channel)
612                         ntd_max -= skip_frames / qh->interval;
613         }
614
615         max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
616                         MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;
617
618         list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
619                 if (qtd->in_process &&
620                     qtd->isoc_frame_index_last ==
621                     qtd->urb->packet_count)
622                         continue;
623
624                 qtd->isoc_td_first = idx;
625                 while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
626                                                 qtd->urb->packet_count) {
627                         dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
628                                                      max_xfer_size, idx);
629                         idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
630                         n_desc++;
631                 }
632                 qtd->isoc_td_last = idx;
633                 qtd->in_process = 1;
634         }
635
636         qh->td_last = idx;
637
638 #ifdef ISOC_URB_GIVEBACK_ASAP
639         /* Set IOC for last descriptor if descriptor list is full */
640         if (qh->ntd == ntd_max) {
641                 idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
642                 qh->desc_list[idx].status |= HOST_DMA_IOC;
643                 dma_sync_single_for_device(hsotg->dev,
644                                            qh->desc_list_dma + (idx *
645                                            sizeof(struct dwc2_hcd_dma_desc)),
646                                            sizeof(struct dwc2_hcd_dma_desc),
647                                            DMA_TO_DEVICE);
648         }
649 #else
650         /*
651          * Set the IOC bit for only one descriptor. Always try to stay ahead of
652          * HW processing, i.e. on IOC generation the driver activates the next
653          * descriptor, but the core continues to process descriptors following
654          * the one with IOC set.
655          */
656
657         if (n_desc > DESCNUM_THRESHOLD)
658                 /*
659                  * Move the IOC "up". This is required even if there is only one
660                  * QTD in the list, because QTDs might continue to be queued
661                  * while only one was queued at activation time.
662                  * More than one QTD might actually be in the list if this
663                  * function is called from XferCompletion - QTDs were queued
664                  * during HW processing of the previous descriptor chunk.
665                  */
666                 idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
667                                             qh->dev_speed);
668         else
669                 /*
670                  * Set the IOC for the latest descriptor if either the number of
671                  * descriptors is not greater than the threshold or no new
672                  * descriptors were activated
673                  */
674                 idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
675
676         qh->desc_list[idx].status |= HOST_DMA_IOC;
677         dma_sync_single_for_device(hsotg->dev,
678                                    qh->desc_list_dma +
679                                    (idx * sizeof(struct dwc2_hcd_dma_desc)),
680                                    sizeof(struct dwc2_hcd_dma_desc),
681                                    DMA_TO_DEVICE);
682 #endif
683 }
684
685 static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
686                                     struct dwc2_host_chan *chan,
687                                     struct dwc2_qtd *qtd, struct dwc2_qh *qh,
688                                     int n_desc)
689 {
690         struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
691         int len = chan->xfer_len;
692
693         if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
694                 len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);
695
696         if (chan->ep_is_in) {
697                 int num_packets;
698
699                 if (len > 0 && chan->max_packet)
700                         num_packets = (len + chan->max_packet - 1)
701                                         / chan->max_packet;
702                 else
703                         /* Need 1 packet for transfer length of 0 */
704                         num_packets = 1;
705
706                 /* Always program an integral # of packets for IN transfers */
707                 len = num_packets * chan->max_packet;
708         }
709
710         dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
711         qh->n_bytes[n_desc] = len;
712
713         if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
714             qtd->control_phase == DWC2_CONTROL_SETUP)
715                 dma_desc->status |= HOST_DMA_SUP;
716
717         dma_desc->buf = (u32)chan->xfer_dma;
718
719         dma_sync_single_for_device(hsotg->dev,
720                                    qh->desc_list_dma +
721                                    (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
722                                    sizeof(struct dwc2_hcd_dma_desc),
723                                    DMA_TO_DEVICE);
724
725         /*
726          * Last (or only) descriptor of IN transfer with actual size less
727          * than MaxPacket
728          */
729         if (len > chan->xfer_len) {
730                 chan->xfer_len = 0;
731         } else {
732                 chan->xfer_dma += len;
733                 chan->xfer_len -= len;
734         }
735 }
736
737 static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
738                                         struct dwc2_qh *qh)
739 {
740         struct dwc2_qtd *qtd;
741         struct dwc2_host_chan *chan = qh->channel;
742         int n_desc = 0;
743
744         dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
745                  (unsigned long)chan->xfer_dma, chan->xfer_len);
746
747         /*
748          * Start with chan->xfer_dma initialized in assign_and_init_hc(), then,
749          * if the SG transfer consists of multiple URBs, this pointer is
750          * re-assigned to the buffer of the currently processed QTD. For a
751          * non-SG request there is always one QTD active.
752          */
753
754         list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
755                 dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);
756
757                 if (n_desc) {
758                         /* SG request - more than 1 QTD */
759                         chan->xfer_dma = qtd->urb->dma +
760                                         qtd->urb->actual_length;
761                         chan->xfer_len = qtd->urb->length -
762                                         qtd->urb->actual_length;
763                         dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
764                                  (unsigned long)chan->xfer_dma, chan->xfer_len);
765                 }
766
767                 qtd->n_desc = 0;
768                 do {
769                         if (n_desc > 1) {
770                                 qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
771                                 dev_vdbg(hsotg->dev,
772                                          "set A bit in desc %d (%p)\n",
773                                          n_desc - 1,
774                                          &qh->desc_list[n_desc - 1]);
775                                 dma_sync_single_for_device(hsotg->dev,
776                                         qh->desc_list_dma +
777                                         ((n_desc - 1) *
778                                         sizeof(struct dwc2_hcd_dma_desc)),
779                                         sizeof(struct dwc2_hcd_dma_desc),
780                                         DMA_TO_DEVICE);
781                         }
782                         dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
783                         dev_vdbg(hsotg->dev,
784                                  "desc %d (%p) buf=%08x status=%08x\n",
785                                  n_desc, &qh->desc_list[n_desc],
786                                  qh->desc_list[n_desc].buf,
787                                  qh->desc_list[n_desc].status);
788                         qtd->n_desc++;
789                         n_desc++;
790                 } while (chan->xfer_len > 0 &&
791                          n_desc != MAX_DMA_DESC_NUM_GENERIC);
792
793                 dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
794                 qtd->in_process = 1;
795                 if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
796                         break;
797                 if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
798                         break;
799         }
800
801         if (n_desc) {
802                 qh->desc_list[n_desc - 1].status |=
803                                 HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
804                 dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
805                          n_desc - 1, &qh->desc_list[n_desc - 1]);
806                 dma_sync_single_for_device(hsotg->dev,
807                                            qh->desc_list_dma + (n_desc - 1) *
808                                            sizeof(struct dwc2_hcd_dma_desc),
809                                            sizeof(struct dwc2_hcd_dma_desc),
810                                            DMA_TO_DEVICE);
811                 if (n_desc > 1) {
812                         qh->desc_list[0].status |= HOST_DMA_A;
813                         dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
814                                  &qh->desc_list[0]);
815                         dma_sync_single_for_device(hsotg->dev,
816                                         qh->desc_list_dma,
817                                         sizeof(struct dwc2_hcd_dma_desc),
818                                         DMA_TO_DEVICE);
819                 }
820                 chan->ntd = n_desc;
821         }
822 }
823
824 /**
825  * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
826  *
827  * @hsotg: The HCD state structure for the DWC OTG controller
828  * @qh:    The QH to init
829  *
832  * For Control and Bulk endpoints, initializes descriptor list and starts the
833  * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
834  * list then updates FrameList, marking appropriate entries as active.
835  *
836  * For Isochronous endpoints the starting descriptor index is calculated based
837  * on the scheduled frame, but only on the first transfer descriptor within a
838  * session. Then the transfer is started by enabling the channel.
839  *
840  * For Isochronous endpoints the channel is not halted on XferComplete, so it
841  * remains assigned to the endpoint (QH) until the session is done.
842  */
843 void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
844 {
845         /* Channel is already assigned */
846         struct dwc2_host_chan *chan = qh->channel;
847         u16 skip_frames = 0;
848
849         switch (chan->ep_type) {
850         case USB_ENDPOINT_XFER_CONTROL:
851         case USB_ENDPOINT_XFER_BULK:
852                 dwc2_init_non_isoc_dma_desc(hsotg, qh);
853                 dwc2_hc_start_transfer_ddma(hsotg, chan);
854                 break;
855         case USB_ENDPOINT_XFER_INT:
856                 dwc2_init_non_isoc_dma_desc(hsotg, qh);
857                 dwc2_update_frame_list(hsotg, qh, 1);
858                 dwc2_hc_start_transfer_ddma(hsotg, chan);
859                 break;
860         case USB_ENDPOINT_XFER_ISOC:
861                 if (!qh->ntd)
862                         skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
863                 dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);
864
865                 if (!chan->xfer_started) {
866                         dwc2_update_frame_list(hsotg, qh, 1);
867
868                         /*
869                          * Always set ntd to the max, instead of the actual size.
870                          * Otherwise ntd would have to change while the channel is
871                          * being enabled, which is not recommended.
872                          */
873                         chan->ntd = dwc2_max_desc_num(qh);
874
875                         /* Enable channel only once for ISOC */
876                         dwc2_hc_start_transfer_ddma(hsotg, chan);
877                 }
878
879                 break;
880         default:
881                 break;
882         }
883 }
884
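/*
 * Return codes of dwc2_cmpl_host_isoc_dma_desc(): DWC2_CMPL_DONE means the
 * QTD was completed and freed, DWC2_CMPL_STOP means a descriptor with the IOC
 * bit set was reached and scanning should stop.
 */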
885 #define DWC2_CMPL_DONE          1
886 #define DWC2_CMPL_STOP          2
887
888 static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
889                                         struct dwc2_host_chan *chan,
890                                         struct dwc2_qtd *qtd,
891                                         struct dwc2_qh *qh, u16 idx)
892 {
893         struct dwc2_hcd_dma_desc *dma_desc;
894         struct dwc2_hcd_iso_packet_desc *frame_desc;
895         u16 remain = 0;
896         int rc = 0;
897
898         if (!qtd->urb)
899                 return -EINVAL;
900
901         dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
902                                 sizeof(struct dwc2_hcd_dma_desc)),
903                                 sizeof(struct dwc2_hcd_dma_desc),
904                                 DMA_FROM_DEVICE);
905
906         dma_desc = &qh->desc_list[idx];
907
908         frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
909         dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
910         if (chan->ep_is_in)
911                 remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
912                          HOST_DMA_ISOC_NBYTES_SHIFT;
913
914         if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
915                 /*
916                  * XactError, or unable to complete all the transactions
917                  * in the scheduled micro-frame/frame, both indicated by
918                  * HOST_DMA_STS_PKTERR
919                  */
920                 qtd->urb->error_count++;
921                 frame_desc->actual_length = qh->n_bytes[idx] - remain;
922                 frame_desc->status = -EPROTO;
923         } else {
924                 /* Success */
925                 frame_desc->actual_length = qh->n_bytes[idx] - remain;
926                 frame_desc->status = 0;
927         }
928
929         if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
930                 /*
931                  * urb->status is not used for isoc transfers here. The
932                  * individual frame_desc statuses are used instead.
933                  */
934                 dwc2_host_complete(hsotg, qtd, 0);
935                 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
936
937                 /*
938                  * This check is necessary because urb_dequeue can be called
939                  * from the urb complete callback (the sound driver, for example).
940                  * All pending URBs are dequeued there, so there is no need for
941                  * further processing.
942                  */
943                 if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
944                         return -1;
945                 rc = DWC2_CMPL_DONE;
946         }
947
948         qh->ntd--;
949
950         /* Stop if IOC requested descriptor reached */
951         if (dma_desc->status & HOST_DMA_IOC)
952                 rc = DWC2_CMPL_STOP;
953
954         return rc;
955 }
956
957 static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
958                                          struct dwc2_host_chan *chan,
959                                          enum dwc2_halt_status halt_status)
960 {
961         struct dwc2_hcd_iso_packet_desc *frame_desc;
962         struct dwc2_qtd *qtd, *qtd_tmp;
963         struct dwc2_qh *qh;
964         u16 idx;
965         int rc;
966
967         qh = chan->qh;
968         idx = qh->td_first;
969
970         if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
971                 list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
972                         qtd->in_process = 0;
973                 return;
974         }
975
976         if (halt_status == DWC2_HC_XFER_AHB_ERR ||
977             halt_status == DWC2_HC_XFER_BABBLE_ERR) {
978                  * The channel is halted in these error cases, which are
979                  * considered serious issues.
980                  * Complete all URBs, marking all frames as failed, irrespective of
981                  * whether some of the descriptors (frames) succeeded or not.
982                  * Pass the error code to the completion routine as well, to update
983                  * urb->status; some class drivers might use it to stop
984                  * queuing transfer requests.
985                  * queing transfer requests.
986                  */
987                 int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
988                           -EIO : -EOVERFLOW;
989
990                 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
991                                          qtd_list_entry) {
992                         if (qtd->urb) {
993                                 for (idx = 0; idx < qtd->urb->packet_count;
994                                      idx++) {
995                                         frame_desc = &qtd->urb->iso_descs[idx];
996                                         frame_desc->status = err;
997                                 }
998
999                                 dwc2_host_complete(hsotg, qtd, err);
1000                         }
1001
1002                         dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1003                 }
1004
1005                 return;
1006         }
1007
1008         list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
1009                 if (!qtd->in_process)
1010                         break;
1011
1012                 /*
1013                  * Ensure idx corresponds to the descriptor where the first urb of
1014                  * this qtd was added. In fact, during isoc desc init, dwc2 may
1015                  * skip an index if the current frame number is already past it.
1016                  */
1017                 if (idx != qtd->isoc_td_first) {
1018                         dev_vdbg(hsotg->dev,
1019                                  "try to complete %d instead of %d\n",
1020                                  idx, qtd->isoc_td_first);
1021                         idx = qtd->isoc_td_first;
1022                 }
1023
1024                 do {
1025                         struct dwc2_qtd *qtd_next;
1026                         u16 cur_idx;
1027
1028                         rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
1029                                                           idx);
1030                         if (rc < 0)
1031                                 return;
1032                         idx = dwc2_desclist_idx_inc(idx, qh->interval,
1033                                                     chan->speed);
1034                         if (!rc)
1035                                 continue;
1036
1037                         if (rc == DWC2_CMPL_DONE)
1038                                 break;
1039
1040                         /* rc == DWC2_CMPL_STOP */
1041
1042                         if (qh->interval >= 32)
1043                                 goto stop_scan;
1044
1045                         qh->td_first = idx;
1046                         cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
1047                         qtd_next = list_first_entry(&qh->qtd_list,
1048                                                     struct dwc2_qtd,
1049                                                     qtd_list_entry);
1050                         if (dwc2_frame_idx_num_gt(cur_idx,
1051                                                   qtd_next->isoc_td_last))
1052                                 break;
1053
1054                         goto stop_scan;
1055
1056                 } while (idx != qh->td_first);
1057         }
1058
1059 stop_scan:
1060         qh->td_first = idx;
1061 }
1062
1063 static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
1064                                         struct dwc2_host_chan *chan,
1065                                         struct dwc2_qtd *qtd,
1066                                         struct dwc2_hcd_dma_desc *dma_desc,
1067                                         enum dwc2_halt_status halt_status,
1068                                         u32 n_bytes, int *xfer_done)
1069 {
1070         struct dwc2_hcd_urb *urb = qtd->urb;
1071         u16 remain = 0;
1072
1073         if (chan->ep_is_in)
1074                 remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
1075                          HOST_DMA_NBYTES_SHIFT;
1076
1077         dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);
1078
1079         if (halt_status == DWC2_HC_XFER_AHB_ERR) {
1080                 dev_err(hsotg->dev, "EIO\n");
1081                 urb->status = -EIO;
1082                 return 1;
1083         }
1084
1085         if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
1086                 switch (halt_status) {
1087                 case DWC2_HC_XFER_STALL:
1088                         dev_vdbg(hsotg->dev, "Stall\n");
1089                         urb->status = -EPIPE;
1090                         break;
1091                 case DWC2_HC_XFER_BABBLE_ERR:
1092                         dev_err(hsotg->dev, "Babble\n");
1093                         urb->status = -EOVERFLOW;
1094                         break;
1095                 case DWC2_HC_XFER_XACT_ERR:
1096                         dev_err(hsotg->dev, "XactErr\n");
1097                         urb->status = -EPROTO;
1098                         break;
1099                 default:
1100                         dev_err(hsotg->dev,
1101                                 "%s: Unhandled descriptor error status (%d)\n",
1102                                 __func__, halt_status);
1103                         break;
1104                 }
1105                 return 1;
1106         }
1107
1108         if (dma_desc->status & HOST_DMA_A) {
1109                 dev_vdbg(hsotg->dev,
1110                          "Active descriptor encountered on channel %d\n",
1111                          chan->hc_num);
1112                 return 0;
1113         }
1114
1115         if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
1116                 if (qtd->control_phase == DWC2_CONTROL_DATA) {
1117                         urb->actual_length += n_bytes - remain;
1118                         if (remain || urb->actual_length >= urb->length) {
1119                                 /*
1120                                  * For Control Data stage do not set urb->status
1121                                  * to 0, to prevent URB callback. Set it when
1122                                  * Status phase is done. See below.
1123                                  */
1124                                 *xfer_done = 1;
1125                         }
1126                 } else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
1127                         urb->status = 0;
1128                         *xfer_done = 1;
1129                 }
1130                 /* No handling for SETUP stage */
1131         } else {
1132                 /* BULK and INTR */
1133                 urb->actual_length += n_bytes - remain;
1134                 dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
1135                          urb->actual_length);
1136                 if (remain || urb->actual_length >= urb->length) {
1137                         urb->status = 0;
1138                         *xfer_done = 1;
1139                 }
1140         }
1141
1142         return 0;
1143 }
1144
1145 static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
1146                                       struct dwc2_host_chan *chan,
1147                                       int chnum, struct dwc2_qtd *qtd,
1148                                       int desc_num,
1149                                       enum dwc2_halt_status halt_status,
1150                                       int *xfer_done)
1151 {
1152         struct dwc2_qh *qh = chan->qh;
1153         struct dwc2_hcd_urb *urb = qtd->urb;
1154         struct dwc2_hcd_dma_desc *dma_desc;
1155         u32 n_bytes;
1156         int failed;
1157
1158         dev_vdbg(hsotg->dev, "%s()\n", __func__);
1159
1160         if (!urb)
1161                 return -EINVAL;
1162
1163         dma_sync_single_for_cpu(hsotg->dev,
1164                                 qh->desc_list_dma + (desc_num *
1165                                 sizeof(struct dwc2_hcd_dma_desc)),
1166                                 sizeof(struct dwc2_hcd_dma_desc),
1167                                 DMA_FROM_DEVICE);
1168
1169         dma_desc = &qh->desc_list[desc_num];
1170         n_bytes = qh->n_bytes[desc_num];
1171         dev_vdbg(hsotg->dev,
1172                  "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
1173                  qtd, urb, desc_num, dma_desc, n_bytes);
1174         failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
1175                                                      halt_status, n_bytes,
1176                                                      xfer_done);
1177         if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
1178                 dwc2_host_complete(hsotg, qtd, urb->status);
1179                 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1180                 dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
1181                          failed, *xfer_done);
1182                 return failed;
1183         }
1184
1185         if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
1186                 switch (qtd->control_phase) {
1187                 case DWC2_CONTROL_SETUP:
1188                         if (urb->length > 0)
1189                                 qtd->control_phase = DWC2_CONTROL_DATA;
1190                         else
1191                                 qtd->control_phase = DWC2_CONTROL_STATUS;
1192                         dev_vdbg(hsotg->dev,
1193                                  "  Control setup transaction done\n");
1194                         break;
1195                 case DWC2_CONTROL_DATA:
1196                         if (*xfer_done) {
1197                                 qtd->control_phase = DWC2_CONTROL_STATUS;
1198                                 dev_vdbg(hsotg->dev,
1199                                          "  Control data transfer done\n");
1200                         } else if (desc_num + 1 == qtd->n_desc) {
1201                                 /*
1202                                  * Last descriptor for Control data stage which
1203                                  * is not completed yet
1204                                  */
1205                                 dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
1206                                                           qtd);
1207                         }
1208                         break;
1209                 default:
1210                         break;
1211                 }
1212         }
1213
1214         return 0;
1215 }
1216
1217 static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1218                                              struct dwc2_host_chan *chan,
1219                                              int chnum,
1220                                              enum dwc2_halt_status halt_status)
1221 {
1222         struct list_head *qtd_item, *qtd_tmp;
1223         struct dwc2_qh *qh = chan->qh;
1224         struct dwc2_qtd *qtd = NULL;
1225         int xfer_done;
1226         int desc_num = 0;
1227
1228         if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
1229                 list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
1230                         qtd->in_process = 0;
1231                 return;
1232         }
1233
1234         list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
1235                 int i;
1236                 int qtd_desc_count;
1237
1238                 qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
1239                 xfer_done = 0;
1240                 qtd_desc_count = qtd->n_desc;
1241
1242                 for (i = 0; i < qtd_desc_count; i++) {
1243                         if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
1244                                                        desc_num, halt_status,
1245                                                        &xfer_done))
1246                                 goto stop_scan;
1247
1248                         desc_num++;
1249                 }
1250         }
1251
1252 stop_scan:
1253         if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
1254                 /*
1255                  * Resetting the data toggle for bulk and interrupt endpoints
1256                  * in case of stall. See handle_hc_stall_intr().
1257                  */
1258                 if (halt_status == DWC2_HC_XFER_STALL)
1259                         qh->data_toggle = DWC2_HC_PID_DATA0;
1260                 else if (qtd)
1261                         dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1262         }
1263
1264         if (halt_status == DWC2_HC_XFER_COMPLETE) {
1265                 if (chan->hcint & HCINTMSK_NYET) {
1266                         /*
1267                          * Got a NYET on the last transaction of the transfer.
1268                          * It means that the endpoint should be in the PING
1269                          * state at the beginning of the next transfer.
1270                          */
1271                         qh->ping_state = 1;
1272                 }
1273         }
1274 }
1275
1276 /**
1277  * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
1278  * status and calls completion routine for the URB if it's done. Called from
1279  * interrupt handlers.
1280  *
1281  * @hsotg:       The HCD state structure for the DWC OTG controller
1282  * @chan:        Host channel the transfer is completed on
1283  * @chnum:       Index of Host channel registers
1284  * @halt_status: Reason the channel is being halted or just XferComplete
1285  *               for isochronous transfers
1286  *
1287  * Releases the channel to be used by other transfers.
1288  * In the case of an Isochronous endpoint the channel is not halted until the
1289  * end of the session, i.e. until the QTD list is empty.
1290  * If a periodic channel is released, the FrameList is updated accordingly.
1291  * Calls transaction selection routines to activate pending transfers.
1292  */
1293 void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
1294                                  struct dwc2_host_chan *chan, int chnum,
1295                                  enum dwc2_halt_status halt_status)
1296 {
1297         struct dwc2_qh *qh = chan->qh;
1298         int continue_isoc_xfer = 0;
1299         enum dwc2_transaction_type tr_type;
1300
1301         if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1302                 dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);
1303
1304                 /* Release the channel if halted or session completed */
1305                 if (halt_status != DWC2_HC_XFER_COMPLETE ||
1306                     list_empty(&qh->qtd_list)) {
1307                         struct dwc2_qtd *qtd, *qtd_tmp;
1308
1309                         /*
1310                          * Kill all remaining QTDs since the channel has
1311                          * been halted.
1312                          */
1313                         list_for_each_entry_safe(qtd, qtd_tmp,
1314                                                  &qh->qtd_list,
1315                                                  qtd_list_entry) {
1316                                 dwc2_host_complete(hsotg, qtd,
1317                                                    -ECONNRESET);
1318                                 dwc2_hcd_qtd_unlink_and_free(hsotg,
1319                                                              qtd, qh);
1320                         }
1321
1322                         /* Halt the channel if session completed */
1323                         if (halt_status == DWC2_HC_XFER_COMPLETE)
1324                                 dwc2_hc_halt(hsotg, chan, halt_status);
1325                         dwc2_release_channel_ddma(hsotg, qh);
1326                         dwc2_hcd_qh_unlink(hsotg, qh);
1327                 } else {
1328                         /* Keep in assigned schedule to continue transfer */
1329                         list_move(&qh->qh_list_entry,
1330                                   &hsotg->periodic_sched_assigned);
1331                         /*
1332                          * If the channel has been halted during giveback of
1333                          * the urb then prevent any new scheduling.
1334                          */
1335                         if (!chan->halt_status)
1336                                 continue_isoc_xfer = 1;
1337                 }
1338                 /*
1339                  * Todo: Consider the case when period exceeds FrameList size.
1340                  * Frame Rollover interrupt should be used.
1341                  */
1342         } else {
1343                 /*
1344                  * Scan descriptor list to complete the URB(s), then release
1345                  * the channel
1346                  */
1347                 dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
1348                                                  halt_status);
1349                 dwc2_release_channel_ddma(hsotg, qh);
1350                 dwc2_hcd_qh_unlink(hsotg, qh);
1351
1352                 if (!list_empty(&qh->qtd_list)) {
1353                         /*
1354                          * Add back to inactive non-periodic schedule on normal
1355                          * completion
1356                          */
1357                         dwc2_hcd_qh_add(hsotg, qh);
1358                 }
1359         }
1360
1361         tr_type = dwc2_hcd_select_transactions(hsotg);
1362         if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
1363                 if (continue_isoc_xfer) {
1364                         if (tr_type == DWC2_TRANSACTION_NONE)
1365                                 tr_type = DWC2_TRANSACTION_PERIODIC;
1366                         else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
1367                                 tr_type = DWC2_TRANSACTION_ALL;
1368                 }
1369                 dwc2_hcd_queue_transactions(hsotg, tr_type);
1370         }
1371 }