/*==========================================================================
 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_ddma.c $
 * $Revision: #10 $
 * $Date: 2011/10/20 $
 * $Change: 1869464 $
 *
 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
 * otherwise expressly agreed to in writing between Synopsys and you.
 *
 * The Software IS NOT an item of Licensed Software or Licensed Product under
 * any End User Software License Agreement or Agreement for Licensed Product
 * with Synopsys or any supplement thereto. You are permitted to use and
 * redistribute this Software in source and binary forms, with or without
 * modification, provided that redistributions of source code must retain this
 * notice. You may not view, use, disclose, copy or distribute this file or
 * any information contained herein except pursuant to this license grant from
 * Synopsys. If you do not agree with this notice, including the disclaimer
 * below, then you are not authorized to use the Software.
 *
 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 * ========================================================================== */
#ifndef DWC_DEVICE_ONLY

/** @file
 * This file contains Descriptor DMA support implementation for host mode.
 */

#include "dwc_otg_hcd.h"
#include "dwc_otg_regs.h"

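/*
 * Map a frame number to a Frame List entry index.
 * Relies on MAX_FRLIST_EN_NUM being a power of two.
 */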
static inline uint8_t frame_list_idx(uint16_t frame)
{
        return frame & (MAX_FRLIST_EN_NUM - 1);
}

static inline uint16_t desclist_idx_inc(uint16_t idx, uint16_t inc,
                                        uint8_t speed)
{
        return (idx + inc) &
            (((speed == DWC_OTG_EP_SPEED_HIGH) ?
              MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static inline uint16_t desclist_idx_dec(uint16_t idx, uint16_t inc,
                                        uint8_t speed)
{
        return (idx - inc) &
            (((speed == DWC_OTG_EP_SPEED_HIGH) ?
              MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC) - 1);
}

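/*
 * Size of the QH's descriptor list: HS ISOC endpoints use the larger
 * MAX_DMA_DESC_NUM_HS_ISOC list, everything else the generic one.
 */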
static inline uint16_t max_desc_num(dwc_otg_qh_t *qh)
{
        return (((qh->ep_type == UE_ISOCHRONOUS)
                 && (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH))
                ? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC);
}

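/*
 * Step between two services of the endpoint in Frame List entries.
 * The HS interval is in micro-frames, so it is converted to full
 * frames (8 uframes each).
 */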
static inline uint16_t frame_incr_val(dwc_otg_qh_t *qh)
{
        return ((qh->dev_speed == DWC_OTG_EP_SPEED_HIGH)
                ? ((qh->interval + 8 - 1) / 8)
                : qh->interval);
}

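/*
 * Allocate the descriptor list and the array of actual transfer sizes
 * for the QH. Both are sized by max_desc_num().
 */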
static int desc_list_alloc(dwc_otg_qh_t *qh)
{
        qh->desc_list = (dwc_otg_host_dma_desc_t *)
            DWC_DMA_ALLOC(sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh),
                          &qh->desc_list_dma);

        if (!qh->desc_list) {
                DWC_ERROR("%s: DMA descriptor list allocation failed\n",
                          __func__);
                return -DWC_E_NO_MEMORY;
        }

        dwc_memset(qh->desc_list, 0x00,
                   sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));

        qh->n_bytes =
            (uint32_t *) DWC_ALLOC(sizeof(uint32_t) * max_desc_num(qh));

        if (!qh->n_bytes) {
                DWC_ERROR("%s: Failed to allocate array for descriptors' "
                          "actual sizes\n", __func__);
                return -DWC_E_NO_MEMORY;
        }

        return 0;
}

static void desc_list_free(dwc_otg_qh_t *qh)
{
        if (qh->desc_list) {
                DWC_DMA_FREE(sizeof(dwc_otg_host_dma_desc_t) *
                             max_desc_num(qh), qh->desc_list,
                             qh->desc_list_dma);
                qh->desc_list = NULL;
        }

        if (qh->n_bytes) {
                DWC_FREE(qh->n_bytes);
                qh->n_bytes = NULL;
        }
}

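/*
 * Allocate the periodic Frame List (MAX_FRLIST_EN_NUM 32-bit entries)
 * shared by all periodic channels. Done only once, for the first
 * periodic QH.
 */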
static int frame_list_alloc(dwc_otg_hcd_t *hcd)
{
        if (hcd->frame_list)
                return 0;

        hcd->frame_list = DWC_DMA_ALLOC(4 * MAX_FRLIST_EN_NUM,
                                        &hcd->frame_list_dma);
        if (!hcd->frame_list) {
                DWC_ERROR("%s: Frame List allocation failed\n", __func__);
                return -DWC_E_NO_MEMORY;
        }

        dwc_memset(hcd->frame_list, 0x00, 4 * MAX_FRLIST_EN_NUM);

        return 0;
}

static void frame_list_free(dwc_otg_hcd_t *hcd)
{
        if (!hcd->frame_list)
                return;

        DWC_DMA_FREE(4 * MAX_FRLIST_EN_NUM, hcd->frame_list,
                     hcd->frame_list_dma);
        hcd->frame_list = NULL;
}

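/*
 * Program the Frame List base address and enable the periodic schedule.
 * HCFG.FrListEn encodes the list length: 0 = 8, 1 = 16, 2 = 32 and
 * 3 = 64 entries (see the switch below).
 */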
static void per_sched_enable(dwc_otg_hcd_t *hcd, uint16_t fr_list_en)
{
        hcfg_data_t hcfg;

        hcfg.d32 =
            DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);

        if (hcfg.b.perschedena) {
                /* already enabled */
                return;
        }

        DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hflbaddr,
                        hcd->frame_list_dma);

        switch (fr_list_en) {
        case 64:
                hcfg.b.frlisten = 3;
                break;
        case 32:
                hcfg.b.frlisten = 2;
                break;
        case 16:
                hcfg.b.frlisten = 1;
                break;
        case 8:
                hcfg.b.frlisten = 0;
                break;
        default:
                break;
        }

        hcfg.b.perschedena = 1;

        DWC_DEBUGPL(DBG_HCD, "Enabling Periodic schedule\n");
        DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg,
                        hcfg.d32);
}

static void per_sched_disable(dwc_otg_hcd_t *hcd)
{
        hcfg_data_t hcfg;

        hcfg.d32 =
            DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);

        if (!hcfg.b.perschedena) {
                /* already disabled */
                return;
        }
        hcfg.b.perschedena = 0;

        DWC_DEBUGPL(DBG_HCD, "Disabling Periodic schedule\n");
        DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg,
                        hcfg.d32);
}

/*
 * Activates/Deactivates FrameList entries for the channel
 * based on endpoint servicing period.
 */
void update_frame_list(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, uint8_t enable)
{
        uint16_t i, j, inc;
        dwc_hc_t *hc = NULL;

        if (!qh->channel) {
                DWC_ERROR("%s: qh->channel = %p\n", __func__, qh->channel);
                return;
        }

        if (!hcd) {
                DWC_ERROR("%s: hcd = %p\n", __func__, hcd);
                return;
        }

        if (!hcd->frame_list) {
                DWC_ERROR("%s: hcd->frame_list = %p\n", __func__,
                          hcd->frame_list);
                return;
        }

        hc = qh->channel;
        inc = frame_incr_val(qh);
        if (qh->ep_type == UE_ISOCHRONOUS)
                i = frame_list_idx(qh->sched_frame);
        else
                i = 0;

        j = i;
        do {
                if (enable)
                        hcd->frame_list[j] |= (1 << hc->hc_num);
                else
                        hcd->frame_list[j] &= ~(1 << hc->hc_num);
                j = (j + inc) & (MAX_FRLIST_EN_NUM - 1);
        } while (j != i);

        if (!enable)
                return;

        hc->schinfo = 0;
        if (qh->channel->speed == DWC_OTG_EP_SPEED_HIGH) {
                j = 1;
                /* TODO - check this */
                inc = (8 + qh->interval - 1) / qh->interval;
                for (i = 0; i < inc; i++) {
                        hc->schinfo |= j;
                        j = j << qh->interval;
                }
        } else {
                hc->schinfo = 0xff;
        }
}

void dump_frame_list(dwc_otg_hcd_t *hcd)
{
        int i = 0;

        DWC_PRINTF("--FRAME LIST (hex) --\n");
        for (i = 0; i < MAX_FRLIST_EN_NUM; i++) {
                DWC_PRINTF("%x\t", hcd->frame_list[i]);
                if (!(i % 8) && i)
                        DWC_PRINTF("\n");
        }
        DWC_PRINTF("\n----\n");
}

static void release_channel_ddma(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
        dwc_hc_t *hc = qh->channel;

        if (dwc_qh_is_non_per(qh))
                hcd->non_periodic_channels--;
        else
                update_frame_list(hcd, qh, 0);

        /*
         * The condition is added to prevent a double cleanup attempt in
         * case of device disconnect. See channel cleanup in
         * dwc_otg_hcd_disconnect_cb().
         */
        if (hc->qh) {
                dwc_otg_hc_cleanup(hcd->core_if, hc);
                DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
                hc->qh = NULL;
        }

        qh->channel = NULL;
        qh->ntd = 0;

        if (qh->desc_list) {
                dwc_memset(qh->desc_list, 0x00,
                           sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));
        }
}

/**
 * Initializes a QH structure's Descriptor DMA related members.
 * Allocates memory for the descriptor list.
 * On the first periodic QH, allocates memory for the FrameList
 * and enables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to init.
 *
 * @return 0 if successful, negative error code otherwise.
 */
int dwc_otg_hcd_qh_init_ddma(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
        int retval = 0;

        if (qh->do_split) {
                DWC_ERROR
                    ("SPLIT Transfers are not supported in Descriptor DMA.\n");
                return -1;
        }

        retval = desc_list_alloc(qh);

        if ((retval == 0)
            && (qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)) {
                if (!hcd->frame_list) {
                        retval = frame_list_alloc(hcd);
                        /* Enable periodic schedule on first periodic QH */
                        if (retval == 0)
                                per_sched_enable(hcd, MAX_FRLIST_EN_NUM);
                }
        }

        qh->ntd = 0;

        return retval;
}

/**
 * Frees descriptor list memory associated with the QH.
 * If the QH is periodic and the last one, frees FrameList memory
 * and disables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to free.
 */
void dwc_otg_hcd_qh_free_ddma(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
        desc_list_free(qh);

        /*
         * The channel may still be assigned: seen on Isoc URB dequeue,
         * where the channel was halted but no subsequent ChHalted
         * interrupt arrived to release it. When this function is later
         * reached from the endpoint disable routine, the channel
         * therefore remains assigned.
         */
        if (qh->channel)
                release_channel_ddma(hcd, qh);

        if ((qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)
            && !hcd->periodic_channels && hcd->frame_list) {
                per_sched_disable(hcd);
                frame_list_free(hcd);
        }
}

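/*
 * Map a Frame List index to a descriptor list index. For HS ISOC each
 * frame owns a set of 8 descriptors (one per micro-frame), so the
 * returned index is 8-aligned.
 */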
static uint8_t frame_to_desc_idx(dwc_otg_qh_t *qh, uint16_t frame_idx)
{
        uint8_t ret;

        if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
                /*
                 * Descriptor set (8 descriptors) index
                 * which is 8-aligned.
                 */
                ret = (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
        } else {
                ret = frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
        }

        return ret;
}

/*
 * Determine the starting frame for an Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static uint8_t calc_starting_frame(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh,
                                   uint8_t *skip_frames)
{
        uint16_t frame = 0;
        hcd->frame_number = dwc_otg_hcd_get_frame_number(hcd);

        /* sched_frame is always a frame number (not a uFrame), both in FS and HS! */

        /*
         * skip_frames is used to limit the number of activated descriptors,
         * to avoid the situation where the HC services the last activated
         * descriptor first.
         * Example for FS:
         * The current frame is 1, the scheduled frame is 3. Since the HC
         * always fetches the descriptor corresponding to curr_frame+1, the
         * descriptor corresponding to frame 2 will be fetched. If the number
         * of descriptors is max=64 (or greater), the list will be fully
         * programmed with Active descriptors and it is possible (though rare)
         * that the latest descriptor (considering rollback) corresponding to
         * frame 2 will be serviced first. The HS case is more probable
         * because, in fact, up to 11 uframes (16 in the code) may be skipped.
         */
        if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
                /*
                 * Consider the uframe counter as well, to start the transfer
                 * as soon as possible. If half of the frame has elapsed, skip
                 * 2 frames, otherwise just 1.
                 * The starting descriptor index must be 8-aligned, so if the
                 * current frame is nearly complete the next one is skipped
                 * as well.
                 */
                if (dwc_micro_frame_num(hcd->frame_number) >= 5)
                        *skip_frames = 2 * 8;
                else
                        *skip_frames = 1 * 8;

                frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
                frame = dwc_full_frame_num(frame);
        } else {
                /*
                 * Two frames are skipped for FS - the current and the next.
                 * But for descriptor programming 1 frame (descriptor) is
                 * enough, see the example above.
                 */
                *skip_frames = 1;
                frame = dwc_frame_num_inc(hcd->frame_number, 2);
        }

        return frame;
}

/*
 * Calculate the initial descriptor index for an isochronous transfer
 * based on the scheduled frame.
 */
static uint8_t recalc_initial_desc_idx(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
        uint16_t frame = 0, fr_idx, fr_idx_tmp;
        uint8_t skip_frames = 0;
        /*
         * With the current ISOC processing algorithm the channel is released
         * when there are no more QTDs in the list (qh->ntd == 0). Thus this
         * function is called only when qh->ntd == 0 and qh->channel == 0.
         *
         * So the qh->channel != NULL branch is not used; it is kept in the
         * source file for another possible approach: do not disable and
         * release the channel when the ISOC session completes, just move the
         * QH to the inactive schedule until a new QTD arrives. On a new QTD,
         * the QH is moved back to the 'ready' schedule, and the starting
         * frame (and therefore the starting desc_index) is recalculated.
         * In that case the channel is released only on ep_disable.
         */

        /*
         * Calculate the starting descriptor index.
         * For an INTERRUPT endpoint it is always 0.
         */
        if (qh->channel) {
                frame = calc_starting_frame(hcd, qh, &skip_frames);
                /*
                 * Calculate the initial descriptor index based on the
                 * FrameList current bitmap and the servicing period.
                 */
                fr_idx_tmp = frame_list_idx(frame);
                fr_idx =
                    (MAX_FRLIST_EN_NUM + frame_list_idx(qh->sched_frame) -
                     fr_idx_tmp)
                    % frame_incr_val(qh);
                fr_idx = (fr_idx + fr_idx_tmp) % MAX_FRLIST_EN_NUM;
        } else {
                qh->sched_frame = calc_starting_frame(hcd, qh, &skip_frames);
                fr_idx = frame_list_idx(qh->sched_frame);
        }

        qh->td_first = qh->td_last = frame_to_desc_idx(qh, fr_idx);

        return skip_frames;
}

#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS 1023
#define MAX_ISOC_XFER_SIZE_HS 3072
#define DESCNUM_THRESHOLD 4

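/*
 * Fill the descriptor list with active ISOC descriptors, one per
 * scheduled (micro)frame, walking the pending QTDs until the list is
 * full or all frames of all queued URBs are programmed.
 */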
static void init_isoc_dma_desc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh,
                               uint8_t skip_frames)
{
        struct dwc_otg_hcd_iso_packet_desc *frame_desc;
        dwc_otg_qtd_t *qtd;
        dwc_otg_host_dma_desc_t *dma_desc;
        uint16_t idx, inc, n_desc, ntd_max, max_xfer_size;

        idx = qh->td_last;
        inc = qh->interval;
        n_desc = 0;

        ntd_max = (max_desc_num(qh) + qh->interval - 1) / qh->interval;
        if (skip_frames && !qh->channel)
                ntd_max = ntd_max - skip_frames / qh->interval;

        max_xfer_size =
            (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) ?
            MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

        DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
                while ((qh->ntd < ntd_max)
                       && (qtd->isoc_frame_index_last < qtd->urb->packet_count)) {

                        dma_desc = &qh->desc_list[idx];
                        dwc_memset(dma_desc, 0x00,
                                   sizeof(dwc_otg_host_dma_desc_t));

                        frame_desc =
                            &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

                        if (frame_desc->length > max_xfer_size)
                                qh->n_bytes[idx] = max_xfer_size;
                        else
                                qh->n_bytes[idx] = frame_desc->length;
                        dma_desc->status.b_isoc.n_bytes = qh->n_bytes[idx];
                        dma_desc->status.b_isoc.a = 1;
                        dma_desc->status.b_isoc.sts = 0;

                        dma_desc->buf = qtd->urb->dma + frame_desc->offset;

                        qh->ntd++;

                        qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
                        /*
                         * Set IOC for each descriptor corresponding to the
                         * last frame of the URB.
                         */
                        if (qtd->isoc_frame_index_last ==
                            qtd->urb->packet_count)
                                dma_desc->status.b_isoc.ioc = 1;
#endif
                        idx = desclist_idx_inc(idx, inc, qh->dev_speed);
                        n_desc++;
                }
                qtd->in_process = 1;
        }

        qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
        /* Set IOC for the last descriptor if the descriptor list is full */
        if (qh->ntd == ntd_max) {
                idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
                qh->desc_list[idx].status.b_isoc.ioc = 1;
        }
#else
        /*
         * Set the IOC bit only for one descriptor. Always try to be ahead of
         * HW processing, i.e. on IOC generation the driver activates next
         * descriptors, while the core continues to process descriptors
         * following the one with IOC set.
         */
        if (n_desc > DESCNUM_THRESHOLD) {
                /*
                 * Move IOC "up". Required even if there is only one QTD in
                 * the list, because QTDs might continue to be queued even if
                 * only one was queued during activation. More than one QTD
                 * can actually be in the list if this function was called
                 * from XferCompletion - QTDs were queued during HW
                 * processing of the previous descriptor chunk.
                 */
                idx = desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
                                       qh->dev_speed);
        } else {
                /*
                 * Set the IOC for the latest descriptor if either the number
                 * of descriptors is not greater than the threshold or no
                 * more new descriptors were activated.
                 */
                idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
        }

        qh->desc_list[idx].status.b_isoc.ioc = 1;
#endif
}

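/*
 * Fill the descriptor list for a Control/Bulk/Interrupt transfer,
 * splitting the transfer buffer into MAX_DMA_DESC_SIZE chunks and
 * chaining the QTDs of an SG request into one descriptor chain.
 */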
static void init_non_isoc_dma_desc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
        dwc_hc_t *hc;
        dwc_otg_host_dma_desc_t *dma_desc;
        dwc_otg_qtd_t *qtd;
        int num_packets, len, n_desc = 0;

        hc = qh->channel;

        /*
         * Start with hc->xfer_buff as initialized in assign_and_init_hc().
         * If an SG transfer consists of multiple URBs, this pointer is
         * re-assigned to the buffer of the currently processed QTD.
         * For a non-SG request there is always one QTD active.
         */
        DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {

                if (n_desc) {
                        /* SG request - more than 1 QTD */
                        hc->xfer_buff =
                            (uint8_t *) qtd->urb->dma + qtd->urb->actual_length;
                        hc->xfer_len =
                            qtd->urb->length - qtd->urb->actual_length;
                }

                qtd->n_desc = 0;

                do {
                        dma_desc = &qh->desc_list[n_desc];
                        len = hc->xfer_len;

                        if (len > MAX_DMA_DESC_SIZE)
                                len = MAX_DMA_DESC_SIZE - hc->max_packet + 1;

                        if (hc->ep_is_in) {
                                if (len > 0) {
                                        num_packets =
                                            (len + hc->max_packet -
                                             1) / hc->max_packet;
                                } else {
                                        /* Need 1 packet for transfer length of 0. */
                                        num_packets = 1;
                                }
                                /* Always program an integral # of max packets for IN transfers. */
                                len = num_packets * hc->max_packet;
                        }

                        dma_desc->status.b.n_bytes = len;

                        qh->n_bytes[n_desc] = len;

                        if ((qh->ep_type == UE_CONTROL)
                            && (qtd->control_phase == DWC_OTG_CONTROL_SETUP))
                                dma_desc->status.b.sup = 1;     /* Setup Packet */

                        dma_desc->status.b.a = 1;       /* Active descriptor */
                        dma_desc->status.b.sts = 0;

                        dma_desc->buf =
                            ((unsigned long)hc->xfer_buff & 0xffffffff);

                        /*
                         * Last (or only) descriptor of an IN transfer
                         * with actual size less than MaxPacket.
                         */
                        if (len > hc->xfer_len) {
                                hc->xfer_len = 0;
                        } else {
                                hc->xfer_buff += len;
                                hc->xfer_len -= len;
                        }

                        qtd->n_desc++;
                        n_desc++;
                } while ((hc->xfer_len > 0)
                         && (n_desc != MAX_DMA_DESC_NUM_GENERIC));

                qtd->in_process = 1;

                if (qh->ep_type == UE_CONTROL)
                        break;

                if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
                        break;
        }

        if (n_desc) {
                /* Request Transfer Complete interrupt for the last descriptor */
                qh->desc_list[n_desc - 1].status.b.ioc = 1;
                /* End of List indicator */
                qh->desc_list[n_desc - 1].status.b.eol = 1;

                hc->ntd = n_desc;
        }
}

/**
 * For Control and Bulk endpoints, initializes the descriptor list
 * and starts the transfer.
 *
 * For Interrupt and Isochronous endpoints, initializes the descriptor list
 * then updates the FrameList, marking the appropriate entries as active.
 * For Isochronous, the starting descriptor index is calculated from the
 * scheduled frame, but only for the first transfer descriptor within a
 * session. The transfer is then started by enabling the channel.
 * For an Isochronous endpoint the channel is not halted on the XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session
 * is done.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to init.
 */
void dwc_otg_hcd_start_xfer_ddma(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
        /* Channel is already assigned */
        dwc_hc_t *hc = qh->channel;
        uint8_t skip_frames = 0;

        switch (hc->ep_type) {
        case DWC_OTG_EP_TYPE_CONTROL:
        case DWC_OTG_EP_TYPE_BULK:
                init_non_isoc_dma_desc(hcd, qh);

                dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
                break;
        case DWC_OTG_EP_TYPE_INTR:
                init_non_isoc_dma_desc(hcd, qh);

                update_frame_list(hcd, qh, 1);
                dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
                break;
        case DWC_OTG_EP_TYPE_ISOC:
                if (!qh->ntd)
                        skip_frames = recalc_initial_desc_idx(hcd, qh);

                init_isoc_dma_desc(hcd, qh, skip_frames);

                if (!hc->xfer_started) {
                        update_frame_list(hcd, qh, 1);

                        /*
                         * Always set to max, instead of the actual size.
                         * Otherwise ntd would change as the channel is
                         * being enabled, which is not recommended.
                         */
                        hc->ntd = max_desc_num(qh);
                        /* Enable channel only once for ISOC */
                        dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
                }
                break;
        default:
                break;
        }
}

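/*
 * Scan the ISOC descriptor list, update the status and actual length
 * of completed frames, and give back URBs whose last frame has been
 * serviced. Scanning stops at the first descriptor with IOC set.
 */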
static void complete_isoc_xfer_ddma(dwc_otg_hcd_t *hcd,
                                    dwc_hc_t *hc,
                                    dwc_otg_hc_regs_t *hc_regs,
                                    dwc_otg_halt_status_e halt_status)
{
        struct dwc_otg_hcd_iso_packet_desc *frame_desc;
        dwc_otg_qtd_t *qtd, *qtd_tmp;
        dwc_otg_qh_t *qh;
        dwc_otg_host_dma_desc_t *dma_desc;
        uint16_t idx, remain;
        uint8_t urb_compl;

        qh = hc->qh;
        idx = qh->td_first;

        if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
                DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list,
                                         qtd_list_entry)
                    qtd->in_process = 0;
                return;
        } else if ((halt_status == DWC_OTG_HC_XFER_AHB_ERR) ||
                   (halt_status == DWC_OTG_HC_XFER_BABBLE_ERR)) {
                /*
                 * The channel is halted in these error cases, which are
                 * considered serious issues. Complete all URBs marking all
                 * frames as failed, irrespective of whether some of the
                 * descriptors (frames) succeeded or not. Pass the error code
                 * to the completion routine as well, to update urb->status;
                 * some class drivers might use it to stop queuing transfer
                 * requests.
                 */
                int err = (halt_status == DWC_OTG_HC_XFER_AHB_ERR)
                    ? (-DWC_E_IO)
                    : (-DWC_E_OVERFLOW);

                DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list,
                                         qtd_list_entry) {
                        for (idx = 0; idx < qtd->urb->packet_count; idx++) {
                                frame_desc = &qtd->urb->iso_descs[idx];
                                frame_desc->status = err;
                        }
                        hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, err);
                        dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
                }
                return;
        }

        DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list,
                                 qtd_list_entry) {

                if (!qtd->in_process)
                        break;

                urb_compl = 0;

                do {
                        dma_desc = &qh->desc_list[idx];

                        frame_desc =
                            &qtd->urb->iso_descs[qtd->isoc_frame_index];
                        remain =
                            hc->ep_is_in ? dma_desc->status.b_isoc.n_bytes : 0;

                        if (dma_desc->status.b_isoc.sts == DMA_DESC_STS_PKTERR) {
                                /*
                                 * XactError or, unable to complete all the
                                 * transactions in the scheduled
                                 * micro-frame/frame, both indicated by
                                 * DMA_DESC_STS_PKTERR.
                                 */
                                qtd->urb->error_count++;
                                frame_desc->actual_length =
                                    qh->n_bytes[idx] - remain;
                                frame_desc->status = -DWC_E_PROTOCOL;
                        } else {
                                /* Success */
                                frame_desc->actual_length =
                                    qh->n_bytes[idx] - remain;
                                frame_desc->status = 0;
                        }

                        if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
                                /*
                                 * urb->status is not used for isoc transfers
                                 * here. The individual frame_desc statuses
                                 * are used instead.
                                 */
                                hcd->fops->complete(hcd, qtd->urb->priv,
                                                    qtd->urb, 0);
                                dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

                                /*
                                 * This check is necessary because urb_dequeue
                                 * can be called from the urb complete
                                 * callback (the sound driver is an example).
                                 * All pending URBs are dequeued there, so no
                                 * further processing is needed.
                                 */
                                if (hc->halt_status ==
                                    DWC_OTG_HC_XFER_URB_DEQUEUE) {
                                        return;
                                }

                                urb_compl = 1;
                        }

                        qh->ntd--;

                        /* Stop if IOC requested descriptor reached */
                        if (dma_desc->status.b_isoc.ioc) {
                                idx =
                                    desclist_idx_inc(idx, qh->interval,
                                                     hc->speed);
                                goto stop_scan;
                        }

                        idx = desclist_idx_inc(idx, qh->interval, hc->speed);

                        if (urb_compl)
                                break;
                } while (idx != qh->td_first);
        }
stop_scan:
        qh->td_first = idx;
}

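/*
 * Update the URB state from one completed non-ISOC descriptor.
 * Returns 1 on a fatal error (the caller should stop scanning the
 * list), 0 otherwise. *xfer_done is set when the URB's transfer is
 * complete.
 */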
uint8_t update_non_isoc_urb_state_ddma(dwc_otg_hcd_t *hcd,
                                       dwc_hc_t *hc,
                                       dwc_otg_qtd_t *qtd,
                                       dwc_otg_host_dma_desc_t *dma_desc,
                                       dwc_otg_halt_status_e halt_status,
                                       uint32_t n_bytes, uint8_t *xfer_done)
{
        uint16_t remain = hc->ep_is_in ? dma_desc->status.b.n_bytes : 0;
        dwc_otg_hcd_urb_t *urb = qtd->urb;

        if (halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
                urb->status = -DWC_E_IO;
                return 1;
        }
        if (dma_desc->status.b.sts == DMA_DESC_STS_PKTERR) {
                switch (halt_status) {
                case DWC_OTG_HC_XFER_STALL:
                        urb->status = -DWC_E_PIPE;
                        break;
                case DWC_OTG_HC_XFER_BABBLE_ERR:
                        urb->status = -DWC_E_OVERFLOW;
                        break;
                case DWC_OTG_HC_XFER_XACT_ERR:
                        urb->status = -DWC_E_PROTOCOL;
                        break;
                default:
                        DWC_ERROR
                            ("%s: Unhandled descriptor error status (%d)\n",
                             __func__, halt_status);
                        break;
                }
                return 1;
        }

        if (dma_desc->status.b.a == 1) {
                DWC_DEBUGPL(DBG_HCDV,
                            "Active descriptor encountered on channel %d\n",
                            hc->hc_num);
                return 0;
        }

        if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL) {
                if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
                        urb->actual_length += n_bytes - remain;
                        if (remain || urb->actual_length == urb->length) {
                                /*
                                 * For the Control Data stage do not set
                                 * urb->status = 0, to prevent the URB
                                 * callback. Set it when the Status phase
                                 * is done. See below.
                                 */
                                *xfer_done = 1;
                        }
                } else if (qtd->control_phase == DWC_OTG_CONTROL_STATUS) {
                        urb->status = 0;
                        *xfer_done = 1;
                }
                /* No handling for SETUP stage */
        } else {
                /* BULK and INTR */
                urb->actual_length += n_bytes - remain;
                if (remain || urb->actual_length == urb->length) {
                        urb->status = 0;
                        *xfer_done = 1;
                }
        }

        return 0;
}

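/*
 * Walk the QH's QTDs, consume their completed descriptors and give
 * back finished URBs. Also advances the Control transfer phase and
 * saves or resets the data toggle as required by the halt status.
 */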
static void complete_non_isoc_xfer_ddma(dwc_otg_hcd_t *hcd,
                                        dwc_hc_t *hc,
                                        dwc_otg_hc_regs_t *hc_regs,
                                        dwc_otg_halt_status_e halt_status)
{
        dwc_otg_hcd_urb_t *urb = NULL;
        dwc_otg_qtd_t *qtd, *qtd_tmp;
        dwc_otg_qh_t *qh;
        dwc_otg_host_dma_desc_t *dma_desc;
        uint32_t n_bytes, n_desc, i, qtd_n_desc;
        uint8_t failed = 0, xfer_done;

        n_desc = 0;

        qh = hc->qh;
        if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
                DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list,
                                         qtd_list_entry) {
                        qtd->in_process = 0;
                }
                return;
        }

        DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {

                urb = qtd->urb;

                n_bytes = 0;
                xfer_done = 0;
                qtd_n_desc = qtd->n_desc;
                for (i = 0; i < qtd_n_desc; i++) {
                        dma_desc = &qh->desc_list[n_desc];

                        n_bytes = qh->n_bytes[n_desc];

                        failed =
                            update_non_isoc_urb_state_ddma(hcd, hc, qtd,
                                                           dma_desc,
                                                           halt_status, n_bytes,
                                                           &xfer_done);

                        if (failed
                            || (xfer_done
                                && (urb->status != -DWC_E_IN_PROGRESS))) {

                                hcd->fops->complete(hcd, urb->priv, urb,
                                                    urb->status);
                                dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

                                if (failed)
                                        goto stop_scan;
                        } else if (qh->ep_type == UE_CONTROL) {
                                if (qtd->control_phase == DWC_OTG_CONTROL_SETUP) {
                                        if (urb->length > 0) {
                                                qtd->control_phase =
                                                    DWC_OTG_CONTROL_DATA;
                                        } else {
                                                qtd->control_phase =
                                                    DWC_OTG_CONTROL_STATUS;
                                        }
                                        DWC_DEBUGPL(DBG_HCDV,
                                                    "  Control setup transaction done\n");
                                } else if (qtd->control_phase ==
                                           DWC_OTG_CONTROL_DATA) {
                                        if (xfer_done) {
                                                qtd->control_phase =
                                                    DWC_OTG_CONTROL_STATUS;
                                                DWC_DEBUGPL(DBG_HCDV,
                                                            "  Control data transfer done\n");
                                        } else if (i + 1 == qtd->n_desc) {
                                                /*
                                                 * Last descriptor for the
                                                 * Control data stage which is
                                                 * not completed yet.
                                                 */
                                                dwc_otg_hcd_save_data_toggle(hc,
                                                                             hc_regs,
                                                                             qtd);
                                        }
                                }
                        }

                        n_desc++;
                }
        }

stop_scan:

        if (qh->ep_type != UE_CONTROL) {
                /*
                 * Reset the data toggle for bulk and interrupt endpoints in
                 * case of stall. See handle_hc_stall_intr().
                 */
                if (halt_status == DWC_OTG_HC_XFER_STALL)
                        qh->data_toggle = DWC_OTG_HC_PID_DATA0;
                else
                        dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
        }

        if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
                hcint_data_t hcint;
                hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
                if (hcint.b.nyet) {
                        /*
                         * Got a NYET on the last transaction of the transfer.
                         * It means that the endpoint should be in the PING
                         * state at the beginning of the next transfer.
                         */
                        qh->ping_state = 1;
                        clear_hc_int(hc_regs, nyet);
                }
        }
}

/**
 * This function is called from interrupt handlers.
 * Scans the descriptor list, updates the URB's status and
 * calls the completion routine for the URB if it's done.
 * Releases the channel to be used by other transfers.
 * In case of an Isochronous endpoint the channel is not halted until
 * the end of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 *
 * Calls transaction selection routines to activate pending transfers.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param hc Host channel the transfer is completed on.
 * @param hc_regs Host channel registers.
 * @param halt_status Reason the channel is being halted,
 *                    or just XferComplete for isochronous transfers.
 */
void dwc_otg_hcd_complete_xfer_ddma(dwc_otg_hcd_t *hcd,
                                    dwc_hc_t *hc,
                                    dwc_otg_hc_regs_t *hc_regs,
                                    dwc_otg_halt_status_e halt_status)
{
        uint8_t continue_isoc_xfer = 0;
        dwc_otg_transaction_type_e tr_type;
        dwc_otg_qh_t *qh = hc->qh;

        if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {

                complete_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

                /* Release the channel if halted or session completed */
                if (halt_status != DWC_OTG_HC_XFER_COMPLETE ||
                    DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {

                        /* Halt the channel if session completed */
                        if (halt_status == DWC_OTG_HC_XFER_COMPLETE)
                                dwc_otg_hc_halt(hcd->core_if, hc, halt_status);

                        release_channel_ddma(hcd, qh);
                        dwc_otg_hcd_qh_remove(hcd, qh);
                } else {
                        /* Keep in assigned schedule to continue transfer */
                        DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
                                           &qh->qh_list_entry);
                        continue_isoc_xfer = 1;
                }
                /** @todo Consider the case when the period exceeds the
                 *  FrameList size. The Frame Rollover interrupt should be
                 *  used.
                 */
        } else {
                /*
                 * Scan the descriptor list to complete the URB(s), then
                 * release the channel.
                 */
                complete_non_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

                release_channel_ddma(hcd, qh);
                dwc_otg_hcd_qh_remove(hcd, qh);

                if (!DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {
                        /*
                         * Add back to the inactive non-periodic schedule on
                         * normal completion.
                         */
                        dwc_otg_hcd_qh_add(hcd, qh);
                }
        }

        tr_type = dwc_otg_hcd_select_transactions(hcd);
        if (tr_type != DWC_OTG_TRANSACTION_NONE || continue_isoc_xfer) {
                if (continue_isoc_xfer) {
                        if (tr_type == DWC_OTG_TRANSACTION_NONE)
                                tr_type = DWC_OTG_TRANSACTION_PERIODIC;
                        else if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC)
                                tr_type = DWC_OTG_TRANSACTION_ALL;
                }
                dwc_otg_hcd_queue_transactions(hcd, tr_type);
        }
}

#endif /* DWC_DEVICE_ONLY */