/*==========================================================================
 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_ddma.c $
 *
 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
 * otherwise expressly agreed to in writing between Synopsys and you.
 *
 * The Software IS NOT an item of Licensed Software or Licensed Product under
 * any End User Software License Agreement or Agreement for Licensed Product
 * with Synopsys or any supplement thereto. You are permitted to use and
 * redistribute this Software in source and binary forms, with or without
 * modification, provided that redistributions of source code must retain this
 * notice. You may not view, use, disclose, copy or distribute this file or
 * any information contained herein except pursuant to this license grant from
 * Synopsys. If you do not agree with this notice, including the disclaimer
 * below, then you are not authorized to use the Software.
 *
 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 * ========================================================================== */
#ifndef DWC_DEVICE_ONLY

/** @file
 * This file contains Descriptor DMA support implementation for host mode.
 */

#include "dwc_otg_hcd.h"
#include "dwc_otg_regs.h"

static inline uint8_t frame_list_idx(uint16_t frame)
{
	uint8_t ret;

	ret = frame & (MAX_FRLIST_EN_NUM - 1);

	return ret;
}

static inline uint16_t desclist_idx_inc(uint16_t idx, uint16_t inc,
					uint8_t speed)
{
	return (idx + inc) &
		(((speed == DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static inline uint16_t desclist_idx_dec(uint16_t idx, uint16_t inc,
					uint8_t speed)
{
	return (idx - inc) &
		(((speed == DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

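/*
 * Illustration (assuming MAX_DMA_DESC_NUM_GENERIC == 64): the list sizes
 * are powers of two, so the masking above is a cheap modulo. For a non-ISOC
 * list, desclist_idx_inc(62, 4, speed) = (62 + 4) & 63 = 2, i.e. the index
 * wraps past the end of the descriptor list; desclist_idx_dec(2, 4, speed)
 * walks back across the same boundary to 62.
 */
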
static inline uint16_t max_desc_num(dwc_otg_qh_t *qh)
{
	return (((qh->ep_type == UE_ISOCHRONOUS)
		 && (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH))
		? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC);
}

static inline uint16_t frame_incr_val(dwc_otg_qh_t *qh)
{
	return ((qh->dev_speed == DWC_OTG_EP_SPEED_HIGH)
		? ((qh->interval + 8 - 1) / 8)
		: qh->interval);
}

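/*
 * Worked example (assuming qh->interval is kept in uframes for HS, frames
 * otherwise): a HS interrupt endpoint with bInterval 4 has an interval of
 * 2^(4-1) = 8 uframes, so frame_incr_val() returns (8 + 8 - 1) / 8 = 1 and
 * the endpoint occupies every FrameList entry. An interval of 16 uframes
 * gives (16 + 7) / 8 = 2, i.e. every other entry.
 */
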
static int desc_list_alloc(dwc_otg_qh_t *qh)
{
	int retval = 0;

	qh->desc_list = (dwc_otg_host_dma_desc_t *)
		DWC_DEV_DMA_ALLOC_ATOMIC(sizeof(dwc_otg_host_dma_desc_t) *
					 max_desc_num(qh), &qh->desc_list_dma);

	if (!qh->desc_list) {
		retval = -DWC_E_NO_MEMORY;
		DWC_ERROR("%s: DMA descriptor list allocation failed\n",
			  __func__);
		return retval;
	}

	dwc_memset(qh->desc_list, 0x00,
		   sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));

	qh->n_bytes =
		(uint32_t *)DWC_ALLOC_ATOMIC(sizeof(uint32_t) * max_desc_num(qh));

	if (!qh->n_bytes) {
		retval = -DWC_E_NO_MEMORY;
		DWC_ERROR
			("%s: Failed to allocate array for descriptors' size actual values\n",
			 __func__);
	}

	return retval;
}

static void desc_list_free(dwc_otg_qh_t *qh)
{
	if (qh->desc_list) {
		DWC_DEV_DMA_FREE(max_desc_num(qh), qh->desc_list,
				 qh->desc_list_dma);
		qh->desc_list = NULL;
	}

	if (qh->n_bytes) {
		DWC_FREE(qh->n_bytes);
		qh->n_bytes = NULL;
	}
}

static int frame_list_alloc(dwc_otg_hcd_t *hcd)
{
	int retval = 0;

	if (hcd->frame_list)
		return 0;

	hcd->frame_list = DWC_DEV_DMA_ALLOC_ATOMIC(4 * MAX_FRLIST_EN_NUM,
						   &hcd->frame_list_dma);
	if (!hcd->frame_list) {
		retval = -DWC_E_NO_MEMORY;
		DWC_ERROR("%s: Frame List allocation failed\n", __func__);
		return retval;
	}

	dwc_memset(hcd->frame_list, 0x00, 4 * MAX_FRLIST_EN_NUM);

	return retval;
}

static void frame_list_free(dwc_otg_hcd_t *hcd)
{
	if (!hcd->frame_list)
		return;

	DWC_DEV_DMA_FREE(4 * MAX_FRLIST_EN_NUM, hcd->frame_list,
			 hcd->frame_list_dma);
	hcd->frame_list = NULL;
}

static void per_sched_enable(dwc_otg_hcd_t *hcd, uint16_t fr_list_en)
{
	hcfg_data_t hcfg;

	hcfg.d32 =
		DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);

	if (hcfg.b.perschedena) {
		/* already enabled */
		return;
	}

	/* Program the FrameList base address before enabling the schedule */
	DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hflbaddr,
			hcd->frame_list_dma);

	switch (fr_list_en) {
	case 64:
		hcfg.b.frlisten = 3;
		break;
	case 32:
		hcfg.b.frlisten = 2;
		break;
	case 16:
		hcfg.b.frlisten = 1;
		break;
	case 8:
		hcfg.b.frlisten = 0;
		break;
	default:
		break;
	}

	hcfg.b.perschedena = 1;

	DWC_DEBUGPL(DBG_HCD, "Enabling Periodic schedule\n");
	DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg,
			hcfg.d32);
}

static void per_sched_disable(dwc_otg_hcd_t *hcd)
{
	hcfg_data_t hcfg;

	hcfg.d32 =
		DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);

	if (!hcfg.b.perschedena) {
		/* already disabled */
		return;
	}

	hcfg.b.perschedena = 0;

	DWC_DEBUGPL(DBG_HCD, "Disabling Periodic schedule\n");
	DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg,
			hcfg.d32);
}

/*
 * Activates/Deactivates FrameList entries for the channel
 * based on endpoint servicing period.
 */
void update_frame_list(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, uint8_t enable)
{
	uint16_t i, j, inc;
	dwc_hc_t *hc = NULL;

	if (!qh->channel) {
		DWC_ERROR("qh->channel = %p", qh->channel);
		return;
	}

	if (!hcd) {
		DWC_ERROR("------hcd = %p", hcd);
		return;
	}

	if (!hcd->frame_list) {
		DWC_ERROR("-------hcd->frame_list = %p", hcd->frame_list);
		return;
	}

	hc = qh->channel;
	inc = frame_incr_val(qh);
	if (qh->ep_type == UE_ISOCHRONOUS)
		i = frame_list_idx(qh->sched_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hcd->frame_list[j] |= (1 << hc->hc_num);
		else
			hcd->frame_list[j] &= ~(1 << hc->hc_num);
		j = (j + inc) & (MAX_FRLIST_EN_NUM - 1);
	} while (j != i);

	if (!enable)
		return;

	hc->schinfo = 0;
	if (qh->channel->speed == DWC_OTG_EP_SPEED_HIGH) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->interval - 1) / qh->interval;
		for (i = 0; i < inc; i++) {
			hc->schinfo |= j;
			j = j << qh->interval;
		}
	} else {
		hc->schinfo = 0xff;
	}
}

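/*
 * Example of the resulting bitmap (illustrative values): with inc == 4,
 * hc->hc_num == 2 and an ISOC start index of 1, entries 1, 5, 9, ... 61
 * each get bit 2 ORed in, so the core fetches channel 2's descriptors
 * every 4th FrameList entry; on disable the same walk clears the bit.
 */
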
void dump_frame_list(dwc_otg_hcd_t *hcd)
{
	int i = 0;

	DWC_PRINTF("--FRAME LIST (hex) --\n");
	for (i = 0; i < MAX_FRLIST_EN_NUM; i++) {
		DWC_PRINTF("%x\t", hcd->frame_list[i]);
		if (!(i % 8) && i)
			DWC_PRINTF("\n");
	}
	DWC_PRINTF("\n----\n");
}

static void release_channel_ddma(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
	dwc_hc_t *hc = qh->channel;

	if (dwc_qh_is_non_per(qh))
		hcd->non_periodic_channels--;
	else
		update_frame_list(hcd, qh, 0);

	/*
	 * The condition is added to prevent double cleanup try in case of device
	 * disconnect. See channel cleanup in dwc_otg_hcd_disconnect_cb().
	 */
	if (hc->qh) {
		dwc_otg_hc_cleanup(hcd->core_if, hc);
		DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
		hc->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list) {
		dwc_memset(qh->desc_list, 0x00,
			   sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));
	}
}

/**
 * Initializes a QH structure's Descriptor DMA related members.
 * Allocates memory for descriptor list.
 * On first periodic QH, allocates memory for FrameList
 * and enables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to init.
 *
 * @return 0 if successful, negative error code otherwise.
 */
int dwc_otg_hcd_qh_init_ddma(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
	int retval = 0;

	if (qh->do_split) {
		DWC_ERROR
			("SPLIT Transfers are not supported in Descriptor DMA.\n");
		return -1;
	}

	retval = desc_list_alloc(qh);

	if ((retval == 0)
	    && (qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)) {
		if (!hcd->frame_list) {
			retval = frame_list_alloc(hcd);
			/* Enable periodic schedule on first periodic QH */
			if (retval == 0)
				per_sched_enable(hcd, MAX_FRLIST_EN_NUM);
		}
	}

	qh->ntd = 0;

	return retval;
}

/**
 * Frees descriptor list memory associated with the QH.
 * If QH is periodic and the last, frees FrameList memory
 * and disables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to free.
 */
void dwc_otg_hcd_qh_free_ddma(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
	desc_list_free(qh);

	/*
	 * Channel still assigned due to some reasons.
	 * Seen on Isoc URB dequeue. Channel halted but no subsequent
	 * ChHalted interrupt to release the channel. Afterwards
	 * when it comes here from endpoint disable routine
	 * channel remains assigned.
	 */
	if (qh->channel)
		release_channel_ddma(hcd, qh);

	if ((qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)
	    && !hcd->periodic_channels && hcd->frame_list) {
		per_sched_disable(hcd);
		frame_list_free(hcd);
	}
}

static uint8_t frame_to_desc_idx(dwc_otg_qh_t *qh, uint16_t frame_idx)
{
	uint8_t ret;

	if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
		/*
		 * Descriptor set (8 descriptors) index
		 * which is 8-aligned.
		 */
		ret = (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	} else {
		ret = frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
	}

	return ret;
}

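/*
 * Mapping illustration (assuming MAX_DMA_DESC_NUM_HS_ISOC == 256): for HS,
 * frame_idx 5 gives (5 & 31) * 8 = 40, so descriptors 40..47 form the
 * 8-aligned set for that frame (one per uframe). For FS/LS one descriptor
 * serves a whole frame: frame_idx 70 maps to 70 & 63 = 6.
 */
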
/*
 * Determine starting frame for Isochronous transfer.
 * Few frames skipped to prevent race condition with HC.
 */
static uint16_t calc_starting_frame(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh,
				    uint8_t *skip_frames)
{
	uint16_t frame = 0;

	hcd->frame_number = dwc_otg_hcd_get_frame_number(hcd);

	/* sched_frame is always frame number (not uFrame) both in FS and HS! */

	/*
	 * skip_frames is used to limit the number of activated descriptors,
	 * to avoid the situation where the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since the HC always
	 * fetches the descriptor corresponding to curr_frame+1, the
	 * descriptor corresponding to frame 2 will be fetched. If the number
	 * of descriptors is max=64 (or greater) the list will be fully
	 * programmed with Active descriptors and it is possible (although
	 * rare) that the latest descriptor (considering rollback)
	 * corresponding to frame 2 will be serviced first. The HS case is
	 * more probable because, in fact, up to 11 uframes (16 in the code)
	 * may be skipped.
	 */
	if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
		/*
		 * Consider uframe counter also, to start xfer asap.
		 * If half of the frame elapsed skip 2 frames otherwise
		 * just 1 frame.
		 * Starting descriptor index must be 8-aligned, so
		 * if the current frame is near to complete the next one
		 * is skipped as well.
		 */
		if (dwc_micro_frame_num(hcd->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc_frame_num_inc(hcd->frame_number,
						  *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc_frame_num_inc(hcd->frame_number,
						  *skip_frames);
		}

		frame = dwc_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see example above.
		 */
		*skip_frames = 1;
		frame = dwc_frame_num_inc(hcd->frame_number, 2);
	}

	return frame;
}

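/*
 * Example of the HS skip logic above: at uframe 2 of the current frame,
 * *skip_frames = 8 and the transfer starts at the next full frame boundary;
 * at uframe 6 (half the frame already elapsed), *skip_frames = 16 so one
 * more frame is skipped, keeping the starting descriptor index 8-aligned.
 */
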
/*
 * Calculate initial descriptor index for isochronous transfer
 * based on scheduled frame.
 */
static uint8_t recalc_initial_desc_idx(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
	uint16_t frame = 0, fr_idx, fr_idx_tmp;
	uint8_t skip_frames = 0;

	/*
	 * With the current ISOC processing algorithm the channel is being
	 * released when no more QTDs are in the list (qh->ntd == 0).
	 * Thus this function is called only when qh->ntd == 0 and
	 * qh->channel == 0.
	 *
	 * So the qh->channel != NULL branch is not used; it is kept in the
	 * source file for another possible approach which is:
	 * do not disable and release the channel when an ISOC session
	 * completed, just move the QH to the inactive schedule until a new
	 * QTD arrives. On a new QTD, the QH is moved back to the 'ready'
	 * schedule, and the starting frame and therefore the starting
	 * desc_index are recalculated. In this case the channel is released
	 * only on ep_disable.
	 */

	/* Calculate starting descriptor index. For INTERRUPT endpoint it is always 0. */
	if (qh->channel) {
		frame = calc_starting_frame(hcd, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on FrameList
		 * current bitmap and servicing period.
		 */
		fr_idx_tmp = frame_list_idx(frame);
		fr_idx =
			(MAX_FRLIST_EN_NUM + frame_list_idx(qh->sched_frame) -
			 fr_idx_tmp)
			% frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % MAX_FRLIST_EN_NUM;
	} else {
		qh->sched_frame = calc_starting_frame(hcd, qh, &skip_frames);
		fr_idx = frame_list_idx(qh->sched_frame);
	}

	qh->td_first = qh->td_last = frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}

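/*
 * Worked example for the qh->channel != NULL branch (illustrative values,
 * MAX_FRLIST_EN_NUM == 64): with frame_incr_val(qh) == 4,
 * frame_list_idx(qh->sched_frame) == 10 and fr_idx_tmp == 7,
 * fr_idx = (64 + 10 - 7) % 4 = 3 and then (3 + 7) % 64 = 10: the first
 * FrameList index at or after the newly calculated starting frame that
 * preserves the endpoint's original servicing phase.
 */
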
#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS 1023
#define MAX_ISOC_XFER_SIZE_HS 3072
#define DESCNUM_THRESHOLD 4

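/*
 * 1023 is the largest FS isochronous packet allowed by USB 2.0; 3072 covers
 * a high-bandwidth HS isochronous endpoint (3 transactions of 1024 bytes
 * per microframe).
 */
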
static void init_isoc_dma_desc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh,
			       uint8_t skip_frames)
{
	struct dwc_otg_hcd_iso_packet_desc *frame_desc;
	dwc_otg_qtd_t *qtd;
	dwc_otg_host_dma_desc_t *dma_desc;
	uint16_t idx, inc, n_desc, ntd_max, max_xfer_size;

	idx = qh->td_last;
	inc = qh->interval;
	n_desc = 0;

	ntd_max = (max_desc_num(qh) + qh->interval - 1) / qh->interval;
	if (skip_frames && !qh->channel)
		ntd_max = ntd_max - skip_frames / qh->interval;

	max_xfer_size =
		(qh->dev_speed ==
		 DWC_OTG_EP_SPEED_HIGH) ? MAX_ISOC_XFER_SIZE_HS :
		MAX_ISOC_XFER_SIZE_FS;

	DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
		while ((qh->ntd < ntd_max)
		       && (qtd->isoc_frame_index_last <
			   qtd->urb->packet_count)) {
			dma_desc = &qh->desc_list[idx];
			dwc_memset(dma_desc, 0x00,
				   sizeof(dwc_otg_host_dma_desc_t));

			frame_desc =
				&qtd->urb->iso_descs[qtd->isoc_frame_index_last];

			if (frame_desc->length > max_xfer_size)
				qh->n_bytes[idx] = max_xfer_size;
			else
				qh->n_bytes[idx] = frame_desc->length;

			dma_desc->status.b_isoc.n_bytes = qh->n_bytes[idx];
			dma_desc->status.b_isoc.a = 1;
			dma_desc->status.b_isoc.sts = 0;

			dma_desc->buf = qtd->urb->dma + frame_desc->offset;

			qh->ntd++;

			qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
			/*
			 * Set IOC for each descriptor corresponding to the
			 * last frame of the URB.
			 */
			if (qtd->isoc_frame_index_last ==
			    qtd->urb->packet_count)
				dma_desc->status.b_isoc.ioc = 1;
#endif
			idx = desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for the last descriptor if descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status.b_isoc.ioc = 1;
	}
#else
	/*
	 * Set IOC bit only for one descriptor.
	 * Always try to be ahead of HW processing,
	 * i.e. on IOC generation driver activates next descriptors but
	 * core continues to process descriptors following the one with IOC set.
	 */
	if (n_desc > DESCNUM_THRESHOLD) {
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * but during the activation only one was queued.
		 * Actually more than one QTD might be in the list if this
		 * function is called from XferCompletion - QTDs were queued
		 * during HW processing of the previous descriptor chunk.
		 */
		idx = desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
				       qh->dev_speed);
	} else {
		/*
		 * Set the IOC for the latest descriptor if either the number
		 * of descriptors is not greater than threshold or no more
		 * new descriptors activated.
		 */
		idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
	}

	qh->desc_list[idx].status.b_isoc.ioc = 1;
#endif
}

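/*
 * Sizing example (assuming MAX_DMA_DESC_NUM_HS_ISOC == 256): a HS ISOC
 * endpoint with qh->interval == 8 uframes gets ntd_max = (256 + 7) / 8 = 32
 * descriptors; if a fresh channel starts with skip_frames == 16, two of
 * them (16 / 8) are given up so the HC cannot service the most recently
 * activated descriptor first.
 */
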
static void init_non_isoc_dma_desc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
	dwc_hc_t *hc;
	dwc_otg_host_dma_desc_t *dma_desc;
	dwc_otg_qtd_t *qtd;

	int num_packets, len, n_desc = 0;

	hc = qh->channel;

	/*
	 * Start with hc->xfer_buff initialized in
	 * assign_and_init_hc(), then if SG transfer consists of multiple URBs,
	 * this pointer is re-assigned to the buffer of the currently processed
	 * QTD. For a non-SG request there is always one QTD active.
	 */
	DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
		if (n_desc) {
			/* SG request - more than 1 QTDs */
			hc->xfer_buff =
				(uint8_t *)qtd->urb->dma + qtd->urb->actual_length;
			hc->xfer_len =
				qtd->urb->length - qtd->urb->actual_length;
		}

		qtd->n_desc = 0;

		do {
			dma_desc = &qh->desc_list[n_desc];
			len = hc->xfer_len;

			if (len > MAX_DMA_DESC_SIZE)
				len = MAX_DMA_DESC_SIZE - hc->max_packet + 1;

			if (hc->ep_is_in) {
				if (len > 0) {
					num_packets =
						(len + hc->max_packet -
						 1) / hc->max_packet;
				} else {
					/* Need 1 packet for transfer length of 0. */
					num_packets = 1;
				}
				/* Always program an integral # of max packets for IN transfers. */
				len = num_packets * hc->max_packet;
			}

			dma_desc->status.b.n_bytes = len;

			qh->n_bytes[n_desc] = len;

			if ((qh->ep_type == UE_CONTROL)
			    && (qtd->control_phase == DWC_OTG_CONTROL_SETUP))
				dma_desc->status.b.sup = 1;	/* Setup Packet */

			dma_desc->status.b.a = 1;	/* Active descriptor */
			dma_desc->status.b.sts = 0;

			dma_desc->buf =
				((unsigned long)hc->xfer_buff & 0xffffffff);

			/*
			 * Last descriptor (or single) of IN transfer
			 * with actual size less than MaxPacket.
			 */
			if (len > hc->xfer_len) {
				hc->xfer_len = 0;
			} else {
				hc->xfer_buff += len;
				hc->xfer_len -= len;
			}

			qtd->n_desc++;
			n_desc++;
		} while ((hc->xfer_len > 0)
			 && (n_desc != MAX_DMA_DESC_NUM_GENERIC));

		qtd->in_process = 1;

		if (qh->ep_type == UE_CONTROL)
			break;

		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		/* Request Transfer Complete interrupt for the last descriptor */
		qh->desc_list[n_desc - 1].status.b.ioc = 1;
		/* End of List indicator */
		qh->desc_list[n_desc - 1].status.b.eol = 1;

		hc->ntd = n_desc;
	}
}

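/*
 * Chunking example (assuming MAX_DMA_DESC_SIZE == 131071): a 300000-byte
 * bulk IN transfer with max_packet == 512 is split into descriptors of
 * 130560 bytes (131071 - 512 + 1, an exact multiple of max_packet),
 * 130560 bytes, and a final descriptor programmed for 38912 bytes
 * (76 max packets) although only 38880 bytes remain - hence the
 * "len > hc->xfer_len" adjustment above.
 */
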
/**
 * For Control and Bulk endpoints initializes descriptor list
 * and starts the transfer.
 *
 * For Interrupt and Isochronous endpoints initializes descriptor list
 * then updates FrameList, marking appropriate entries as active.
 * In case of Isochronous, the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 * For Isochronous endpoint the channel is not halted on XferComplete
 * interrupt so it remains assigned to the endpoint (QH) until the session
 * is done.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to init.
 */
void dwc_otg_hcd_start_xfer_ddma(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
	/* Channel is already assigned */
	dwc_hc_t *hc = qh->channel;
	uint8_t skip_frames = 0;

	switch (hc->ep_type) {
	case DWC_OTG_EP_TYPE_CONTROL:
	case DWC_OTG_EP_TYPE_BULK:
		init_non_isoc_dma_desc(hcd, qh);

		dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
		break;
	case DWC_OTG_EP_TYPE_INTR:
		init_non_isoc_dma_desc(hcd, qh);

		update_frame_list(hcd, qh, 1);

		dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
		break;
	case DWC_OTG_EP_TYPE_ISOC:
		if (!qh->ntd)
			skip_frames = recalc_initial_desc_idx(hcd, qh);

		init_isoc_dma_desc(hcd, qh, skip_frames);

		if (!hc->xfer_started) {
			update_frame_list(hcd, qh, 1);

			/*
			 * Always set to max, instead of actual size.
			 * Otherwise ntd will be changed with
			 * channel being enabled. Not recommended.
			 */
			hc->ntd = max_desc_num(qh);
			/* Enable channel only once for ISOC */
			dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
		}
		break;
	default:
		break;
	}
}

static void complete_isoc_xfer_ddma(dwc_otg_hcd_t *hcd,
				    dwc_hc_t *hc,
				    dwc_otg_hc_regs_t *hc_regs,
				    dwc_otg_halt_status_e halt_status)
{
	struct dwc_otg_hcd_iso_packet_desc *frame_desc;
	dwc_otg_qtd_t *qtd, *qtd_tmp;
	dwc_otg_qh_t *qh;
	dwc_otg_host_dma_desc_t *dma_desc;
	uint16_t idx, remain;
	uint8_t urb_compl;

	qh = hc->qh;
	idx = qh->td_first;

	if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
		DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list,
					 qtd_list_entry)
			qtd->in_process = 0;
		return;
	} else if ((halt_status == DWC_OTG_HC_XFER_AHB_ERR) ||
		   (halt_status == DWC_OTG_HC_XFER_BABBLE_ERR)) {
		/*
		 * Channel is halted in these error cases.
		 * Considered as serious issues.
		 * Complete all URBs marking all frames as failed,
		 * irrespective of whether some of the descriptors (frames)
		 * succeeded or not.
		 * Pass the error code to the completion routine as well, to
		 * update urb->status; some class drivers might use it to stop
		 * queueing transfer requests.
		 */
		int err = (halt_status == DWC_OTG_HC_XFER_AHB_ERR)
			? (-DWC_E_IO)
			: (-DWC_E_OVERFLOW);

		DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list,
					 qtd_list_entry) {
			for (idx = 0; idx < qtd->urb->packet_count; idx++) {
				frame_desc = &qtd->urb->iso_descs[idx];
				frame_desc->status = err;
			}
			hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, err);
			dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
		}
		return;
	}

	DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list,
				 qtd_list_entry) {
		if (!qtd->in_process)
			break;

		urb_compl = 0;

		do {
			dma_desc = &qh->desc_list[idx];

			frame_desc =
				&qtd->urb->iso_descs[qtd->isoc_frame_index];
			remain =
				hc->ep_is_in ? dma_desc->status.b_isoc.n_bytes : 0;

			if (dma_desc->status.b_isoc.sts == DMA_DESC_STS_PKTERR) {
				/*
				 * XactError or, unable to complete all the
				 * transactions in the scheduled
				 * micro-frame/frame, both indicated by
				 * DMA_DESC_STS_PKTERR.
				 */
				qtd->urb->error_count++;
				frame_desc->actual_length =
					qh->n_bytes[idx] - remain;
				frame_desc->status = -DWC_E_PROTOCOL;
			} else {
				/* Success */
				frame_desc->actual_length =
					qh->n_bytes[idx] - remain;
				frame_desc->status = 0;
			}

			if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
				/*
				 * urb->status is not used for isoc transfers here.
				 * The individual frame_desc status are used instead.
				 */
				hcd->fops->complete(hcd, qtd->urb->priv,
						    qtd->urb, 0);
				dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

				urb_compl = 1;

				/*
				 * This check is necessary because urb_dequeue can be called
				 * from urb complete callback (sound driver example).
				 * All pending URBs are dequeued there, so no need for
				 * further processing.
				 */
				if (hc->halt_status ==
				    DWC_OTG_HC_XFER_URB_DEQUEUE) {
					return;
				}
			}

			qh->ntd--;

			/* Stop if IOC requested descriptor reached */
			if (dma_desc->status.b_isoc.ioc) {
				idx =
					desclist_idx_inc(idx, qh->interval,
							 hc->speed);
				goto stop_scan;
			}

			idx = desclist_idx_inc(idx, qh->interval, hc->speed);

			if (urb_compl)
				break;
		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}

uint8_t update_non_isoc_urb_state_ddma(dwc_otg_hcd_t *hcd,
				       dwc_hc_t *hc,
				       dwc_otg_qtd_t *qtd,
				       dwc_otg_host_dma_desc_t *dma_desc,
				       dwc_otg_halt_status_e halt_status,
				       uint32_t n_bytes, uint8_t *xfer_done)
{
	uint16_t remain = hc->ep_is_in ? dma_desc->status.b.n_bytes : 0;
	dwc_otg_hcd_urb_t *urb = qtd->urb;

	if (halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
		urb->status = -DWC_E_IO;
		return 1;
	}

	if (dma_desc->status.b.sts == DMA_DESC_STS_PKTERR) {
		switch (halt_status) {
		case DWC_OTG_HC_XFER_STALL:
			urb->status = -DWC_E_PIPE;
			break;
		case DWC_OTG_HC_XFER_BABBLE_ERR:
			urb->status = -DWC_E_OVERFLOW;
			break;
		case DWC_OTG_HC_XFER_XACT_ERR:
			urb->status = -DWC_E_PROTOCOL;
			break;
		default:
			DWC_ERROR
				("%s: Unhandled descriptor error status (%d)\n",
				 __func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status.b.a == 1) {
		DWC_DEBUGPL(DBG_HCDV,
			    "Active descriptor encountered on channel %d\n",
			    hc->hc_num);
		return 0;
	}

	if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL) {
		if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length == urb->length) {
				/*
				 * For Control Data stage do not set
				 * urb->status = 0 to prevent URB callback.
				 * Set it when Status phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC_OTG_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		if (remain || urb->actual_length == urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}

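/*
 * Short-packet example: a 512-byte IN descriptor that receives only 200
 * bytes is written back with status.b.n_bytes == 312, so actual_length
 * grows by n_bytes - remain = 512 - 312 = 200 and the nonzero remainder
 * marks the transfer as done.
 */
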
static void complete_non_isoc_xfer_ddma(dwc_otg_hcd_t *hcd,
					dwc_hc_t *hc,
					dwc_otg_hc_regs_t *hc_regs,
					dwc_otg_halt_status_e halt_status)
{
	dwc_otg_hcd_urb_t *urb = NULL;
	dwc_otg_qtd_t *qtd, *qtd_tmp;
	dwc_otg_qh_t *qh;
	dwc_otg_host_dma_desc_t *dma_desc;
	uint32_t n_bytes, n_desc, i, qtd_n_desc;
	uint8_t failed = 0, xfer_done;

	n_desc = 0;

	qh = hc->qh;

	if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
		DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list,
					 qtd_list_entry) {
			qtd->in_process = 0;
		}
		return;
	}

	DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
		urb = qtd->urb;

		n_bytes = 0;
		xfer_done = 0;

		/* Cache n_desc: the QTD may be freed inside the loop below */
		qtd_n_desc = qtd->n_desc;
		for (i = 0; i < qtd_n_desc; i++) {
			dma_desc = &qh->desc_list[n_desc];

			n_bytes = qh->n_bytes[n_desc];

			failed =
				update_non_isoc_urb_state_ddma(hcd, hc, qtd,
							       dma_desc,
							       halt_status,
							       n_bytes,
							       &xfer_done);

			if (failed
			    || (xfer_done
				&& (urb->status != -DWC_E_IN_PROGRESS))) {
				hcd->fops->complete(hcd, urb->priv, urb,
						    urb->status);
				dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

				if (failed)
					goto stop_scan;
			} else if (qh->ep_type == UE_CONTROL) {
				if (qtd->control_phase == DWC_OTG_CONTROL_SETUP) {
					if (urb->length > 0) {
						qtd->control_phase =
							DWC_OTG_CONTROL_DATA;
					} else {
						qtd->control_phase =
							DWC_OTG_CONTROL_STATUS;
					}
					DWC_DEBUGPL(DBG_HCDV,
						    "  Control setup transaction done\n");
				} else if (qtd->control_phase ==
					   DWC_OTG_CONTROL_DATA) {
					if (xfer_done) {
						qtd->control_phase =
							DWC_OTG_CONTROL_STATUS;
						DWC_DEBUGPL(DBG_HCDV,
							    "  Control data transfer done\n");
					} else if (i + 1 == qtd->n_desc) {
						/*
						 * Last descriptor for Control
						 * data stage which is not
						 * completed yet.
						 */
						dwc_otg_hcd_save_data_toggle(hc,
									     hc_regs,
									     qtd);
					}
				}
			}

			n_desc++;
		}
	}

stop_scan:

	if (qh->ep_type != UE_CONTROL) {
		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC_OTG_HC_XFER_STALL)
			qh->data_toggle = DWC_OTG_HC_PID_DATA0;
		else
			dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
	}

	if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
		hcint_data_t hcint;

		hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
		if (hcint.b.nyet) {
			/*
			 * Got a NYET on the last transaction of the transfer. It
			 * means that the endpoint should be in the PING state at the
			 * beginning of the next transfer.
			 */
			qh->ping_state = 1;
			clear_hc_int(hc_regs, nyet);
		}
	}
}

/**
 * This function is called from interrupt handlers.
 * Scans the descriptor list, updates URB's status and
 * calls completion routine for the URB if it's done.
 * Releases the channel to be used by other transfers.
 * In case of Isochronous endpoint the channel is not halted until
 * the end of the session, i.e. QTD list is empty.
 * If a periodic channel is released the FrameList is updated accordingly.
 *
 * Calls transaction selection routines to activate pending transfers.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param hc Host channel the transfer is completed on.
 * @param hc_regs Host channel registers.
 * @param halt_status Reason the channel is being halted,
 *                    or just XferComplete for isochronous transfer.
 */
void dwc_otg_hcd_complete_xfer_ddma(dwc_otg_hcd_t *hcd,
				    dwc_hc_t *hc,
				    dwc_otg_hc_regs_t *hc_regs,
				    dwc_otg_halt_status_e halt_status)
{
	uint8_t continue_isoc_xfer = 0;
	dwc_otg_transaction_type_e tr_type;
	dwc_otg_qh_t *qh = hc->qh;

	if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
		complete_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC_OTG_HC_XFER_COMPLETE ||
		    DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {
			/* Halt the channel if session completed */
			if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
				dwc_otg_hc_halt(hcd->core_if, hc, halt_status);
			}

			release_channel_ddma(hcd, qh);
			dwc_otg_hcd_qh_remove(hcd, qh);
		} else {
			/* Keep in assigned schedule to continue transfer */
			DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
					   &qh->qh_list_entry);
			continue_isoc_xfer = 1;
		}
		/** @todo Consider the case when period exceeds FrameList size.
		 *  Frame Rollover interrupt should be used.
		 */
	} else {
		/* Scan descriptor list to complete the URB(s), then release the channel */
		complete_non_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

		release_channel_ddma(hcd, qh);
		dwc_otg_hcd_qh_remove(hcd, qh);

		if (!DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {
			/* Add back to inactive non-periodic schedule on normal completion */
			dwc_otg_hcd_qh_add(hcd, qh);
		}
	}

	tr_type = dwc_otg_hcd_select_transactions(hcd);
	if (tr_type != DWC_OTG_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC_OTG_TRANSACTION_NONE) {
				tr_type = DWC_OTG_TRANSACTION_PERIODIC;
			} else if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC) {
				tr_type = DWC_OTG_TRANSACTION_ALL;
			}
		}
		dwc_otg_hcd_queue_transactions(hcd, tr_type);
	}
}

#endif /* DWC_DEVICE_ONLY */