static inline uint8_t frame_list_idx(uint16_t frame)
{
- return (frame & (MAX_FRLIST_EN_NUM - 1));
+ uint8_t ret;
+ ret = frame & (MAX_FRLIST_EN_NUM - 1);
+ return ret;
}
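+/*
+ * The index helpers below wrap within the descriptor list. Illustrative,
+ * assuming MAX_DMA_DESC_NUM_GENERIC == 64: idx 62 advanced by 4 gives
+ * (62 + 4) & 63 == 2.
+ */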
-static inline uint16_t desclist_idx_inc(uint16_t idx, uint16_t inc, uint8_t speed)
+static inline uint16_t desclist_idx_inc(uint16_t idx, uint16_t inc,
+ uint8_t speed)
{
return (idx + inc) &
(((speed == DWC_OTG_EP_SPEED_HIGH)
? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC) - 1);
}
-static inline uint16_t desclist_idx_dec(uint16_t idx, uint16_t inc, uint8_t speed)
+static inline uint16_t desclist_idx_dec(uint16_t idx, uint16_t inc,
+ uint8_t speed)
{
return (idx - inc) &
(((speed == DWC_OTG_EP_SPEED_HIGH)
? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC) - 1);
}
-static inline uint16_t max_desc_num(dwc_otg_qh_t * qh)
+static inline uint16_t max_desc_num(dwc_otg_qh_t *qh)
{
return (((qh->ep_type == UE_ISOCHRONOUS)
&& (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH))
? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC);
}
-static inline uint16_t frame_incr_val(dwc_otg_qh_t * qh)
+
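+/*
+ * For HS the interval is kept in uframes, so it is converted to frames,
+ * rounding up: e.g. (illustrative) interval == 12 uframes
+ * -> (12 + 8 - 1) / 8 == 2 frame-list entries apart.
+ */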
+static inline uint16_t frame_incr_val(dwc_otg_qh_t *qh)
{
return ((qh->dev_speed == DWC_OTG_EP_SPEED_HIGH)
? ((qh->interval + 8 - 1) / 8)
: qh->interval);
}
-static int desc_list_alloc(dwc_otg_qh_t * qh)
+static int desc_list_alloc(dwc_otg_qh_t *qh)
{
int retval = 0;
if (!qh->desc_list) {
retval = -DWC_E_NO_MEMORY;
- DWC_ERROR("%s: DMA descriptor list allocation failed\n", __func__);
-
+ DWC_ERROR("%s: DMA descriptor list allocation failed\n",
+ __func__);
+ /* Bail out: zeroing a NULL desc_list below would crash. */
+ return retval;
}
dwc_memset(qh->desc_list, 0x00,
}
-static void desc_list_free(dwc_otg_qh_t * qh)
+static void desc_list_free(dwc_otg_qh_t *qh)
{
if (qh->desc_list) {
DWC_DMA_FREE(max_desc_num(qh), qh->desc_list,
}
}
-static int frame_list_alloc(dwc_otg_hcd_t * hcd)
+static int frame_list_alloc(dwc_otg_hcd_t *hcd)
{
int retval = 0;
if (hcd->frame_list)
return retval;
}
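+/*
+ * Each frame_list entry is a 32-bit bitmap of host channel numbers
+ * (see update_frame_list() below), hence the 4 * MAX_FRLIST_EN_NUM
+ * bytes freed here.
+ */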
-static void frame_list_free(dwc_otg_hcd_t * hcd)
+static void frame_list_free(dwc_otg_hcd_t *hcd)
{
if (!hcd->frame_list)
return;
-
- DWC_DMA_FREE(4 * MAX_FRLIST_EN_NUM, hcd->frame_list, hcd->frame_list_dma);
+
+ DWC_DMA_FREE(4 * MAX_FRLIST_EN_NUM, hcd->frame_list,
+ hcd->frame_list_dma);
hcd->frame_list = NULL;
}
-static void per_sched_enable(dwc_otg_hcd_t * hcd, uint16_t fr_list_en)
+static void per_sched_enable(dwc_otg_hcd_t *hcd, uint16_t fr_list_en)
{
hcfg_data_t hcfg;
- hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);
+ hcfg.d32 =
+ DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);
if (hcfg.b.perschedena) {
/* already enabled */
hcfg.b.perschedena = 1;
DWC_DEBUGPL(DBG_HCD, "Enabling Periodic schedule\n");
- DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
+ DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg,
+ hcfg.d32);
}
-static void per_sched_disable(dwc_otg_hcd_t * hcd)
+static void per_sched_disable(dwc_otg_hcd_t *hcd)
{
hcfg_data_t hcfg;
- hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);
-
+ hcfg.d32 =
+ DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);
+
if (!hcfg.b.perschedena) {
/* already disabled */
return;
hcfg.b.perschedena = 0;
DWC_DEBUGPL(DBG_HCD, "Disabling Periodic schedule\n");
- DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
+ DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg,
+ hcfg.d32);
}
-/*
- * Activates/Deactivates FrameList entries for the channel
+/*
+ * Activates/Deactivates FrameList entries for the channel
* based on endpoint servicing period.
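+ * For example (illustrative): a channel with hc_num == 3 and a servicing
+ * period of 4 frames gets bit 3 set in every 4th frame_list entry.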
*/
-void update_frame_list(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh, uint8_t enable)
+void update_frame_list(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, uint8_t enable)
{
uint16_t i, j, inc;
dwc_hc_t *hc = NULL;
else
hcd->frame_list[j] &= ~(1 << hc->hc_num);
j = (j + inc) & (MAX_FRLIST_EN_NUM - 1);
- }
- while (j != i);
+ } while (j != i);
if (!enable)
return;
hc->schinfo = 0;
}
#if 1
-void dump_frame_list(dwc_otg_hcd_t * hcd)
+void dump_frame_list(dwc_otg_hcd_t *hcd)
{
int i = 0;
DWC_PRINTF("--FRAME LIST (hex) --\n");
}
#endif
-static void release_channel_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
+static void release_channel_ddma(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
dwc_hc_t *hc = qh->channel;
if (dwc_qh_is_non_per(qh))
else
update_frame_list(hcd, qh, 0);
- /*
+ /*
- * The condition is added to prevent double cleanup try in case of device
+ * The condition is added to prevent a double cleanup attempt on device
* disconnect. See channel cleanup in dwc_otg_hcd_disconnect_cb().
*/
}
}
-/**
+/**
* Initializes a QH structure's Descriptor DMA related members.
* Allocates memory for descriptor list.
- * On first periodic QH, allocates memory for FrameList
+ * On first periodic QH, allocates memory for FrameList
* and enables periodic scheduling.
*
* @param hcd The HCD state structure for the DWC OTG controller.
*
* @return 0 if successful, negative error code otherwise.
*/
-int dwc_otg_hcd_qh_init_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
+int dwc_otg_hcd_qh_init_ddma(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
int retval = 0;
if (qh->do_split) {
- DWC_ERROR("SPLIT Transfers are not supported in Descriptor DMA.\n");
- return -1;
- }
+ DWC_ERROR
+ ("SPLIT Transfers are not supported in Descriptor DMA.\n");
+ return -1;
+ }
retval = desc_list_alloc(qh);
return retval;
}
-/**
- * Frees descriptor list memory associated with the QH.
- * If QH is periodic and the last, frees FrameList memory
- * and disables periodic scheduling.
+/**
+ * Frees descriptor list memory associated with the QH.
+ * If QH is periodic and the last, frees FrameList memory
+ * and disables periodic scheduling.
*
* @param hcd The HCD state structure for the DWC OTG controller.
- * @param qh The QH to init.
+ * @param qh The QH to free.
*/
-void dwc_otg_hcd_qh_free_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
+void dwc_otg_hcd_qh_free_ddma(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
desc_list_free(qh);
- /*
- * Channel still assigned due to some reasons.
- * Seen on Isoc URB dequeue. Channel halted but no subsequent
- * ChHalted interrupt to release the channel. Afterwards
- * when it comes here from endpoint disable routine
+ /*
+ * Channel is still assigned for some reason.
+ * Seen on Isoc URB dequeue: the channel was halted, but no subsequent
+ * ChHalted interrupt arrived to release the channel. Afterwards,
+ * when this is reached from the endpoint disable routine
}
}
-static uint8_t frame_to_desc_idx(dwc_otg_qh_t * qh, uint16_t frame_idx)
+static uint8_t frame_to_desc_idx(dwc_otg_qh_t *qh, uint16_t frame_idx)
{
+ uint8_t ret;
+
if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
- /*
+ /*
* Descriptor set(8 descriptors) index
* which is 8-aligned.
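+ * Illustrative, assuming MAX_DMA_DESC_NUM_HS_ISOC == 256:
+ * frame_idx 35 -> (35 & 31) * 8 == descriptor index 24.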
*/
- return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
+ ret = (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
} else {
- return (frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1));
+ ret = frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}
+
+ return ret;
}
-/*
- * Determine starting frame for Isochronous transfer.
- * Few frames skipped to prevent race condition with HC.
+/*
+ * Determine the starting frame for an Isochronous transfer.
+ * A few frames are skipped to prevent a race condition with the HC.
*/
-static uint8_t calc_starting_frame(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
- uint8_t * skip_frames)
+static uint8_t calc_starting_frame(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh,
+ uint8_t *skip_frames)
{
uint16_t frame = 0;
hcd->frame_number = dwc_otg_hcd_get_frame_number(hcd);
-
+
- /* sched_frame is always frame number(not uFrame) both in FS and HS !! */
+ /* sched_frame is always a frame number (not a uFrame), both in FS and HS! */
-
- /*
+
+ /*
- * skip_frames is used to limit activated descriptors number
- * to avoid the situation when HC services the last activated
- * descriptor firstly.
- * up to 11 uframes(16 in the code) may be skipped.
+ * skip_frames is used to limit the number of activated descriptors,
+ * to avoid the situation where the HC services the last activated
+ * descriptor first.
+ * Up to 11 uframes (16 in the code) may be skipped.
*/
if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
- /*
- * Consider uframe counter also, to start xfer asap.
- * If half of the frame elapsed skip 2 frames otherwise
- * just 1 frame.
- * Starting descriptor index must be 8-aligned, so
- * if the current frame is near to complete the next one
- * is skipped as well.
+ /*
+ * Consider the uframe counter as well, to start the transfer ASAP.
+ * If half of the frame has elapsed, skip 2 frames, otherwise
+ * just 1 frame.
+ * The starting descriptor index must be 8-aligned, so
+ * if the current frame is near completion the next one
+ * is skipped as well.
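+ * Illustrative (assuming the low 3 bits of frame_number hold the
+ * uframe): at uframe 6, skip_frames == 2 * 8 and the start is then
+ * aligned down to a full frame boundary.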
if (dwc_micro_frame_num(hcd->frame_number) >= 5) {
*skip_frames = 2 * 8;
- frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
+ frame =
+ dwc_frame_num_inc(hcd->frame_number, *skip_frames);
} else {
*skip_frames = 1 * 8;
- frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
+ frame =
+ dwc_frame_num_inc(hcd->frame_number, *skip_frames);
}
frame = dwc_full_frame_num(frame);
} else {
- /*
+ /*
* Two frames are skipped for FS - the current and the next.
- * But for descriptor programming, 1 frame(descriptor) is enough,
+ * But for descriptor programming, 1 frame (descriptor) is enough,
* see example above.
return frame;
}
-/*
+/*
* Calculate initial descriptor index for isochronous transfer
- * based on scheduled frame.
+ * based on the scheduled frame.
*/
-static uint8_t recalc_initial_desc_idx(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
+static uint8_t recalc_initial_desc_idx(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
uint16_t frame = 0, fr_idx, fr_idx_tmp;
uint8_t skip_frames = 0;
- /*
+ /*
- * With current ISOC processing algorithm the channel is being
- * released when no more QTDs in the list(qh->ntd == 0).
- * Thus this function is called only when qh->ntd == 0 and qh->channel == 0.
+ * With the current ISOC processing algorithm the channel is
+ * released when there are no more QTDs in the list (qh->ntd == 0).
+ * Thus this function is called only when qh->ntd == 0 and
+ * qh->channel == 0.
*
- * So qh->channel != NULL branch is not used and just not removed from the
- * source file. It is required for another possible approach which is,
- * do not disable and release the channel when ISOC session completed,
- * just move QH to inactive schedule until new QTD arrives.
- * On new QTD, the QH moved back to 'ready' schedule,
- * starting frame and therefore starting desc_index are recalculated.
- * In this case channel is released only on ep_disable.
+ * So the qh->channel != NULL branch is not used, just not removed from
+ * the source file. It is required for another possible approach:
+ * do not disable and release the channel when the ISOC session is
+ * completed, just move the QH to the inactive schedule until a new
+ * QTD arrives. On a new QTD, the QH is moved back to the 'ready'
+ * schedule, and the starting frame and therefore the starting
+ * desc_index are recalculated. In this case the channel is released
+ * only on ep_disable.
/* Calculate starting descriptor index. For INTERRUPT endpoint it is always 0. */
if (qh->channel) {
frame = calc_starting_frame(hcd, qh, &skip_frames);
- /*
+ /*
* Calculate initial descriptor index based on FrameList current bitmap
* and servicing period.
*/
#define MAX_ISOC_XFER_SIZE_HS 3072
#define DESCNUM_THRESHOLD 4
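+/*
+ * MAX_ISOC_XFER_SIZE_HS is 3 * 1024 bytes - the most a high-bandwidth HS
+ * isochronous endpoint can transfer per microframe (up to 3 transactions
+ * of 1024 bytes each). DESCNUM_THRESHOLD controls when the IOC bit is
+ * moved "up" the descriptor list (see below).
+ */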
-static void init_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
+static void init_isoc_dma_desc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh,
uint8_t skip_frames)
{
struct dwc_otg_hcd_iso_packet_desc *frame_desc;
DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
while ((qh->ntd < ntd_max)
- && (qtd->isoc_frame_index_last <
- qtd->urb->packet_count)) {
+ && (qtd->isoc_frame_index_last < qtd->urb->packet_count)) {
dma_desc = &qh->desc_list[idx];
- dwc_memset(dma_desc, 0x00, sizeof(dwc_otg_host_dma_desc_t));
+ dwc_memset(dma_desc, 0x00,
+ sizeof(dwc_otg_host_dma_desc_t));
- frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
+ frame_desc =
+ &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
if (frame_desc->length > max_xfer_size)
qh->n_bytes[idx] = max_xfer_size;
qtd->isoc_frame_index_last++;
#ifdef ISOC_URB_GIVEBACK_ASAP
- /*
- * Set IOC for each descriptor corresponding to the
+ /*
+ * Set IOC for each descriptor corresponding to the
* last frame of the URB.
*/
if (qtd->isoc_frame_index_last ==
qh->desc_list[idx].status.b_isoc.ioc = 1;
}
#else
- /*
- * Set IOC bit only for one descriptor.
+ /*
+ * Set IOC bit only for one descriptor.
* Always try to be ahead of HW processing,
- * i.e. on IOC generation driver activates next descriptors but
- * core continues to process descriptors followed the one with IOC set.
+ * i.e. on IOC generation the driver activates the next descriptors,
+ * while the core continues to process descriptors following the
+ * one with IOC set.
*/
if (n_desc > DESCNUM_THRESHOLD) {
- /*
- * Move IOC "up". Required even if there is only one QTD
- * in the list, cause QTDs migth continue to be queued,
- * but during the activation it was only one queued.
- * Actually more than one QTD might be in the list if this function called
- * from XferCompletion - QTDs was queued during HW processing of the previous
- * descriptor chunk.
- */
+ /*
+ * Move IOC "up". Required even if there is only one QTD
+ * in the list, because QTDs might continue to be queued,
+ * though only one was queued at activation time.
+ * More than one QTD may actually be in the list if this
+ * function is called from XferCompletion - QTDs were
+ * queued during HW processing of the previous descriptor
+ * chunk.
+ */
- idx = dwc_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2), qh->dev_speed);
+ idx =
+ dwc_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
+ qh->dev_speed);
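+ /*
+ * Illustrative: with qh->ntd == 6 the IOC point moves back
+ * by inc * ((6 + 1) / 2) == inc * 3 descriptors.
+ */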
} else {
- /*
- * Set the IOC for the latest descriptor
- * if either number of descriptor is not greather than threshold
- * or no more new descriptors activated.
+ /*
+ * Set the IOC for the latest descriptor
+ * if either the number of descriptors is not greater
+ * than the threshold or no more new descriptors were
+ * activated.
#endif
}
-static void init_non_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
+static void init_non_isoc_dma_desc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
dwc_hc_t *hc;
hc = qh->channel;
- /*
- * Start with hc->xfer_buff initialized in
+ /*
+ * Start with hc->xfer_buff initialized in
- * assign_and_init_hc(), then if SG transfer consists of multiple URBs,
- * this pointer re-assigned to the buffer of the currently processed QTD.
- * For non-SG request there is always one QTD active.
+ * assign_and_init_hc(); then, if an SG transfer consists of multiple
+ * URBs, this pointer is re-assigned to the buffer of the currently
+ * processed QTD. For a non-SG request there is always one QTD active.
if (n_desc) {
/* SG request - more than 1 QTDs */
- hc->xfer_buff = (uint8_t *)qtd->urb->dma + qtd->urb->actual_length;
- hc->xfer_len = qtd->urb->length - qtd->urb->actual_length;
+ hc->xfer_buff =
+ (uint8_t *) qtd->urb->dma + qtd->urb->actual_length;
+ hc->xfer_len =
+ qtd->urb->length - qtd->urb->actual_length;
}
qtd->n_desc = 0;
if (hc->ep_is_in) {
if (len > 0) {
- num_packets = (len + hc->max_packet - 1) / hc->max_packet;
+ num_packets =
+ (len + hc->max_packet -
+ 1) / hc->max_packet;
} else {
/* Need 1 packet for transfer length of 0. */
num_packets = 1;
}
/* Always program an integral # of max packets for IN transfers. */
len = num_packets * hc->max_packet;
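+ /*
+ * Illustrative: max_packet == 512, len == 700
+ * -> num_packets == 2, so 1024 bytes are programmed.
+ */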
}
dma_desc->status.b.n_bytes = len;
dma_desc->buf =
((unsigned long)hc->xfer_buff & 0xffffffff);
- /*
- * Last descriptor(or single) of IN transfer
+ /*
+ * Last descriptor (or the single one) of an IN transfer
* with actual size less than MaxPacket.
*/
if (len > hc->xfer_len) {
qtd->n_desc++;
n_desc++;
- }
- while ((hc->xfer_len > 0) && (n_desc != MAX_DMA_DESC_NUM_GENERIC));
-
+ } while ((hc->xfer_len > 0)
+ && (n_desc != MAX_DMA_DESC_NUM_GENERIC));
qtd->in_process = 1;
}
}
-/**
+/**
* For Control and Bulk endpoints initializes descriptor list
* and starts the transfer.
*
* then updates FrameList, marking appropriate entries as active.
- * In case of Isochronous, the starting descriptor index is calculated based
- * on the scheduled frame, but only on the first transfer descriptor within a session.
- * Then starts the transfer via enabling the channel.
- * For Isochronous endpoint the channel is not halted on XferComplete
- * interrupt so remains assigned to the endpoint(QH) until session is done.
+ * In case of Isochronous, the starting descriptor index is calculated based
+ * on the scheduled frame, but only for the first transfer descriptor within
+ * a session. Then the transfer is started by enabling the channel.
+ * For an Isochronous endpoint the channel is not halted on the XferComplete
+ * interrupt, so it remains assigned to the endpoint (QH) until the session
+ * is done.
*
* @param hcd The HCD state structure for the DWC OTG controller.
*
* @return 0 if successful, negative error code otherwise.
*/
-void dwc_otg_hcd_start_xfer_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
+void dwc_otg_hcd_start_xfer_ddma(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
/* Channel is already assigned */
dwc_hc_t *hc = qh->channel;
update_frame_list(hcd, qh, 1);
- /*
+ /*
* Always set to max, instead of actual size.
- * Otherwise ntd will be changed with
- * channel being enabled. Not recommended.
- *
+ * Otherwise ntd would need to be updated each time the
+ * channel is enabled. Not recommended.
*/
}
}
-static void complete_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
- dwc_hc_t * hc,
- dwc_otg_hc_regs_t * hc_regs,
+static void complete_isoc_xfer_ddma(dwc_otg_hcd_t *hcd,
+ dwc_hc_t *hc,
+ dwc_otg_hc_regs_t *hc_regs,
dwc_otg_halt_status_e halt_status)
{
struct dwc_otg_hcd_iso_packet_desc *frame_desc;
idx = qh->td_first;
if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
- DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry)
+ DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list,
+ qtd_list_entry)
qtd->in_process = 0;
return;
} else if ((halt_status == DWC_OTG_HC_XFER_AHB_ERR) ||
(halt_status == DWC_OTG_HC_XFER_BABBLE_ERR)) {
- /*
+ /*
* Channel is halted in these error cases.
* Considered as serious issues.
- * Complete all URBs marking all frames as failed,
- * irrespective whether some of the descriptors(frames) succeeded or no.
+ * Complete all URBs marking all frames as failed,
+ * irrespective of whether some of the descriptors (frames)
+ * succeeded or not.
* Pass error code to completion routine as well, to
* update urb->status, some of class drivers might use it to stop
int err = (halt_status == DWC_OTG_HC_XFER_AHB_ERR)
? (-DWC_E_IO)
: (-DWC_E_OVERFLOW);
-
- DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
+
+ DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list,
+ qtd_list_entry) {
for (idx = 0; idx < qtd->urb->packet_count; idx++) {
frame_desc = &qtd->urb->iso_descs[idx];
frame_desc->status = err;
return;
}
- DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
+ DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list,
+ qtd_list_entry) {
if (!qtd->in_process)
break;
do {
dma_desc = &qh->desc_list[idx];
-
- frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
- remain = hc->ep_is_in ? dma_desc->status.b_isoc.n_bytes : 0;
+
+ frame_desc =
+ &qtd->urb->iso_descs[qtd->isoc_frame_index];
+ remain =
+ hc->ep_is_in ? dma_desc->status.b_isoc.n_bytes : 0;
if (dma_desc->status.b_isoc.sts == DMA_DESC_STS_PKTERR) {
- /*
- * XactError or, unable to complete all the transactions
- * in the scheduled micro-frame/frame,
- * both indicated by DMA_DESC_STS_PKTERR.
+ /*
+ * XactError, or failure to complete all the
+ * transactions in the scheduled micro-frame/frame,
+ * both indicated by DMA_DESC_STS_PKTERR.
*/
qtd->urb->error_count++;
- frame_desc->actual_length = qh->n_bytes[idx] - remain;
+ frame_desc->actual_length =
+ qh->n_bytes[idx] - remain;
frame_desc->status = -DWC_E_PROTOCOL;
} else {
/* Success */
-
- frame_desc->actual_length = qh->n_bytes[idx] - remain;
+
+ frame_desc->actual_length =
+ qh->n_bytes[idx] - remain;
frame_desc->status = 0;
}
- * The individual frame_desc status are used instead.
+ * The individual frame_desc statuses are used instead.
*/
- hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
+ hcd->fops->complete(hcd, qtd->urb->priv,
+ qtd->urb, 0);
dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
- /*
- * This check is necessary because urb_dequeue can be called
+ /*
+ * This check is necessary because urb_dequeue can be called
- * from urb complete callback(sound driver example).
+ * from the urb complete callback (sound driver example).
* All pending URBs are dequeued there, so no need for
* further processing.
*/
- if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
+ if (hc->halt_status ==
+ DWC_OTG_HC_XFER_URB_DEQUEUE) {
return;
}
/* Stop if IOC requested descriptor reached */
if (dma_desc->status.b_isoc.ioc) {
- idx = desclist_idx_inc(idx, qh->interval, hc->speed);
+ idx =
+ desclist_idx_inc(idx, qh->interval,
+ hc->speed);
goto stop_scan;
}
if (urb_compl)
break;
- }
- while (idx != qh->td_first);
+ } while (idx != qh->td_first);
}
stop_scan:
qh->td_first = idx;
}
-uint8_t update_non_isoc_urb_state_ddma(dwc_otg_hcd_t * hcd,
- dwc_hc_t * hc,
- dwc_otg_qtd_t * qtd,
- dwc_otg_host_dma_desc_t * dma_desc,
+uint8_t update_non_isoc_urb_state_ddma(dwc_otg_hcd_t *hcd,
+ dwc_hc_t *hc,
+ dwc_otg_qtd_t *qtd,
+ dwc_otg_host_dma_desc_t *dma_desc,
dwc_otg_halt_status_e halt_status,
- uint32_t n_bytes, uint8_t * xfer_done)
+ uint32_t n_bytes, uint8_t *xfer_done)
{
uint16_t remain = hc->ep_is_in ? dma_desc->status.b.n_bytes : 0;
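+ /*
+ * For IN transfers the HC writes back the untransferred byte count,
+ * so the bytes received are n_bytes - remain (used below).
+ */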
case DWC_OTG_HC_XFER_XACT_ERR:
urb->status = -DWC_E_PROTOCOL;
break;
- default:
- DWC_ERROR("%s: Unhandled descriptor error status (%d)\n", __func__,
- halt_status);
+ default:
+ DWC_ERROR
+ ("%s: Unhandled descriptor error status (%d)\n",
+ __func__, halt_status);
break;
}
return 1;
if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
urb->actual_length += n_bytes - remain;
if (remain || urb->actual_length == urb->length) {
- /*
+ /*
- * For Control Data stage do not set urb->status=0 to prevent
- * URB callback. Set it when Status phase done. See below.
+ * For the Control Data stage do not set urb->status = 0, to
+ * prevent the URB callback. Set it when the Status phase is
+ * done. See below.
*/
return 0;
}
-static void complete_non_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
- dwc_hc_t * hc,
- dwc_otg_hc_regs_t * hc_regs,
+static void complete_non_isoc_xfer_ddma(dwc_otg_hcd_t *hcd,
+ dwc_hc_t *hc,
+ dwc_otg_hc_regs_t *hc_regs,
dwc_otg_halt_status_e halt_status)
{
dwc_otg_hcd_urb_t *urb = NULL;
qh = hc->qh;
if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
- DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
+ DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list,
+ qtd_list_entry) {
qtd->in_process = 0;
}
return;
} else if (qh->ep_type == UE_CONTROL) {
if (qtd->control_phase == DWC_OTG_CONTROL_SETUP) {
if (urb->length > 0) {
- qtd->control_phase = DWC_OTG_CONTROL_DATA;
+ qtd->control_phase =
+ DWC_OTG_CONTROL_DATA;
} else {
- qtd->control_phase = DWC_OTG_CONTROL_STATUS;
+ qtd->control_phase =
+ DWC_OTG_CONTROL_STATUS;
}
- DWC_DEBUGPL(DBG_HCDV, " Control setup transaction done\n");
- } else if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
+ DWC_DEBUGPL(DBG_HCDV,
+ " Control setup transaction done\n");
+ } else if (qtd->control_phase ==
+ DWC_OTG_CONTROL_DATA) {
if (xfer_done) {
- qtd->control_phase = DWC_OTG_CONTROL_STATUS;
- DWC_DEBUGPL(DBG_HCDV, " Control data transfer done\n");
+ qtd->control_phase =
+ DWC_OTG_CONTROL_STATUS;
+ DWC_DEBUGPL(DBG_HCDV,
+ " Control data transfer done\n");
} else if (i + 1 == qtd->n_desc) {
- /*
+ /*
* Last descriptor for Control data stage which is
* not completed yet.
*/
- dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+ dwc_otg_hcd_save_data_toggle(hc,
+ hc_regs,
+ qtd);
}
}
}
stop_scan:
if (qh->ep_type != UE_CONTROL) {
- /*
+ /*
- * Resetting the data toggle for bulk
- * and interrupt endpoints in case of stall. See handle_hc_stall_intr()
+ * Reset the data toggle for bulk and interrupt endpoints
+ * in case of a stall. See handle_hc_stall_intr().
*/
if (halt_status == DWC_OTG_HC_XFER_STALL)
qh->data_toggle = DWC_OTG_HC_PID_DATA0;
* Scans the descriptor list, updates URB's status and
* calls completion routine for the URB if it's done.
* Releases the channel to be used by other transfers.
- * In case of Isochronous endpoint the channel is not halted until
- * the end of the session, i.e. QTD list is empty.
- * If periodic channel released the FrameList is updated accordingly.
+ * In case of an Isochronous endpoint the channel is not halted until
+ * the end of the session, i.e. until the QTD list is empty.
+ * If a periodic channel is released, the FrameList is updated accordingly.
*
* @param hcd The HCD state structure for the DWC OTG controller.
* @param hc Host channel, the transfer is completed on.
* @param hc_regs Host channel registers.
- * @param halt_status Reason the channel is being halted,
+ * @param halt_status Reason the channel is being halted,
* or just XferComplete for isochronous transfer
*/
-void dwc_otg_hcd_complete_xfer_ddma(dwc_otg_hcd_t * hcd,
- dwc_hc_t * hc,
- dwc_otg_hc_regs_t * hc_regs,
+void dwc_otg_hcd_complete_xfer_ddma(dwc_otg_hcd_t *hcd,
+ dwc_hc_t *hc,
+ dwc_otg_hc_regs_t *hc_regs,
dwc_otg_halt_status_e halt_status)
{
uint8_t continue_isoc_xfer = 0;
}
-/** @todo Consider the case when period exceeds FrameList size.
- * Frame Rollover interrupt should be used.
+/** @todo Consider the case when the period exceeds the FrameList size.
+ * The Frame Rollover interrupt should be used.
*/
} else {
/* Scan descriptor list to complete the URB(s), then release the channel */