core_if->hcd_cb->suspend( core_if->hcd_cb->p, 0);
}
}
- udelay(1);
+ udelay(3);
+ #ifndef CONFIG_DWC_REMOTE_WAKEUP
clk_disable(core_if->otg_dev->phyclk);
clk_disable(core_if->otg_dev->ahbclk);
+ #endif
//power off
return 0;
}
DWC_PRINT("%s, usb device mode\n", __func__);
return 0;
}
+ #ifndef CONFIG_DWC_REMOTE_WAKEUP
clk_enable(core_if->otg_dev->phyclk);
clk_enable(core_if->otg_dev->ahbclk);
-
+ #endif
//partial power-down
//power on
pcgcctl.d32 = dwc_read_reg32(core_if->pcgcctl);;
hprt0.b.prtconndet = 1;
dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
- hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
+ //hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
//DWC_PRINT("%s, HPRT0:0x%x\n",hcd->self.bus_name,hprt0.d32);
- gintmsk.b.portintr = 1;
- dwc_write_reg32(&core_if->core_global_regs->gintmsk, gintmsk.d32);
mdelay(10);
}
else
core_if->hcd_cb->suspend( core_if->hcd_cb->p, 1);
}
}
+ gintmsk.b.portintr = 1;
+ dwc_write_reg32(&core_if->core_global_regs->gintmsk, gintmsk.d32);
return 0;
}
dwc_otg_qh_t *qh;
struct list_head *qtd_item;
dwc_otg_qtd_t *qtd;
+ struct urb *urb;
+ struct usb_host_endpoint *ep;
list_for_each(qh_item, _qh_list) {
qh = list_entry(qh_item, dwc_otg_qh_t, qh_list_entry);
qtd_item = qh->qtd_list.next) {
qtd = list_entry(qtd_item, dwc_otg_qtd_t, qtd_list_entry);
if (qtd->urb != NULL) {
+ urb = qtd->urb;
+ ep = qtd->urb->ep;
+ // 20110415 yk
+ // urb will be re entry to ep->urb_list if use ETIMEOUT
dwc_otg_hcd_complete_urb(_hcd, qtd->urb,
- -ETIMEDOUT);
+ -ETIMEDOUT);//ESHUTDOWN
+
+ //if(!list_empty(&ep->urb_list))
+ DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n",
+ __func__, urb, usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "IN" : "OUT", urb->unlinked);
+
+ if (!urb->unlinked)
+ urb->unlinked = -ESHUTDOWN;
+
}
dwc_otg_hcd_qtd_remove_and_free(qtd);
}
}
}
-#if 0
+
/**
* Responds with an error status of ETIMEDOUT to all URBs in the non-periodic
* and periodic schedules. The QTD associated with each URB is removed from
kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_assigned);
kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_queued);
}
-#endif
+
/**
* HCD Callback function for disconnect of the HCD.
*
static int32_t dwc_otg_hcd_disconnect_cb( void *_p )
{
gintsts_data_t intr;
- dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_p);
+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_p);
- //DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
+ //DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
- /*
- * Set status flags for the hub driver.
- */
-// DWC_PRINT("dwc_otg_hcd_disconnect_cb");
- dwc_otg_hcd->flags.b.port_connect_status_change = 1;
+ /*
+ * Set status flags for the hub driver.
+ */
+ dwc_otg_hcd->flags.b.port_connect_status_change = 1;
dwc_otg_hcd->flags.b.port_connect_status = 0;
- /*
- * Shutdown any transfers in process by clearing the Tx FIFO Empty
- * interrupt mask and status bits and disabling subsequent host
- * channel interrupts.
- */
- intr.d32 = 0;
- intr.b.nptxfempty = 1;
- intr.b.ptxfempty = 1;
+ /*
+ * Shutdown any transfers in process by clearing the Tx FIFO Empty
+ * interrupt mask and status bits and disabling subsequent host
+ * channel interrupts.
+ */
+ intr.d32 = 0;
+ intr.b.nptxfempty = 1;
+ intr.b.ptxfempty = 1;
intr.b.hcintr = 1;
- dwc_modify_reg32 (&dwc_otg_hcd->core_if->core_global_regs->gintmsk, intr.d32, 0);
- dwc_modify_reg32 (&dwc_otg_hcd->core_if->core_global_regs->gintsts, intr.d32, 0);
+ dwc_modify_reg32 (&dwc_otg_hcd->core_if->core_global_regs->gintmsk, intr.d32, 0);
+ dwc_modify_reg32 (&dwc_otg_hcd->core_if->core_global_regs->gintsts, intr.d32, 0);
del_timers(dwc_otg_hcd);
}
/* Respond with an error status to all URBs in the schedule. */
- // yk@20101227 handle kernel panic bug when disconnect
- //kill_all_urbs(dwc_otg_hcd);
+ kill_all_urbs(dwc_otg_hcd);
if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) {
/* Clean up any host channels that were in use. */
}
}
- /* A disconnect will end the session so the B-Device is no
- * longer a B-host. */
- ((struct usb_hcd *)_p)->self.is_b_host = 0;
- return 1;
+ /* A disconnect will end the session so the B-Device is no
+ * longer a B-host. */
+ ((struct usb_hcd *)_p)->self.is_b_host = 0;
+ return 1;
}
/**
dwc_otg_hcd_t *dwc_otg_hcd = (dwc_otg_hcd_t*)data;
dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
hprt0_data_t hprt0;
-
+
DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n");
hprt0.d32 = dwc_otg_read_hprt0 (core_if);
dev->coherent_dma_mask = 0;
}
#endif
- DWC_PRINT("dwc_otg_hcd_init everest\n");
// g_dbg_lvl = 0xff;
/*
goto error3;
}
- DWC_PRINT("%s end,everest\n",__func__);
// DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Initialized HCD, bus=%s, usbbus=%d\n",
// dev->bus_id, hcd->self.busnum);
dev->coherent_dma_mask = 0;
}
#endif
- DWC_PRINT("%s everest\n",__func__);
// g_dbg_lvl = 0xff;
DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n");
goto error3;
}
- DWC_PRINT("%s end,everest\n",__func__);
// DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Initialized HCD, bus=%s, usbbus=%d\n",
// dev->bus_id, hcd->self.busnum);
dev->coherent_dma_mask = 0;
}
#endif
- DWC_PRINT("%s everest\n",__func__);
// g_dbg_lvl = 0xff;
DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n");
goto error3;
}
- DWC_PRINT("%s end,everest\n",__func__);
return 0;
DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n");
spin_lock_irqsave(&dwc_otg_hcd->global_lock, flags);
- DWC_PRINT("dwc_otg_hcd_start! everest\n");
bus = hcd_to_bus(_hcd);
_hcd->state = HC_STATE_RUNNING;
dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
dwc_otg_qtd_t * qtd;
unsigned long flags;
-
+#if 0
retval = usb_hcd_link_urb_to_ep(_hcd, _urb);
if (retval)
{
DWC_PRINT("%s, usb_hcd_link_urb_to_ep error\n", __func__);
return retval;
}
+#endif
spin_lock_irqsave(&dwc_otg_hcd->global_lock, flags);
#if 1
/*
dwc_otg_qtd_t * urb_qtd;
dwc_otg_qh_t * qh;
struct usb_host_endpoint *_ep = _urb->ep;//dwc_urb_to_endpoint(_urb);
- int retval;
+ //int retval;
DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n");
urb_qtd = (dwc_otg_qtd_t *) _urb->hcpriv;
}
qh = (dwc_otg_qh_t *) _ep->hcpriv;
spin_lock_irqsave(&dwc_otg_hcd->global_lock, flags);
+ #if 0
retval = usb_hcd_check_unlink_urb(_hcd, _urb, _status);
if (retval) {
spin_unlock_irqrestore(&dwc_otg_hcd->global_lock, flags);
return retval;
}
+ #endif
if(urb_qtd == NULL)
{
DWC_PRINT("%s,urb_qtd is null\n",__func__);
urb_qtd_null:
spin_unlock_irqrestore(&dwc_otg_hcd->global_lock, flags);
_urb->hcpriv = NULL;
- usb_hcd_unlink_urb_from_ep(_hcd, _urb);
+ //usb_hcd_unlink_urb_from_ep(_hcd, _urb);
/* Higher layer software sets URB status. */
usb_hcd_giveback_urb(_hcd, _urb, _status);
if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
port_status |= (1 << USB_PORT_FEAT_POWER);
if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED)
- port_status |= (1 << USB_PORT_FEAT_HIGHSPEED);
+ port_status |= USB_PORT_STAT_HIGH_SPEED;
else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED)
- port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
+ port_status |= USB_PORT_STAT_LOW_SPEED;
if (hprt0.b.prttstctl)
port_status |= (1 << USB_PORT_FEAT_TEST);
hprt0.b.prtrst = 1;
dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
}
+ spin_unlock_irqrestore(&dwc_otg_hcd->global_lock, flags);
/* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
- MDELAY (60);
+ // kever @rk 20110712
+ // can not use mdelay(60) while irq disable
+ //MDELAY (60);
+ msleep(60);
+ spin_lock_irqsave(&dwc_otg_hcd->global_lock, flags);
hprt0.b.prtrst = 0;
dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
break;
* periodic assigned schedule.
*/
qh_ptr = qh_ptr->next;
- list_move(&qh->qh_list_entry, &_hcd->periodic_sched_assigned);
+ list_move_tail(&qh->qh_list_entry, &_hcd->periodic_sched_assigned);
ret_val = DWC_OTG_TRANSACTION_PERIODIC;
}
* non-periodic active schedule.
*/
qh_ptr = qh_ptr->next;
- list_move(&qh->qh_list_entry, &_hcd->non_periodic_sched_active);
+ list_move_tail(&qh->qh_list_entry, &_hcd->non_periodic_sched_active);
if (ret_val == DWC_OTG_TRANSACTION_NONE) {
ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC;
* Move the QH from the periodic assigned schedule to
* the periodic queued schedule.
*/
- list_move(&qh->qh_list_entry, &_hcd->periodic_sched_queued);
+ list_move_tail(&qh->qh_list_entry, &_hcd->periodic_sched_queued);
/* done queuing high bandwidth */
_hcd->core_if->queuing_high_bandwidth = 0;
_urb->hcpriv = NULL;
- usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(_hcd), _urb);
+ //usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(_hcd), _urb);
spin_unlock(&_hcd->lock);
usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(_hcd), _urb, _status);
spin_lock(&_hcd->lock);
}
-
+/*
+ * dwc_otg_clear_halt - reset the data-toggle state for the endpoint an
+ * URB is queued on.
+ *
+ * @_urb: URB whose endpoint's toggle should be cleared.
+ *
+ * Looks up the QH that the core stashed in the host endpoint's hcpriv
+ * and zeroes its data_toggle field (0 presumably selects DATA0 -- TODO
+ * confirm against the DWC_OTG_HC_PID_* definitions).  Both the endpoint
+ * pointer and its hcpriv are NULL-checked, so this is safe to call for
+ * an URB whose endpoint has no QH allocated yet.
+ * NOTE(review): callers and locking context are not visible in this
+ * hunk -- verify the QH cannot be freed concurrently with this call.
+ */
+void dwc_otg_clear_halt(struct urb *_urb)
+{
+	struct dwc_otg_qh *_qh;
+	struct usb_host_endpoint *ep = dwc_urb_to_endpoint(_urb);
+	if((ep)&&(ep->hcpriv))
+	{
+		_qh = (dwc_otg_qh_t *) ep->hcpriv;
+		/* DATA0 after a cleared halt, per USB toggle-reset semantics */
+		_qh->data_toggle = 0;
+	}
+}
/*
* Returns the Queue Head for an URB.
*/