DWC_FREE(dwc_otg_urb);
+ usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(hcd), urb);
+
DWC_SPINUNLOCK(hcd->lock);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb);
dwc_otg_device_t *otg_dev = dwc_get_device_platform_data(_dev);
int retval = 0;
int irq;
- static u64 usb_dmamask = 0xffffffffUL;
DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n");
- /* Set device flags indicating whether the HCD supports DMA. */
- if (dwc_otg_is_dma_enable(otg_dev->core_if)) {
-
- _dev->dev.dma_mask = &usb_dmamask;
- _dev->dev.coherent_dma_mask = ~0;
- } else {
-
- _dev->dev.dma_mask = (void *)0;
- _dev->dev.coherent_dma_mask = 0;
- }
-
/*
* Allocate memory for the base HCD plus the DWC OTG HCD.
* Initialize the base HCD.
dwc_otg_device_t *otg_dev = dwc_get_device_platform_data(_dev);
int retval = 0;
int irq;
- static u64 usb_dmamask = 0xffffffffUL;
DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n");
- /* Set device flags indicating whether the HCD supports DMA. */
- if (dwc_otg_is_dma_enable(otg_dev->core_if)) {
-
- _dev->dev.dma_mask = &usb_dmamask;
- _dev->dev.coherent_dma_mask = ~0;
- } else {
-
- _dev->dev.dma_mask = (void *)0;
- _dev->dev.coherent_dma_mask = 0;
- }
-
/*
* Allocate memory for the base HCD plus the DWC OTG HCD.
* Initialize the base HCD.
int alloc_bandwidth = 0;
uint8_t ep_type = 0;
uint32_t flags = 0;
+ dwc_irqflags_t irq_flags;
void *buf;
#ifdef DEBUG
}
urb->hcpriv = dwc_otg_urb;
+
+ DWC_SPINLOCK_IRQSAVE(dwc_otg_hcd->lock, &irq_flags);
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ DWC_SPINUNLOCK_IRQRESTORE(dwc_otg_hcd->lock, irq_flags);
+ if (retval)
+ goto fail1;
+
retval = dwc_otg_hcd_urb_enqueue(dwc_otg_hcd, dwc_otg_urb, &ep->hcpriv,
mem_flags == GFP_ATOMIC ? 1 : 0);
- if (!retval) {
- if (alloc_bandwidth) {
- allocate_bus_bandwidth(hcd,
- dwc_otg_hcd_get_ep_bandwidth
- (dwc_otg_hcd, ep->hcpriv), urb);
- }
- } else {
- if (retval == -DWC_E_NO_DEVICE) {
+ if (retval) {
+ if (retval == -DWC_E_NO_DEVICE)
retval = -ENODEV;
- }
+ goto fail2;
+ }
+
+ if (alloc_bandwidth) {
+ allocate_bus_bandwidth(hcd, dwc_otg_hcd_get_ep_bandwidth
+ (dwc_otg_hcd, ep->hcpriv), urb);
}
+ return 0;
+fail2:
+	/* Undo the usb_hcd_link_urb_to_ep() done above before freeing the URB. */
+ DWC_SPINLOCK_IRQSAVE(dwc_otg_hcd->lock, &irq_flags);
+ dwc_otg_urb->priv = NULL;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ DWC_SPINUNLOCK_IRQRESTORE(dwc_otg_hcd->lock, irq_flags);
+fail1:
+ urb->hcpriv = NULL;
+ DWC_FREE(dwc_otg_urb);
return retval;
}
static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
#endif
{
+ int rc;
dwc_irqflags_t flags;
dwc_otg_hcd_t *dwc_otg_hcd;
DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n");
}
#endif
- if (((uint32_t) urb & 0xf0000000) == 0) {
+ if (!urb) {
DWC_PRINTF("%s error: urb is %p!!!\n", __func__, urb);
return 0;
}
DWC_SPINLOCK_IRQSAVE(dwc_otg_hcd->lock, &flags);
-
- if (((uint32_t) urb->hcpriv & 0xf0000000) == 0) {
- DWC_PRINTF("%s error: urb->hcpriv %p urb %p, count %d!!!\n",
- __func__, urb->hcpriv, urb,
- atomic_read(&urb->use_count));
- if ((atomic_read(&urb->use_count)) == 1)
- goto out2;
- else {
- DWC_SPINUNLOCK_IRQRESTORE(dwc_otg_hcd->lock, flags);
- return 0;
- }
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc) {
+ DWC_SPINUNLOCK_IRQRESTORE(dwc_otg_hcd->lock, flags);
+ return rc;
}
dwc_otg_hcd_urb_dequeue(dwc_otg_hcd, urb->hcpriv);
-
-out2:
DWC_FREE(urb->hcpriv);
urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
DWC_SPINUNLOCK_IRQRESTORE(dwc_otg_hcd->lock, flags);
/* Higher layer software sets URB status. */