/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
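/*
 * PORT_WAKE_BITS collects the per-port wake-on enables from the PORTSC
 * register (wake on over-current, disconnect, and connect); they are cleared
 * on every root port by xhci_disable_port_wake_on_bits() below when system
 * wakeup is not allowed.
 */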
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned int quirks;
module_param(quirks, uint, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
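/*
 * Usage note (not in the original source): given S_IRUGO | S_IWUSR above,
 * link_quirk can also be toggled at runtime through sysfs, e.g.
 * /sys/module/xhci_hcd/parameters/link_quirk (the exact module directory
 * name depends on how the driver is built), while quirks is read-only.
 */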
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: the "usec" timeout has
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
		if (result == ~(u32)0)		/* card removed */
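/*
 * (The body is largely elided above.  As a minimal sketch of the bounded
 * poll the comment describes -- an illustration, not necessarily the exact
 * upstream code -- the loop reads roughly:
 *
 *	do {
 *		result = readl(ptr);
 *		if (result == ~(u32)0)
 *			return -ENODEV;		// hardware removed
 *		result &= mask;
 *		if (result == done)
 *			return 0;		// handshake done
 *		udelay(1);
 *		usec--;
 *	} while (usec > 0);
 *	return -ETIMEDOUT;
 * )
 */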
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
	halted = readl(&xhci->op_regs->status) & STS_HALT;

	cmd = readl(&xhci->op_regs->command);
	writel(cmd, &xhci->op_regs->command);
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
	temp = readl(&xhci->op_regs->command);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
/*
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
	state = readl(&xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);
	/* Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit, and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access.  Without this delay, the subsequent HC
	 * register access may very rarely result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
	ret = xhci_handshake(&xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
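	/* The handshake above waits for the controller itself to clear
	 * CMD_RESET once the reset completes, allowing it up to
	 * 10 * 1000 * 1000 microseconds (10 seconds) to do so.
	 */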
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);
	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
static int xhci_free_msi(struct xhci_hcd *xhci)
	if (!xhci->msix_entries)

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
static int xhci_setup_msi(struct xhci_hcd *xhci)
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"failed to allocate MSI entry");

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"disable MSI interrupt");
		pci_disable_msi(pdev);
/*
 * Free all requested IRQs.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)

	ret = xhci_free_msi(xhci);
	free_irq(pdev->irq, xhci_to_hcd(xhci));
static int xhci_setup_msix(struct xhci_hcd *xhci)
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported:
	 * - HCS_MAX_INTRS: the maximum number of interrupts the host can
	 *   handle, i.e. the number of interrupters advertised in the xHCI
	 *   HCSPARAMS1 register.
	 * - num_online_cpus: one MSI-X vector per CPU core, plus one
	 *   additional vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));
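	/*
	 * Worked example (illustrative): on an 8-CPU system this asks for
	 * min(8 + 1, HCS_MAX_INTRS) vectors, so a host whose HCSPARAMS1
	 * advertises only 4 interrupters is capped at 4.
	 */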
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;

	ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count);
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				0, "xhci_hcd", xhci_to_hcd(xhci));

	hcd->msix_enabled = 1;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	pci_disable_msix(pdev);
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->quirks & XHCI_PLAT)

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
		pci_disable_msi(pdev);

	hcd->msix_enabled = 0;
static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
static int xhci_try_enable_msi(struct usb_hcd *hcd)
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev;

	/* The xhci platform device has set up IRQs through usb_add_hcd. */
	if (xhci->quirks & XHCI_PLAT)

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)

	/* unregister the legacy interrupt */
		free_irq(hcd->irq, hcd);

	ret = xhci_setup_msix(xhci);
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

		/* hcd->irq is 0, we have MSI */

	xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");

	if (!strlen(hcd->irq_descr))
		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
				hcd->driver->description, hcd->self.busnum);

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
		xhci_err(xhci, "request interrupt %d failed\n",
	hcd->irq = pdev->irq;
static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
static void compliance_mode_recovery(unsigned long arg)
	struct xhci_hcd *xhci;

	xhci = (struct xhci_hcd *)arg;

	for (i = 0; i < xhci->num_usb3_ports; i++) {
		temp = readl(xhci->usb3_ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected.  Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);

	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
/*
 * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0
 * re-driver that sometimes causes ports behind that hardware to enter
 * compliance mode.  The quirk creates a timer that polls the link state of
 * each of the host controller's ports every 2 seconds and recovers a port by
 * issuing a Warm Reset if compliance mode is detected; otherwise the port
 * becomes "dead" (no device connections or disconnections will be detected
 * anymore).  Because no status event is generated when entering compliance
 * mode (per the xHCI spec), this quirk is needed on systems that have the
 * failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
	xhci->port_status_u0 = 0;
	setup_timer(&xhci->comp_mode_recovery_timer,
			compliance_mode_recovery, (unsigned long)xhci);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	set_timer_slack(&xhci->comp_mode_recovery_timer,
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 *
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620, Z820 and Z1 Workstation
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
	return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports) - 1));
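/*
 * Example (illustrative): on a host with 4 USB3 ports, all ports have been
 * seen in U0 once port_status_u0 == (1 << 4) - 1 == 0xf, at which point the
 * recovery timer above stops re-arming itself.
 */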
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initialize compliance mode recovery data if needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
	if (xhci_start(xhci)) {
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB3 roothub");
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0
	 * roothub is set up.
	 */
	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	writel(temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = readl(&xhci->op_regs->command);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enable interrupts, cmd = 0x%x.", temp);
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);
	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;
		command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
		xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB2 roothub");
EXPORT_SYMBOL_GPL(xhci_run);
/*
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->xhc_state & XHCI_STATE_HALTED)

	mutex_lock(&xhci->mutex);
	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",

	if (xhci->quirks & XHCI_AMD_PLL_FIX)

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel(temp & ~STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

	spin_lock_irq(&xhci->lock);
	/* Workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));

	/* Yet another workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
static void xhci_save_registers(struct xhci_hcd *xhci)
	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);

static void xhci_restore_registers(struct xhci_hcd *xhci)
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer over suspend, so we need
 * to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
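	/*
	 * Worked example (illustrative): with a single segment of
	 * TRBS_PER_SEGMENT == 64 TRBs, the last TRB of the segment is the
	 * link TRB and one more slot is kept unused so a full ring can be
	 * told from an empty one, leaving 1 * (64 - 1) - 1 = 62 free TRBs.
	 */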
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
	__le32 __iomem **port_array;

	spin_lock_irqsave(&xhci->lock, flags);

	/* disable wake bits on the USB3 ports */
	port_index = xhci->num_usb3_ports;
	port_array = xhci->usb3_ports;
	while (port_index--) {
		t1 = readl(port_array[port_index]);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
			writel(t2, port_array[port_index]);

	/* disable wake bits on the USB2 ports */
	port_index = xhci->num_usb2_ports;
	port_array = xhci->usb2_ports;
	while (port_index--) {
		t1 = readl(port_array[port_index]);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
			writel(t2, port_array[port_index]);

	spin_unlock_irqrestore(&xhci->lock, flags);
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
	unsigned int delay = XHCI_MAX_HALT_USEC;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)

	/* Clear root port wake on bits if wakeup not allowed. */
		xhci_disable_port_wake_on_bits(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	del_timer_sync(&xhci->shared_hcd->rh_timer);
	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	writel(command, &xhci->op_regs->command);
	if (xhci_handshake(&xhci->op_regs->status,
			STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
	spin_unlock_irq(&xhci->lock);
	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

EXPORT_SYMBOL_GPL(xhci_suspend);
/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
	u32 command, temp = 0, status;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	bool comp_timer_running = false;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)

	/* step 1: restore register */
	xhci_restore_registers(xhci);
	/* step 2: initialize command ring buffer */
	xhci_set_cmd_ring_deq(xhci);
	/* step 3: restore state and start state */
	/* step 3: set CRS flag */
	command = readl(&xhci->op_regs->command);
	writel(command, &xhci->op_regs->command);
	if (xhci_handshake(&xhci->op_regs->status,
			STS_RESTORE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC restore state timeout\n");
		spin_unlock_irq(&xhci->lock);
	temp = readl(&xhci->op_regs->status);

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance Mode Recovery Timer deleted!");

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel(temp & ~STS_EINT, &xhci->op_regs->status);
		temp = readl(&xhci->ir_set->irq_pending);
		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				readl(&xhci->op_regs->status));
		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
			comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);
	/* Resume root hubs only when there are pending events. */
	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT) {
		usb_hcd_resume_root_hub(hcd);
		usb_hcd_resume_root_hub(xhci->shared_hcd);

	/*
	 * If the system is subject to the quirk, the compliance mode timer
	 * always needs to be re-initialized after a system resume, because
	 * the ports can suffer the compliance mode issue again.  This holds
	 * even if the ports had already entered U0 before the system was
	 * suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);
	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);
	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	usb_hcd_poll_rh_status(xhci->shared_hcd);

EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/

/*
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 *
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
/* The reverse operation to xhci_get_endpoint_index.  Calculate the USB
 * endpoint address from the xHCI endpoint index.
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
	return direction | number;
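/*
 * Worked example (illustrative, not in the original source): bulk IN
 * endpoint 0x81 has epnum 1 and direction 1, so its index is
 * (1 * 2) + 1 - 1 = 2; bulk OUT endpoint 0x02 maps to (2 * 2) + 0 - 1 = 3.
 * Going back: index 2 -> DIV_ROUND_UP(2, 2) = 1 with 2 % 2 == 0 -> IN,
 * i.e. 0x81, and index 3 -> DIV_ROUND_UP(3, 2) = 2 with 3 % 2 == 1 -> OUT,
 * i.e. 0x02.
 */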
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
	return 1 << (xhci_get_endpoint_index(desc) + 1);
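/*
 * Example (illustrative): endpoint 0x81 has index 2, so its control-context
 * flag is 1 << 3 = 0x8, leaving bit 0 for the slot context and bit 1 for
 * endpoint 0.
 */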
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
	return 1 << (ep_index + 1);
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
	return fls(added_ctxs) - 1;
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and "
					"virt_dev that do not match\n", func);

	if (xhci->xhc_state & XHCI_STATE_HALTED)
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);
/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
			goto command_cleanup;

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, command->in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, command,

		/* Clean up the input context for later use by bandwidth
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);

	kfree(command->completion);
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	unsigned int slot_id, ep_index;
	struct urb_priv	*urb_priv;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
	    urb->transfer_buffer_length > 0 &&
	    urb->transfer_flags & URB_ZERO_PACKET &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				size * sizeof(struct xhci_td *), mem_flags);

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;
	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
				xhci_urb_free_priv(urb_priv);

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
		spin_unlock_irqrestore(&xhci->lock, flags);
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
		spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	xhci_urb_free_priv(urb_priv);
	spin_unlock_irqrestore(&xhci->lock, flags);
/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))

	if (stream_id == 0) {
			"WARN: Slot ID %u, ep index %u has streams, "
			"but URB has no stream ID.\n",

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

		"WARN: Slot ID %u, ep index %u has "
		"stream IDs 1 to %u allocated, "
		"but stream ID %u is requested.\n",
		ep->stream_info->num_streams - 1,
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed"
 * from the ring.  Since the ring is a contiguous structure, they can't be
 * physically removed.  Instead, there are three cases:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
	unsigned long flags;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
	temp = readl(&xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HW died, freeing TD.");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt;
		     i < urb_priv->length && xhci->devs[urb->dev->slot_id];
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(urb_priv);
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 */

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
		xhci_ring_cmd_db(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 new_add_flags, new_drop_flags;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
	     cpu_to_le32(EP_STATE_DISABLED)) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		/* Do not warn when called after a usb_device_reset */
		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx;
	unsigned int ep_index;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 new_add_flags, new_drop_flags;
	struct xhci_virt_device *virt_dev;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
		/* So we won't queue a reset ep command for a root hub */

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",

	ep_index = xhci_get_endpoint_index(&ep->desc);
	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	/* Store the usb_device pointer for later use */

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->tx_info = 0;
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
	switch (*cmd_status) {
	case COMP_CMD_ABORT:
		xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
		dev_warn(&udev->dev,
			 "Not enough host controller resources for new device state.\n");
		/* FIXME: can we allocate more resources for the HC? */
	case COMP_2ND_BW_ERR:
		dev_warn(&udev->dev,
			 "Not enough bandwidth for new device state.\n");
		/* FIXME: can we go back to the old state? */
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"and endpoint is not disabled.\n");
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for endpoint configure command.\n");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful Endpoint Configure command");
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_CMD_ABORT:
		xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
		dev_warn(&udev->dev,
			 "WARN: xHCI driver setup invalid evaluate context command.\n");
		dev_warn(&udev->dev,
			 "WARN: slot not enabled for evaluate context command.\n");
	case COMP_CTX_STATE:
		dev_warn(&udev->dev,
			 "WARN: invalid context state for evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for evaluate context command.\n");
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful evaluate context command");
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
	u32 valid_add_flags;
	u32 valid_drop_flags;

	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
	u32 valid_add_flags;
	u32 valid_drop_flags;

	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
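/*
 * Worked example (illustrative): with add_flags == 0b11000 (endpoint
 * indices 2 and 3 added) and drop_flags == 0b01000 (endpoint index 2
 * dropped), the shifted masks are valid_add_flags == 0b110 and
 * valid_drop_flags == 0b010.  The changed endpoint (both added and dropped)
 * counts toward neither total: hweight32(0b110) - hweight32(0b010) = 1 net
 * new endpoint, and hweight32(0b010) - hweight32(0b010) = 0 net dropped.
 */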
/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 * - the first configure endpoint command drops more endpoints than it adds
 * - a second configure endpoint command that adds more endpoints is queued
 * - the first configure endpoint command fails, so the config is unchanged
 * - the second command may succeed, even though there aren't enough resources
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
	xhci->num_active_eps += added_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding %u ep ctxs, %u now active.", added_eps,
			xhci->num_active_eps);
/*
 * The configure endpoint command was failed by the xHC for some other reason,
 * so we need to revert the resources that the failed configuration would have
 * used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Removing %u failed ep ctxs, %u now active.",
			xhci->num_active_eps);
2044 * Now that the command has completed, clean up the active endpoint count by
2045 * subtracting out the endpoints that were dropped (but not changed).
2047 * Must be called with xhci->lock held.
2049 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2050 struct xhci_input_control_ctx *ctrl_ctx)
2052 u32 num_dropped_eps;
2054 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2055 xhci->num_active_eps -= num_dropped_eps;
2056 if (num_dropped_eps)
2057 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2058 "Removing %u dropped ep ctxs, %u now active.",
2060 xhci->num_active_eps);
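/*
 * Illustrative sketch (not driver code) of how the three helpers above are
 * meant to pair up around a command submission; locking and the quirk
 * checks are elided, and submit_command() is a hypothetical placeholder.
 */
#if 0
	/* called under xhci->lock */
	if (xhci_reserve_host_resources(xhci, ctrl_ctx))
		return -ENOMEM;			/* nothing was reserved */
	ret = submit_command();			/* hypothetical */
	if (ret)
		xhci_free_host_resources(xhci, ctrl_ctx);	  /* roll back */
	else
		xhci_finish_resource_reservation(xhci, ctrl_ctx); /* commit */
#endif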
2063 static unsigned int xhci_get_block_size(struct usb_device *udev)
2065 switch (udev->speed) {
2067 case USB_SPEED_FULL:
2069 case USB_SPEED_HIGH:
2071 case USB_SPEED_SUPER:
2073 case USB_SPEED_UNKNOWN:
2074 case USB_SPEED_WIRELESS:
2076 /* Should never happen */
2082 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2084 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2086 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2091 /* If we are changing a LS/FS device under a HS hub,
2092 * make sure (if we are activating a new TT) that the HS bus has enough
2093 * bandwidth for this new TT.
2095 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2096 struct xhci_virt_device *virt_dev,
2099 struct xhci_interval_bw_table *bw_table;
2100 struct xhci_tt_bw_info *tt_info;
2102 /* Find the bandwidth table for the root port this TT is attached to. */
2103 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2104 tt_info = virt_dev->tt_info;
2105 /* If this TT already had active endpoints, the bandwidth for this TT
2106 * has already been added. Removing all periodic endpoints (and thus
2107 * making the TT inactive) will only decrease the bandwidth used.
2111 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2112 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2116 /* Not sure why we would have no new active endpoints...
2118 * Maybe because of an Evaluate Context change for a hub update or a
2119 * control endpoint 0 max packet size change?
2120 * FIXME: skip the bandwidth calculation in that case.
2125 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2126 struct xhci_virt_device *virt_dev)
2128 unsigned int bw_reserved;
2130 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2131 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2134 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2135 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2142 * This algorithm is a very conservative estimate of the worst-case scheduling
2143 * scenario for any one interval. The hardware dynamically schedules the
2144 * packets, so we can't tell which microframe could be the limiting factor in
2145 * the bandwidth scheduling. This only takes into account periodic endpoints.
2147 * Obviously, we can't solve an NP complete problem to find the minimum worst
2148 * case scenario. Instead, we come up with an estimate that is no less than
2149 * the worst case bandwidth used for any one microframe, but may be an
 * overestimate.
2152 * We walk the requirements for each endpoint by interval, starting with the
2153 * smallest interval, and place packets in the schedule where there is only one
2154 * possible way to schedule packets for that interval. In order to simplify
2155 * this algorithm, we record the largest max packet size for each interval, and
2156 * assume all packets will be that size.
2158 * For interval 0, we obviously must schedule all packets for each interval.
2159 * The bandwidth for interval 0 is just the amount of data to be transmitted
2160 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2161 * the number of packets).
2163 * For interval 1, we have two possible microframes to schedule those packets
2164 * in. For this algorithm, if we can schedule the same number of packets for
2165 * each possible scheduling opportunity (each microframe), we will do so. The
2166 * remaining number of packets will be saved to be transmitted in the gaps in
2167 * the next interval's scheduling sequence.
2169 * As we move those remaining packets to be scheduled with interval 2 packets,
2170 * we have to double the number of remaining packets to transmit. This is
2171 * because the intervals are actually powers of 2, and we would be transmitting
2172 * the previous interval's packets twice in this interval. We also have to be
2173 * sure that when we look at the largest max packet size for this interval, we
2174 * also look at the largest max packet size for the remaining packets and take
2175 * the greater of the two.
2177 * The algorithm continues to evenly distribute packets in each scheduling
2178 * opportunity, and push the remaining packets out, until we get to the last
2179 * interval. Then those packets and their associated overhead are just added
2180 * to the bandwidth used.
2182 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2183 struct xhci_virt_device *virt_dev,
2186 unsigned int bw_reserved;
2187 unsigned int max_bandwidth;
2188 unsigned int bw_used;
2189 unsigned int block_size;
2190 struct xhci_interval_bw_table *bw_table;
2191 unsigned int packet_size = 0;
2192 unsigned int overhead = 0;
2193 unsigned int packets_transmitted = 0;
2194 unsigned int packets_remaining = 0;
2197 if (virt_dev->udev->speed == USB_SPEED_SUPER)
2198 return xhci_check_ss_bw(xhci, virt_dev);
2200 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2201 max_bandwidth = HS_BW_LIMIT;
2202 /* Convert percent of bus BW reserved to blocks reserved */
2203 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2205 max_bandwidth = FS_BW_LIMIT;
2206 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2209 bw_table = virt_dev->bw_table;
2210 /* We need to translate the max packet size and max ESIT payloads into
2211 * the units the hardware uses.
2213 block_size = xhci_get_block_size(virt_dev->udev);
2215 /* If we are manipulating a LS/FS device under a HS hub, double check
2216 * that the HS bus has enough bandwidth if we are activating a new TT.
2218 if (virt_dev->tt_info) {
2219 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2220 "Recalculating BW for rootport %u",
2221 virt_dev->real_port);
2222 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2223 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2224 "newly activated TT.\n");
2227 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2228 "Recalculating BW for TT slot %u port %u",
2229 virt_dev->tt_info->slot_id,
2230 virt_dev->tt_info->ttport);
2232 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2233 "Recalculating BW for rootport %u",
2234 virt_dev->real_port);
2237 /* Add in how much bandwidth will be used for interval zero, or the
2238 * rounded max ESIT payload + number of packets * largest overhead.
2240 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2241 bw_table->interval_bw[0].num_packets *
2242 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2244 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2245 unsigned int bw_added;
2246 unsigned int largest_mps;
2247 unsigned int interval_overhead;
2250 * How many packets could we transmit in this interval?
2251 * If packets didn't fit in the previous interval, we will need
2252 * to transmit that many packets twice within this interval.
2254 packets_remaining = 2 * packets_remaining +
2255 bw_table->interval_bw[i].num_packets;
2257 /* Find the largest max packet size of this or the previous
 * interval.
2260 if (list_empty(&bw_table->interval_bw[i].endpoints))
2263 struct xhci_virt_ep *virt_ep;
2264 struct list_head *ep_entry;
2266 ep_entry = bw_table->interval_bw[i].endpoints.next;
2267 virt_ep = list_entry(ep_entry,
2268 struct xhci_virt_ep, bw_endpoint_list);
2269 /* Convert to blocks, rounding up */
2270 largest_mps = DIV_ROUND_UP(
2271 virt_ep->bw_info.max_packet_size,
2274 if (largest_mps > packet_size)
2275 packet_size = largest_mps;
2277 /* Use the larger overhead of this or the previous interval. */
2278 interval_overhead = xhci_get_largest_overhead(
2279 &bw_table->interval_bw[i]);
2280 if (interval_overhead > overhead)
2281 overhead = interval_overhead;
2283 /* How many packets can we evenly distribute across
2284 * (1 << (i + 1)) possible scheduling opportunities?
2286 packets_transmitted = packets_remaining >> (i + 1);
2288 /* Add in the bandwidth used for those scheduled packets */
2289 bw_added = packets_transmitted * (overhead + packet_size);
2291 /* How many packets do we have remaining to transmit? */
2292 packets_remaining = packets_remaining % (1 << (i + 1));
2294 /* What largest max packet size should those packets have? */
2295 /* If we've transmitted all packets, don't carry over the
2296 * largest packet size.
2298 if (packets_remaining == 0) {
2301 } else if (packets_transmitted > 0) {
2302 /* Otherwise if we do have remaining packets, and we've
2303 * scheduled some packets in this interval, take the
2304 * largest max packet size from endpoints with this
 * interval.
2307 packet_size = largest_mps;
2308 overhead = interval_overhead;
2310 /* Otherwise carry over packet_size and overhead from the last
2311 * time we had a remainder.
2313 bw_used += bw_added;
2314 if (bw_used > max_bandwidth) {
2315 xhci_warn(xhci, "Not enough bandwidth. "
2316 "Proposed: %u, Max: %u\n",
2317 bw_used, max_bandwidth);
2322 * Ok, we know we have some packets left over after even-handedly
2323 * scheduling interval 15. We don't know which microframes they will
2324 * fit into, so we over-schedule and say they will be scheduled every
 * microframe.
2327 if (packets_remaining > 0)
2328 bw_used += overhead + packet_size;
2330 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2331 unsigned int port_index = virt_dev->real_port - 1;
2333 /* OK, we're manipulating a HS device attached to a
2334 * root port bandwidth domain. Include the number of active TTs
2335 * in the bandwidth used.
2337 bw_used += TT_HS_OVERHEAD *
2338 xhci->rh_bw[port_index].num_active_tts;
2341 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2342 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2343 "Available: %u " "percent",
2344 bw_used, max_bandwidth, bw_reserved,
2345 (max_bandwidth - bw_used - bw_reserved) * 100 /
2348 bw_used += bw_reserved;
2349 if (bw_used > max_bandwidth) {
2350 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2351 bw_used, max_bandwidth);
2355 bw_table->bw_used = bw_used;
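/*
 * Illustrative sketch (not driver code): the interval-walk estimate above,
 * reduced to toy arrays.  num_packets[i] and mps[i] stand in for the
 * per-interval totals kept in struct xhci_interval_bw, and a single
 * constant stands in for the per-packet overhead; all names here are
 * hypothetical and the carried-mps/overhead bookkeeping is simplified.
 */
#if 0
static unsigned int toy_bw_estimate(const unsigned int *num_packets,
		const unsigned int *mps, unsigned int num_intervals,
		unsigned int per_packet_overhead)
{
	unsigned int bw_used = 0;	/* interval 0 is charged separately */
	unsigned int packets_remaining = 0;
	unsigned int packet_size = 0;
	unsigned int i;

	for (i = 1; i < num_intervals; i++) {
		unsigned int transmitted;

		/* Carried-over packets repeat twice in the next interval. */
		packets_remaining = 2 * packets_remaining + num_packets[i];
		if (mps[i] > packet_size)
			packet_size = mps[i];

		/* Evenly fill the 1 << (i + 1) scheduling opportunities. */
		transmitted = packets_remaining >> (i + 1);
		bw_used += transmitted * (per_packet_overhead + packet_size);
		packets_remaining %= 1 << (i + 1);
		if (packets_remaining == 0)
			packet_size = 0;
	}
	/* Over-schedule any leftovers as if they ran every microframe. */
	if (packets_remaining)
		bw_used += per_packet_overhead + packet_size;
	return bw_used;
}
#endif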
2359 static bool xhci_is_async_ep(unsigned int ep_type)
2361 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2362 ep_type != ISOC_IN_EP &&
2363 ep_type != INT_IN_EP);
2366 static bool xhci_is_sync_in_ep(unsigned int ep_type)
2368 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2371 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2373 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2375 if (ep_bw->ep_interval == 0)
2376 return SS_OVERHEAD_BURST +
2377 (ep_bw->mult * ep_bw->num_packets *
2378 (SS_OVERHEAD + mps));
2379 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2380 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2381 1 << ep_bw->ep_interval);
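/*
 * Worked example (hypothetical endpoint): with mult = 1, num_packets = 2,
 * max_packet_size = 1024 and ep_interval = 3, the endpoint consumes
 * DIV_ROUND_UP(1 * 2 * (SS_OVERHEAD + DIV_ROUND_UP(1024, SS_BLOCK)
 *		+ SS_OVERHEAD_BURST), 1 << 3)
 * blocks, i.e. its per-service cost averaged over the 2^3-microframe period.
 */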
2385 void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2386 struct xhci_bw_info *ep_bw,
2387 struct xhci_interval_bw_table *bw_table,
2388 struct usb_device *udev,
2389 struct xhci_virt_ep *virt_ep,
2390 struct xhci_tt_bw_info *tt_info)
2392 struct xhci_interval_bw *interval_bw;
2393 int normalized_interval;
2395 if (xhci_is_async_ep(ep_bw->type))
2398 if (udev->speed == USB_SPEED_SUPER) {
2399 if (xhci_is_sync_in_ep(ep_bw->type))
2400 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2401 xhci_get_ss_bw_consumed(ep_bw);
2403 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2404 xhci_get_ss_bw_consumed(ep_bw);
2408 /* SuperSpeed endpoints never get added to intervals in the table, so
2409 * this check is only valid for HS/FS/LS devices.
2411 if (list_empty(&virt_ep->bw_endpoint_list))
2413 /* For LS/FS devices, we need to translate the interval expressed in
2414 * microframes to frames.
2416 if (udev->speed == USB_SPEED_HIGH)
2417 normalized_interval = ep_bw->ep_interval;
2419 normalized_interval = ep_bw->ep_interval - 3;
2421 if (normalized_interval == 0)
2422 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2423 interval_bw = &bw_table->interval_bw[normalized_interval];
2424 interval_bw->num_packets -= ep_bw->num_packets;
2425 switch (udev->speed) {
2427 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2429 case USB_SPEED_FULL:
2430 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2432 case USB_SPEED_HIGH:
2433 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2435 case USB_SPEED_SUPER:
2436 case USB_SPEED_UNKNOWN:
2437 case USB_SPEED_WIRELESS:
2438 /* Should never happen because only LS/FS/HS endpoints will get
2439 * added to the endpoint list.
2444 tt_info->active_eps -= 1;
2445 list_del_init(&virt_ep->bw_endpoint_list);
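/*
 * Worked example (hypothetical endpoint): a full-speed interrupt endpoint
 * with ep_interval = 6 is serviced every 2^6 = 64 microframes, i.e. every
 * 2^(6 - 3) = 8 frames, so it lands in interval_bw[3].  A high-speed
 * endpoint's ep_interval is already a microframe exponent and is used
 * unchanged.
 */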
2448 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2449 struct xhci_bw_info *ep_bw,
2450 struct xhci_interval_bw_table *bw_table,
2451 struct usb_device *udev,
2452 struct xhci_virt_ep *virt_ep,
2453 struct xhci_tt_bw_info *tt_info)
2455 struct xhci_interval_bw *interval_bw;
2456 struct xhci_virt_ep *smaller_ep;
2457 int normalized_interval;
2459 if (xhci_is_async_ep(ep_bw->type))
2462 if (udev->speed == USB_SPEED_SUPER) {
2463 if (xhci_is_sync_in_ep(ep_bw->type))
2464 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2465 xhci_get_ss_bw_consumed(ep_bw);
2467 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2468 xhci_get_ss_bw_consumed(ep_bw);
2472 /* For LS/FS devices, we need to translate the interval expressed in
2473 * microframes to frames.
2475 if (udev->speed == USB_SPEED_HIGH)
2476 normalized_interval = ep_bw->ep_interval;
2478 normalized_interval = ep_bw->ep_interval - 3;
2480 if (normalized_interval == 0)
2481 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2482 interval_bw = &bw_table->interval_bw[normalized_interval];
2483 interval_bw->num_packets += ep_bw->num_packets;
2484 switch (udev->speed) {
2486 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2488 case USB_SPEED_FULL:
2489 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2491 case USB_SPEED_HIGH:
2492 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2494 case USB_SPEED_SUPER:
2495 case USB_SPEED_UNKNOWN:
2496 case USB_SPEED_WIRELESS:
2497 /* Should never happen because only LS/FS/HS endpoints will get
2498 * added to the endpoint list.
2504 tt_info->active_eps += 1;
2505 /* Insert the endpoint into the list, largest max packet size first. */
2506 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2508 if (ep_bw->max_packet_size >=
2509 smaller_ep->bw_info.max_packet_size) {
2510 /* Add the new ep before the smaller endpoint */
2511 list_add_tail(&virt_ep->bw_endpoint_list,
2512 &smaller_ep->bw_endpoint_list);
2516 /* Add the new endpoint at the end of the list. */
2517 list_add_tail(&virt_ep->bw_endpoint_list,
2518 &interval_bw->endpoints);
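/*
 * Illustrative sketch (not driver code): the descending sorted insert used
 * above, on a toy integer list.  list_add_tail() before the first smaller
 * node places the new node directly ahead of it.
 */
#if 0
struct toy_node {
	int val;
	struct list_head link;
};

static void toy_insert_descending(struct list_head *head, struct toy_node *new)
{
	struct toy_node *cur;

	list_for_each_entry(cur, head, link) {
		if (new->val >= cur->val) {
			list_add_tail(&new->link, &cur->link);
			return;
		}
	}
	list_add_tail(&new->link, head);	/* smallest value goes last */
}
#endif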
2521 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2522 struct xhci_virt_device *virt_dev,
2525 struct xhci_root_port_bw_info *rh_bw_info;
2526 if (!virt_dev->tt_info)
2529 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2530 if (old_active_eps == 0 &&
2531 virt_dev->tt_info->active_eps != 0) {
2532 rh_bw_info->num_active_tts += 1;
2533 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2534 } else if (old_active_eps != 0 &&
2535 virt_dev->tt_info->active_eps == 0) {
2536 rh_bw_info->num_active_tts -= 1;
2537 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2541 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2542 struct xhci_virt_device *virt_dev,
2543 struct xhci_container_ctx *in_ctx)
2545 struct xhci_bw_info ep_bw_info[31];
2547 struct xhci_input_control_ctx *ctrl_ctx;
2548 int old_active_eps = 0;
2550 if (virt_dev->tt_info)
2551 old_active_eps = virt_dev->tt_info->active_eps;
2553 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2555 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2560 for (i = 0; i < 31; i++) {
2561 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2564 /* Make a copy of the BW info in case we need to revert this */
2565 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2566 sizeof(ep_bw_info[i]));
2567 /* Drop the endpoint from the interval table if the endpoint is
2568 * being dropped or changed.
2570 if (EP_IS_DROPPED(ctrl_ctx, i))
2571 xhci_drop_ep_from_interval_table(xhci,
2572 &virt_dev->eps[i].bw_info,
2578 /* Overwrite the information stored in the endpoints' bw_info */
2579 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2580 for (i = 0; i < 31; i++) {
2581 /* Add any changed or added endpoints to the interval table */
2582 if (EP_IS_ADDED(ctrl_ctx, i))
2583 xhci_add_ep_to_interval_table(xhci,
2584 &virt_dev->eps[i].bw_info,
2591 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2592 /* Ok, this fits in the bandwidth we have.
2593 * Update the number of active TTs.
2595 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2599 /* We don't have enough bandwidth for this, revert the stored info. */
2600 for (i = 0; i < 31; i++) {
2601 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2604 /* Drop the new copies of any added or changed endpoints from
2605 * the interval table.
2607 if (EP_IS_ADDED(ctrl_ctx, i)) {
2608 xhci_drop_ep_from_interval_table(xhci,
2609 &virt_dev->eps[i].bw_info,
2615 /* Revert the endpoint back to its old information */
2616 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2617 sizeof(ep_bw_info[i]));
2618 /* Add any changed or dropped endpoints back into the table */
2619 if (EP_IS_DROPPED(ctrl_ctx, i))
2620 xhci_add_ep_to_interval_table(xhci,
2621 &virt_dev->eps[i].bw_info,
2631 /* Issue a configure endpoint command or evaluate context command
2632 * and wait for it to finish.
2634 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2635 struct usb_device *udev,
2636 struct xhci_command *command,
2637 bool ctx_change, bool must_succeed)
2640 unsigned long flags;
2641 struct xhci_input_control_ctx *ctrl_ctx;
2642 struct xhci_virt_device *virt_dev;
2647 spin_lock_irqsave(&xhci->lock, flags);
2648 virt_dev = xhci->devs[udev->slot_id];
2650 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2652 spin_unlock_irqrestore(&xhci->lock, flags);
2653 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2658 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2659 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2660 spin_unlock_irqrestore(&xhci->lock, flags);
2661 xhci_warn(xhci, "Not enough host resources, "
2662 "active endpoint contexts = %u\n",
2663 xhci->num_active_eps);
2666 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2667 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2668 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2669 xhci_free_host_resources(xhci, ctrl_ctx);
2670 spin_unlock_irqrestore(&xhci->lock, flags);
2671 xhci_warn(xhci, "Not enough bandwidth\n");
2676 ret = xhci_queue_configure_endpoint(xhci, command,
2677 command->in_ctx->dma,
2678 udev->slot_id, must_succeed);
2680 ret = xhci_queue_evaluate_context(xhci, command,
2681 command->in_ctx->dma,
2682 udev->slot_id, must_succeed);
2684 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2685 xhci_free_host_resources(xhci, ctrl_ctx);
2686 spin_unlock_irqrestore(&xhci->lock, flags);
2687 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2688 "FIXME allocate a new ring segment");
2691 xhci_ring_cmd_db(xhci);
2692 spin_unlock_irqrestore(&xhci->lock, flags);
2694 /* Wait for the configure endpoint command to complete */
2695 wait_for_completion(command->completion);
2698 ret = xhci_configure_endpoint_result(xhci, udev,
2701 ret = xhci_evaluate_context_result(xhci, udev,
2704 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2705 spin_lock_irqsave(&xhci->lock, flags);
2706 /* If the command failed, remove the reserved resources.
2707 * Otherwise, clean up the estimate to include dropped eps.
2710 xhci_free_host_resources(xhci, ctrl_ctx);
2712 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2713 spin_unlock_irqrestore(&xhci->lock, flags);
2718 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2719 struct xhci_virt_device *vdev, int i)
2721 struct xhci_virt_ep *ep = &vdev->eps[i];
2723 if (ep->ep_state & EP_HAS_STREAMS) {
2724 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2725 xhci_get_endpoint_address(i));
2726 xhci_free_stream_info(xhci, ep->stream_info);
2727 ep->stream_info = NULL;
2728 ep->ep_state &= ~EP_HAS_STREAMS;
2732 /* Called after one or more calls to xhci_add_endpoint() or
2733 * xhci_drop_endpoint(). If this call fails, the USB core is expected
2734 * to call xhci_reset_bandwidth().
2736 * Since we are in the middle of changing either configuration or
2737 * installing a new alt setting, the USB core won't allow URBs to be
2738 * enqueued for any endpoint on the old config or interface. Nothing
2739 * else should be touching the xhci->devs[slot_id] structure, so we
2740 * don't need to take the xhci->lock for manipulating that.
2742 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2746 struct xhci_hcd *xhci;
2747 struct xhci_virt_device *virt_dev;
2748 struct xhci_input_control_ctx *ctrl_ctx;
2749 struct xhci_slot_ctx *slot_ctx;
2750 struct xhci_command *command;
2752 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2755 xhci = hcd_to_xhci(hcd);
2756 if (xhci->xhc_state & XHCI_STATE_DYING)
2759 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2760 virt_dev = xhci->devs[udev->slot_id];
2762 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
2766 command->in_ctx = virt_dev->in_ctx;
2768 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2769 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2771 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2774 goto command_cleanup;
2776 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2777 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2778 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2780 /* Don't issue the command if there are no endpoints to update. */
2781 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2782 ctrl_ctx->drop_flags == 0) {
2784 goto command_cleanup;
2786 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
2787 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2788 for (i = 31; i >= 1; i--) {
2789 __le32 le32 = cpu_to_le32(BIT(i));
2791 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2792 || (ctrl_ctx->add_flags & le32) || i == 1) {
2793 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2794 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2798 xhci_dbg(xhci, "New Input Control Context:\n");
2799 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
2800 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2802 ret = xhci_configure_endpoint(xhci, udev, command,
2805 /* Callee should call reset_bandwidth() */
2806 goto command_cleanup;
2808 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
2809 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
2810 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2812 /* Free any rings that were dropped, but not changed. */
2813 for (i = 1; i < 31; ++i) {
2814 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2815 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2816 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2817 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2820 xhci_zero_in_ctx(xhci, virt_dev);
2822 * Install any rings for completely new endpoints or changed endpoints,
2823 * and free or cache any old rings from changed endpoints.
2825 for (i = 1; i < 31; ++i) {
2826 if (!virt_dev->eps[i].new_ring)
2828 /* Only cache or free the old ring if it exists.
2829 * It may not if this is the first add of an endpoint.
2831 if (virt_dev->eps[i].ring) {
2832 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2834 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2835 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2836 virt_dev->eps[i].new_ring = NULL;
2839 kfree(command->completion);
2845 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2847 struct xhci_hcd *xhci;
2848 struct xhci_virt_device *virt_dev;
2851 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2854 xhci = hcd_to_xhci(hcd);
2856 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2857 virt_dev = xhci->devs[udev->slot_id];
2858 /* Free any rings allocated for added endpoints */
2859 for (i = 0; i < 31; ++i) {
2860 if (virt_dev->eps[i].new_ring) {
2861 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2862 virt_dev->eps[i].new_ring = NULL;
2865 xhci_zero_in_ctx(xhci, virt_dev);
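/*
 * Illustrative sketch (not driver code): the contract the two functions
 * above implement for the USB core.  The caller shown here is a
 * simplification of usb_hcd_alloc_bandwidth(); error handling is elided.
 */
#if 0
	ret = hcd->driver->add_endpoint(hcd, udev, new_ep);	/* stage */
	if (!ret)
		ret = hcd->driver->check_bandwidth(hcd, udev);	/* commit */
	if (ret)
		hcd->driver->reset_bandwidth(hcd, udev);	/* roll back */
#endif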
2868 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2869 struct xhci_container_ctx *in_ctx,
2870 struct xhci_container_ctx *out_ctx,
2871 struct xhci_input_control_ctx *ctrl_ctx,
2872 u32 add_flags, u32 drop_flags)
2874 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2875 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2876 xhci_slot_copy(xhci, in_ctx, out_ctx);
2877 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2879 xhci_dbg(xhci, "Input Context:\n");
2880 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
2883 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2884 unsigned int slot_id, unsigned int ep_index,
2885 struct xhci_dequeue_state *deq_state)
2887 struct xhci_input_control_ctx *ctrl_ctx;
2888 struct xhci_container_ctx *in_ctx;
2889 struct xhci_ep_ctx *ep_ctx;
2893 in_ctx = xhci->devs[slot_id]->in_ctx;
2894 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2896 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2901 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2902 xhci->devs[slot_id]->out_ctx, ep_index);
2903 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2904 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2905 deq_state->new_deq_ptr);
2907 xhci_warn(xhci, "WARN Cannot submit config ep after "
2908 "reset ep command\n");
2909 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2910 deq_state->new_deq_seg,
2911 deq_state->new_deq_ptr);
2914 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2916 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2917 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2918 xhci->devs[slot_id]->out_ctx, ctrl_ctx,
2919 added_ctxs, added_ctxs);
2922 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2923 unsigned int ep_index, struct xhci_td *td)
2925 struct xhci_dequeue_state deq_state;
2926 struct xhci_virt_ep *ep;
2927 struct usb_device *udev = td->urb->dev;
2929 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2930 "Cleaning up stalled endpoint ring");
2931 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2932 /* We need to move the HW's dequeue pointer past this TD,
2933 * or it will attempt to resend it on the next doorbell ring.
2935 xhci_find_new_dequeue_state(xhci, udev->slot_id,
2936 ep_index, ep->stopped_stream, td, &deq_state);
2938 if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
2941 /* HW with the reset endpoint quirk will use the saved dequeue state to
2942 * issue a configure endpoint command later.
2944 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2945 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2946 "Queueing new dequeue state");
2947 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2948 ep_index, ep->stopped_stream, &deq_state);
2950 /* Better hope no one uses the input context between now and the
2951 * reset endpoint completion!
2952 * XXX: No idea how this hardware will react when stream rings
 * are enabled.
2955 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2956 "Setting up input context for "
2957 "configure endpoint command");
2958 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2959 ep_index, &deq_state);
2963 /* Called when clearing a halted device. The core should have sent the control
2964 * message to clear the device halt condition. The host side of the halt should
2965 * already be cleared with a reset endpoint command issued when the STALL tx
2966 * event was received.
2968 * Context: in_interrupt
2971 void xhci_endpoint_reset(struct usb_hcd *hcd,
2972 struct usb_host_endpoint *ep)
2974 struct xhci_hcd *xhci;
2976 xhci = hcd_to_xhci(hcd);
2979 * We might need to implement the config ep cmd in xhci 4.8.1 note:
2980 * The Reset Endpoint Command may only be issued to endpoints in the
2981 * Halted state. If software wishes to reset the Data Toggle or Sequence
2982 * Number of an endpoint that isn't in the Halted state, then software
2983 * may issue a Configure Endpoint Command with the Drop and Add bits set
2984 * for the target endpoint that is in the Stopped state.
2987 /* For now, just print debug output to follow the situation */
2988 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
2989 ep->desc.bEndpointAddress);
2992 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2993 struct usb_device *udev, struct usb_host_endpoint *ep,
2994 unsigned int slot_id)
2997 unsigned int ep_index;
2998 unsigned int ep_state;
3002 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3005 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3006 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3007 " descriptor for ep 0x%x does not support streams\n",
3008 ep->desc.bEndpointAddress);
3012 ep_index = xhci_get_endpoint_index(&ep->desc);
3013 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3014 if (ep_state & EP_HAS_STREAMS ||
3015 ep_state & EP_GETTING_STREAMS) {
3016 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3017 "already has streams set up.\n",
3018 ep->desc.bEndpointAddress);
3019 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3020 "dynamic stream context array reallocation.\n");
3023 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3024 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3025 "endpoint 0x%x; URBs are pending.\n",
3026 ep->desc.bEndpointAddress);
3032 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3033 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3035 unsigned int max_streams;
3037 /* The stream context array size must be a power of two */
3038 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3040 * Find out how many primary stream array entries the host controller
3041 * supports. Later we may use secondary stream arrays (similar to 2nd
3042 * level page entries), but that's an optional feature for xHCI host
3043 * controllers. xHCs must support at least 4 stream IDs.
3045 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3046 if (*num_stream_ctxs > max_streams) {
3047 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3049 *num_stream_ctxs = max_streams;
3050 *num_streams = max_streams;
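/*
 * Worked example (hypothetical numbers): a driver asking for 30 streams
 * arrives here with *num_streams = 31 (stream 0 included), so
 * *num_stream_ctxs = roundup_pow_of_two(31) = 32.  If
 * HCC_MAX_PSA(xhci->hcc_params) is only 16, both values are clamped to 16
 * and the caller ends up with 15 usable stream IDs.
 */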
3054 /* Returns an error code if one of the endpoints already has streams.
3055 * This does not change any data structures, it only checks and gathers
 * information.
3058 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3059 struct usb_device *udev,
3060 struct usb_host_endpoint **eps, unsigned int num_eps,
3061 unsigned int *num_streams, u32 *changed_ep_bitmask)
3063 unsigned int max_streams;
3064 unsigned int endpoint_flag;
3068 for (i = 0; i < num_eps; i++) {
3069 ret = xhci_check_streams_endpoint(xhci, udev,
3070 eps[i], udev->slot_id);
3074 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3075 if (max_streams < (*num_streams - 1)) {
3076 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3077 eps[i]->desc.bEndpointAddress,
3079 *num_streams = max_streams+1;
3082 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3083 if (*changed_ep_bitmask & endpoint_flag)
3085 *changed_ep_bitmask |= endpoint_flag;
3090 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3091 struct usb_device *udev,
3092 struct usb_host_endpoint **eps, unsigned int num_eps)
3094 u32 changed_ep_bitmask = 0;
3095 unsigned int slot_id;
3096 unsigned int ep_index;
3097 unsigned int ep_state;
3100 slot_id = udev->slot_id;
3101 if (!xhci->devs[slot_id])
3104 for (i = 0; i < num_eps; i++) {
3105 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3106 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3107 /* Are streams already being freed for the endpoint? */
3108 if (ep_state & EP_GETTING_NO_STREAMS) {
3109 xhci_warn(xhci, "WARN Can't disable streams for "
3111 "streams are being disabled already\n",
3112 eps[i]->desc.bEndpointAddress);
3115 /* Are there actually any streams to free? */
3116 if (!(ep_state & EP_HAS_STREAMS) &&
3117 !(ep_state & EP_GETTING_STREAMS)) {
3118 xhci_warn(xhci, "WARN Can't disable streams for "
3120 "streams are already disabled!\n",
3121 eps[i]->desc.bEndpointAddress);
3122 xhci_warn(xhci, "WARN xhci_free_streams() called "
3123 "with non-streams endpoint\n");
3126 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3128 return changed_ep_bitmask;
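/*
 * Worked example (hypothetical endpoints): bulk IN 0x81 maps to endpoint
 * index 2 and bulk OUT 0x02 to endpoint index 3, so a two-endpoint call
 * leaves changed_ep_bitmask with the corresponding input-context flags
 * (bits 3 and 4) set.
 */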
3132 * The USB device drivers use this function (through the HCD interface in USB
3133 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
3134 * coordinate mass storage command queueing across multiple endpoints (basically
3135 * a stream ID == a task ID).
3137 * Setting up streams involves allocating the same size stream context array
3138 * for each endpoint and issuing a configure endpoint command for all endpoints.
3140 * Don't allow the call to succeed if one endpoint only supports one stream
3141 * (which means it doesn't support streams at all).
3143 * Drivers may get fewer stream IDs than they asked for, if the host controller
3144 * hardware or endpoints claim they can't support the number of requested
 * streams.
3147 int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3148 struct usb_host_endpoint **eps, unsigned int num_eps,
3149 unsigned int num_streams, gfp_t mem_flags)
3152 struct xhci_hcd *xhci;
3153 struct xhci_virt_device *vdev;
3154 struct xhci_command *config_cmd;
3155 struct xhci_input_control_ctx *ctrl_ctx;
3156 unsigned int ep_index;
3157 unsigned int num_stream_ctxs;
3158 unsigned long flags;
3159 u32 changed_ep_bitmask = 0;
3164 /* Add one to the number of streams requested to account for
3165 * stream 0 that is reserved for xHCI usage.
3168 xhci = hcd_to_xhci(hcd);
3169 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3172 /* MaxPSASize value 0 (2 streams) means streams are not supported */
3173 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3174 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3175 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3179 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
3181 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
3184 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3186 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3188 xhci_free_command(xhci, config_cmd);
3192 /* Check to make sure all endpoints are not already configured for
3193 * streams. While we're at it, find the maximum number of streams that
3194 * all the endpoints will support and check for duplicate endpoints.
3196 spin_lock_irqsave(&xhci->lock, flags);
3197 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3198 num_eps, &num_streams, &changed_ep_bitmask);
3200 xhci_free_command(xhci, config_cmd);
3201 spin_unlock_irqrestore(&xhci->lock, flags);
3204 if (num_streams <= 1) {
3205 xhci_warn(xhci, "WARN: endpoints can't handle "
3206 "more than one stream.\n");
3207 xhci_free_command(xhci, config_cmd);
3208 spin_unlock_irqrestore(&xhci->lock, flags);
3211 vdev = xhci->devs[udev->slot_id];
3212 /* Mark each endpoint as being in transition, so
3213 * xhci_urb_enqueue() will reject all URBs.
3215 for (i = 0; i < num_eps; i++) {
3216 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3217 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3219 spin_unlock_irqrestore(&xhci->lock, flags);
3221 /* Setup internal data structures and allocate HW data structures for
3222 * streams (but don't install the HW structures in the input context
3223 * until we're sure all memory allocation succeeded).
3225 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3226 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3227 num_stream_ctxs, num_streams);
3229 for (i = 0; i < num_eps; i++) {
3230 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3231 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3233 num_streams, mem_flags);
3234 if (!vdev->eps[ep_index].stream_info)
3236 /* Set maxPstreams in endpoint context and update deq ptr to
3237 * point to stream context array. FIXME
3241 /* Set up the input context for a configure endpoint command. */
3242 for (i = 0; i < num_eps; i++) {
3243 struct xhci_ep_ctx *ep_ctx;
3245 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3246 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3248 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3249 vdev->out_ctx, ep_index);
3250 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3251 vdev->eps[ep_index].stream_info);
3253 /* Tell the HW to drop its old copy of the endpoint context info
3254 * and add the updated copy from the input context.
3256 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3257 vdev->out_ctx, ctrl_ctx,
3258 changed_ep_bitmask, changed_ep_bitmask);
3260 /* Issue and wait for the configure endpoint command */
3261 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3264 /* xHC rejected the configure endpoint command for some reason, so we
3265 * leave the old ring intact and free our internal streams data
 * structure.
3271 spin_lock_irqsave(&xhci->lock, flags);
3272 for (i = 0; i < num_eps; i++) {
3273 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3274 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3275 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3276 udev->slot_id, ep_index);
3277 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3279 xhci_free_command(xhci, config_cmd);
3280 spin_unlock_irqrestore(&xhci->lock, flags);
3282 /* Subtract 1 for stream 0, which drivers can't use */
3283 return num_streams - 1;
3286 /* If it didn't work, free the streams! */
3287 for (i = 0; i < num_eps; i++) {
3288 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3289 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3290 vdev->eps[ep_index].stream_info = NULL;
3291 /* FIXME Unset maxPstreams in endpoint context and
3292 * update deq ptr to point to normal stream ring.
3294 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3295 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3296 xhci_endpoint_zero(xhci, vdev, eps[i]);
3298 xhci_free_command(xhci, config_cmd);
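/*
 * Illustrative sketch (not driver code): how a class driver reaches this
 * function through the USB core.  The endpoint pointers are hypothetical;
 * usb_alloc_streams() returns the number of usable stream IDs (stream 0
 * excluded) or a negative error.
 */
#if 0
	struct usb_host_endpoint *eps[2] = { data_in_ep, data_out_ep };
	int streams;

	streams = usb_alloc_streams(intf, eps, 2, 256, GFP_KERNEL);
	if (streams <= 0)
		dev_err(&intf->dev, "streams unavailable\n");
#endif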
3302 /* Transition the endpoint from using streams to being a "normal" endpoint
 * with a single ring.
3305 * Modify the endpoint context state, submit a configure endpoint command,
3306 * and free all endpoint rings for streams if that completes successfully.
3308 int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3309 struct usb_host_endpoint **eps, unsigned int num_eps,
3313 struct xhci_hcd *xhci;
3314 struct xhci_virt_device *vdev;
3315 struct xhci_command *command;
3316 struct xhci_input_control_ctx *ctrl_ctx;
3317 unsigned int ep_index;
3318 unsigned long flags;
3319 u32 changed_ep_bitmask;
3321 xhci = hcd_to_xhci(hcd);
3322 vdev = xhci->devs[udev->slot_id];
3324 /* Set up a configure endpoint command to remove the streams rings */
3325 spin_lock_irqsave(&xhci->lock, flags);
3326 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3327 udev, eps, num_eps);
3328 if (changed_ep_bitmask == 0) {
3329 spin_unlock_irqrestore(&xhci->lock, flags);
3333 /* Use the xhci_command structure from the first endpoint. We may have
3334 * allocated too many, but the driver may call xhci_free_streams() for
3335 * each endpoint it grouped into one call to xhci_alloc_streams().
3337 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3338 command = vdev->eps[ep_index].stream_info->free_streams_command;
3339 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3341 spin_unlock_irqrestore(&xhci->lock, flags);
3342 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3347 for (i = 0; i < num_eps; i++) {
3348 struct xhci_ep_ctx *ep_ctx;
3350 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3351 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3352 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3353 EP_GETTING_NO_STREAMS;
3355 xhci_endpoint_copy(xhci, command->in_ctx,
3356 vdev->out_ctx, ep_index);
3357 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3358 &vdev->eps[ep_index]);
3360 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3361 vdev->out_ctx, ctrl_ctx,
3362 changed_ep_bitmask, changed_ep_bitmask);
3363 spin_unlock_irqrestore(&xhci->lock, flags);
3365 /* Issue and wait for the configure endpoint command,
3366 * which must succeed.
3368 ret = xhci_configure_endpoint(xhci, udev, command,
3371 /* xHC rejected the configure endpoint command for some reason, so we
3372 * leave the stream rings intact.
3377 spin_lock_irqsave(&xhci->lock, flags);
3378 for (i = 0; i < num_eps; i++) {
3379 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3380 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3381 vdev->eps[ep_index].stream_info = NULL;
3382 /* FIXME Unset maxPstreams in endpoint context and
3383 * update deq ptr to point to normal stream ring.
3385 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3386 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3388 spin_unlock_irqrestore(&xhci->lock, flags);
3394 * Deletes endpoint resources for endpoints that were active before a Reset
3395 * Device command, or a Disable Slot command. The Reset Device command leaves
3396 * the control endpoint intact, whereas the Disable Slot command deletes it.
3398 * Must be called with xhci->lock held.
3400 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3401 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3404 unsigned int num_dropped_eps = 0;
3405 unsigned int drop_flags = 0;
3407 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3408 if (virt_dev->eps[i].ring) {
3409 drop_flags |= 1 << i;
3413 xhci->num_active_eps -= num_dropped_eps;
3414 if (num_dropped_eps)
3415 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3416 "Dropped %u ep ctxs, flags = 0x%x, "
3418 num_dropped_eps, drop_flags,
3419 xhci->num_active_eps);
3423 * This submits a Reset Device Command, which will set the device state to 0,
3424 * set the device address to 0, and disable all the endpoints except the default
3425 * control endpoint. The USB core should come back and call
3426 * xhci_address_device(), and then re-set up the configuration. If this is
3427 * called because of a usb_reset_and_verify_device(), then the old alternate
3428 * settings will be re-installed through the normal bandwidth allocation
 * functions.
3431 * Wait for the Reset Device command to finish. Remove all structures
3432 * associated with the endpoints that were disabled. Clear the input device
3433 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
3435 * If the virt_dev to be reset does not exist or does not match the udev,
3436 * it means the device is lost, possibly due to an xHC restore error and
3437 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3438 * re-allocate the device.
3440 int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3443 unsigned long flags;
3444 struct xhci_hcd *xhci;
3445 unsigned int slot_id;
3446 struct xhci_virt_device *virt_dev;
3447 struct xhci_command *reset_device_cmd;
3448 int last_freed_endpoint;
3449 struct xhci_slot_ctx *slot_ctx;
3450 int old_active_eps = 0;
3452 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3455 xhci = hcd_to_xhci(hcd);
3456 slot_id = udev->slot_id;
3457 virt_dev = xhci->devs[slot_id];
3459 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3460 "not exist. Re-allocate the device\n", slot_id);
3461 ret = xhci_alloc_dev(hcd, udev);
3468 if (virt_dev->tt_info)
3469 old_active_eps = virt_dev->tt_info->active_eps;
3471 if (virt_dev->udev != udev) {
3472 /* If the virt_dev and the udev do not match, this virt_dev
3473 * may belong to another udev.
3474 * Re-allocate the device.
3476 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3477 "not match the udev. Re-allocate the device\n",
3479 ret = xhci_alloc_dev(hcd, udev);
3486 /* If the device is not set up, there is no point in resetting it */
3487 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3488 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3489 SLOT_STATE_DISABLED)
3492 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3493 /* Allocate the command structure that holds the struct completion.
3494 * Assume we're in process context, since the normal device reset
3495 * process has to wait for the device anyway. Storage devices are
3496 * reset as part of error handling, so use GFP_NOIO instead of
 * GFP_KERNEL.
3499 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3500 if (!reset_device_cmd) {
3501 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3505 /* Attempt to submit the Reset Device command to the command ring */
3506 spin_lock_irqsave(&xhci->lock, flags);
3508 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3510 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3511 spin_unlock_irqrestore(&xhci->lock, flags);
3512 goto command_cleanup;
3514 xhci_ring_cmd_db(xhci);
3515 spin_unlock_irqrestore(&xhci->lock, flags);
3517 /* Wait for the Reset Device command to finish */
3518 wait_for_completion(reset_device_cmd->completion);
3520 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3521 * unless we tried to reset a slot ID that wasn't enabled,
3522 * or the device wasn't in the addressed or configured state.
3524 ret = reset_device_cmd->status;
3526 case COMP_CMD_ABORT:
3528 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3530 goto command_cleanup;
3531 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
3532 case COMP_CTX_STATE: /* 0.96 completion code for same thing */
3533 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3535 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3536 xhci_dbg(xhci, "Not freeing device rings.\n");
3537 /* Don't treat this as an error. May change my mind later. */
3539 goto command_cleanup;
3541 xhci_dbg(xhci, "Successful reset device command.\n");
3544 if (xhci_is_vendor_info_code(xhci, ret))
3546 xhci_warn(xhci, "Unknown completion code %u for "
3547 "reset device command.\n", ret);
3549 goto command_cleanup;
3552 /* Free up host controller endpoint resources */
3553 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3554 spin_lock_irqsave(&xhci->lock, flags);
3555 /* Don't delete the default control endpoint resources */
3556 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3557 spin_unlock_irqrestore(&xhci->lock, flags);
3560 /* Everything but endpoint 0 is disabled, so free or cache the rings. */
3561 last_freed_endpoint = 1;
3562 for (i = 1; i < 31; ++i) {
3563 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3565 if (ep->ep_state & EP_HAS_STREAMS) {
3566 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3567 xhci_get_endpoint_address(i));
3568 xhci_free_stream_info(xhci, ep->stream_info);
3569 ep->stream_info = NULL;
3570 ep->ep_state &= ~EP_HAS_STREAMS;
3574 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3575 last_freed_endpoint = i;
3577 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3578 xhci_drop_ep_from_interval_table(xhci,
3579 &virt_dev->eps[i].bw_info,
3584 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3586 /* If necessary, update the number of active TTs on this root port */
3587 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3589 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3590 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3594 xhci_free_command(xhci, reset_device_cmd);
3599 * At this point, the struct usb_device is about to go away, the device has
3600 * disconnected, and all traffic has been stopped and the endpoints have been
3601 * disabled. Free any HC data structures associated with that device.
3603 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3605 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3606 struct xhci_virt_device *virt_dev;
3607 unsigned long flags;
3610 struct xhci_command *command;
3612 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3616 #ifndef CONFIG_USB_DEFAULT_PERSIST
3618 * We called pm_runtime_get_noresume when the device was attached.
3619 * Decrement the counter here to allow the controller to runtime-suspend
3620 * if no devices remain.
3622 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3623 pm_runtime_put_noidle(hcd->self.controller);
3626 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3627 /* If the host is halted due to driver unload, we still need to free the
 * device.
3630 if (ret <= 0 && ret != -ENODEV) {
3635 virt_dev = xhci->devs[udev->slot_id];
3637 /* Stop any wayward timer functions (which may grab the lock) */
3638 for (i = 0; i < 31; ++i) {
3639 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
3640 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3643 spin_lock_irqsave(&xhci->lock, flags);
3644 /* Don't disable the slot if the host controller is dead. */
3645 state = readl(&xhci->op_regs->status);
3646 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3647 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3648 xhci_free_virt_device(xhci, udev->slot_id);
3649 spin_unlock_irqrestore(&xhci->lock, flags);
3654 if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3656 spin_unlock_irqrestore(&xhci->lock, flags);
3657 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3660 xhci_ring_cmd_db(xhci);
3661 spin_unlock_irqrestore(&xhci->lock, flags);
3664 * Event command completion handler will free any data structures
3665 * associated with the slot. XXX Can free sleep?
3670 * Checks if we have enough host controller resources for the default control
 * endpoint.
3673 * Must be called with xhci->lock held.
3675 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3677 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3678 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3679 "Not enough ep ctxs: "
3680 "%u active, need to add 1, limit is %u.",
3681 xhci->num_active_eps, xhci->limit_active_eps);
3684 xhci->num_active_eps += 1;
3685 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3686 "Adding 1 ep ctx, %u now active.",
3687 xhci->num_active_eps);
3693 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3694 * timed out, or allocating memory failed. Returns 1 on success.
3696 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3698 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3699 unsigned long flags;
3701 struct xhci_command *command;
3703 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3707 /* xhci->slot_id and xhci->addr_dev are not thread-safe */
3708 mutex_lock(&xhci->mutex);
3709 spin_lock_irqsave(&xhci->lock, flags);
3710 command->completion = &xhci->addr_dev;
3711 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3713 spin_unlock_irqrestore(&xhci->lock, flags);
3714 mutex_unlock(&xhci->mutex);
3715 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3719 xhci_ring_cmd_db(xhci);
3720 spin_unlock_irqrestore(&xhci->lock, flags);
3722 wait_for_completion(command->completion);
3723 slot_id = xhci->slot_id;
3724 mutex_unlock(&xhci->mutex);
3726 if (!slot_id || command->status != COMP_SUCCESS) {
3727 xhci_err(xhci, "Error while assigning device slot ID\n");
3728 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3730 readl(&xhci->cap_regs->hcs_params1)));
3735 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3736 spin_lock_irqsave(&xhci->lock, flags);
3737 ret = xhci_reserve_host_control_ep_resources(xhci);
3739 spin_unlock_irqrestore(&xhci->lock, flags);
3740 xhci_warn(xhci, "Not enough host resources, "
3741 "active endpoint contexts = %u\n",
3742 xhci->num_active_eps);
3745 spin_unlock_irqrestore(&xhci->lock, flags);
3747 /* Use GFP_NOIO, since this function can be called from
3748 * xhci_discover_or_reset_device(), which may be called as part of
3749 * mass storage driver error handling.
3751 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
3752 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3755 udev->slot_id = slot_id;
3757 #ifndef CONFIG_USB_DEFAULT_PERSIST
3759 * If resetting upon resume, we can't put the controller into runtime
3760 * suspend if there is a device attached.
3762 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3763 pm_runtime_get_noresume(hcd->self.controller);
3768 /* Is this a LS or FS device under a HS hub? */
3769 /* Hub or peripheral? */
3773 /* Disable slot, if we can do it without mem alloc */
3774 spin_lock_irqsave(&xhci->lock, flags);
3775 command->completion = NULL;
3776 command->status = 0;
3777 if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3779 xhci_ring_cmd_db(xhci);
3780 spin_unlock_irqrestore(&xhci->lock, flags);
3785 * Issue an Address Device command and optionally send a corresponding
3786 * SetAddress request to the device.
3788 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3789 enum xhci_setup_dev setup)
3791 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
3792 unsigned long flags;
3793 struct xhci_virt_device *virt_dev;
3795 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3796 struct xhci_slot_ctx *slot_ctx;
3797 struct xhci_input_control_ctx *ctrl_ctx;
3799 struct xhci_command *command = NULL;
3801 mutex_lock(&xhci->mutex);
3803 if (xhci->xhc_state) /* dying or halted */
3806 if (!udev->slot_id) {
3807 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3808 "Bad Slot ID %d", udev->slot_id);
3813 virt_dev = xhci->devs[udev->slot_id];
3815 if (WARN_ON(!virt_dev)) {
3817 * In a plug/unplug torture test with an NEC controller,
3818 * a NULL-pointer dereference was observed once because virt_dev was NULL.
3819 * Print useful debug rather than crash if it is observed again!
3821 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3827 if (setup == SETUP_CONTEXT_ONLY) {
3828 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3829 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3830 SLOT_STATE_DEFAULT) {
3831 xhci_dbg(xhci, "Slot already in default state\n");
3836 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3842 command->in_ctx = virt_dev->in_ctx;
3843 command->completion = &xhci->addr_dev;
3845 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3846 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
3848 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3854 * If this is the first Set Address since device plug-in or
3855 * virt_device reallocation after a resume with an xHCI power loss,
3856 * then set up the slot context.
3858 if (!slot_ctx->dev_info)
3859 xhci_setup_addressable_virt_dev(xhci, udev);
3860 /* Otherwise, update the control endpoint ring enqueue pointer. */
3862 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3863 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3864 ctrl_ctx->drop_flags = 0;
3866 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3867 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3868 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3869 le32_to_cpu(slot_ctx->dev_info) >> 27);
3871 spin_lock_irqsave(&xhci->lock, flags);
3872 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
3873 udev->slot_id, setup);
3875 spin_unlock_irqrestore(&xhci->lock, flags);
3876 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3877 "FIXME: allocate a command ring segment");
3880 xhci_ring_cmd_db(xhci);
3881 spin_unlock_irqrestore(&xhci->lock, flags);
3883 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
3884 wait_for_completion(command->completion);
3886 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
3887 * the SetAddress() "recovery interval" required by USB and aborting the
3888 * command on a timeout.
3890 switch (command->status) {
3891 case COMP_CMD_ABORT:
3892 case COMP_CMD_STOP:
3893 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
3894 ret = -ETIMEDOUT;
3895 break;
3896 case COMP_CTX_STATE:
3897 case COMP_EBADSLT:
3898 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
3899 act, udev->slot_id);
3900 ret = -EINVAL;
3901 break;
3902 case COMP_TX_ERR:
3903 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
3904 ret = -EPROTO;
3905 break;
3906 case COMP_DEV_ERR:
3907 dev_warn(&udev->dev,
3908 "ERROR: Incompatible device for setup %s command\n", act);
3909 ret = -ENODEV;
3910 break;
3911 case COMP_SUCCESS:
3912 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3913 "Successful setup %s command", act);
3914 break;
3915 default:
3916 xhci_err(xhci,
3917 "ERROR: unexpected setup %s command completion code 0x%x.\n",
3918 act, command->status);
3919 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3920 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3921 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
3922 ret = -EINVAL;
3923 break;
3924 }
3925 if (ret)
3926 goto out;
3927 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3928 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3929 "Op regs DCBAA ptr = %#016llx", temp_64);
3930 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3931 "Slot ID %d dcbaa entry @%p = %#016llx",
3933 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3934 (unsigned long long)
3935 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
3936 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3937 "Output Context DMA address = %#08llx",
3938 (unsigned long long)virt_dev->out_ctx->dma);
3939 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3940 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3941 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3942 le32_to_cpu(slot_ctx->dev_info) >> 27);
3943 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3944 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3946 * USB core uses address 1 for the roothubs, so we add one to the
3947 * address given back to us by the HC.
3949 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3950 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
3951 le32_to_cpu(slot_ctx->dev_info) >> 27);
3952 /* Zero the input context control for later use */
3953 ctrl_ctx->add_flags = 0;
3954 ctrl_ctx->drop_flags = 0;
3956 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3957 "Internal device address = %d",
3958 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
3959 out:
3960 mutex_unlock(&xhci->mutex);
3961 return ret;
3965 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3967 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
3970 int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
3972 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
3976 * Translate the port index into the real index in the HW port status
3977 * registers. Calculate the offset between the port's PORTSC register
3978 * and the port status base, then divide by the number of registers
3979 * per port to get the real index. Raw port numbers are 1-based.
3981 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
3983 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3984 __le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
3985 __le32 __iomem *addr;
3988 if (hcd->speed < HCD_USB3)
3989 addr = xhci->usb2_ports[port1 - 1];
3990 else
3991 addr = xhci->usb3_ports[port1 - 1];
3993 raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
3994 return raw_port;
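/*
 * Worked example (illustrative, not part of the driver): NUM_PORT_REGS
 * is 4 (PORTSC, PORTPMSC, PORTLI and PORTHLPMC per port), so a PORTSC
 * register that sits 8 32-bit registers past the port status base
 * belongs to raw port (8 / 4) + 1 = 3.
 */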
3998 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
3999 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
4001 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4002 struct usb_device *udev, u16 max_exit_latency)
4004 struct xhci_virt_device *virt_dev;
4005 struct xhci_command *command;
4006 struct xhci_input_control_ctx *ctrl_ctx;
4007 struct xhci_slot_ctx *slot_ctx;
4008 unsigned long flags;
4009 int ret;
4011 spin_lock_irqsave(&xhci->lock, flags);
4013 virt_dev = xhci->devs[udev->slot_id];
4016 * virt_dev might not exist yet if the xHC resumed from hibernate (S4)
4017 * and was re-initialized. The exit latency will be set later, after
4018 * hub_port_finish_reset() is done and xhci->devs[] has been re-allocated.
4021 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4022 spin_unlock_irqrestore(&xhci->lock, flags);
4026 /* Attempt to issue an Evaluate Context command to change the MEL. */
4027 command = xhci->lpm_command;
4028 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4029 if (!ctrl_ctx) {
4030 spin_unlock_irqrestore(&xhci->lock, flags);
4031 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4032 __func__);
4033 return -ENOMEM;
4034 }
4036 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4037 spin_unlock_irqrestore(&xhci->lock, flags);
4039 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4040 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4041 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4042 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4043 slot_ctx->dev_state = 0;
4045 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4046 "Set up evaluate context for LPM MEL change.");
4047 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
4048 xhci_dbg_ctx(xhci, command->in_ctx, 0);
4050 /* Issue and wait for the evaluate context command. */
4051 ret = xhci_configure_endpoint(xhci, udev, command,
4052 true, true);
4053 xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
4054 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
4056 if (!ret) {
4057 spin_lock_irqsave(&xhci->lock, flags);
4058 virt_dev->current_mel = max_exit_latency;
4059 spin_unlock_irqrestore(&xhci->lock, flags);
4060 }
4062 return ret;
4066 /* BESL to HIRD Encoding array for USB2 LPM */
4067 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4068 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4070 /* Calculate HIRD/BESL for USB2 PORTPMSC */
4071 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4072 struct usb_device *udev)
4074 int u2del, besl, besl_host;
4075 int besl_device = 0;
4076 u32 field;
4078 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4079 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4081 if (field & USB_BESL_SUPPORT) {
4082 for (besl_host = 0; besl_host < 16; besl_host++) {
4083 if (xhci_besl_encoding[besl_host] >= u2del)
4084 break;
4085 }
4086 /* Use baseline BESL value as default */
4087 if (field & USB_BESL_BASELINE_VALID)
4088 besl_device = USB_GET_BESL_BASELINE(field);
4089 else if (field & USB_BESL_DEEP_VALID)
4090 besl_device = USB_GET_BESL_DEEP(field);
4091 } else {
4092 if (u2del <= 50)
4093 besl_host = 0;
4094 else
4095 besl_host = (u2del - 51) / 75 + 1;
4096 }
4098 besl = besl_host + besl_device;
4099 if (besl > 15)
4100 besl = 15;
4102 return besl;
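/*
 * Worked example (illustrative): with a host U2 device exit latency
 * (u2del) of 300 us and a device that advertises no BESL support, the
 * else branch above gives besl_host = (300 - 51) / 75 + 1 = 4, and
 * xhci_besl_encoding[4] == 400 us is the smallest encoding >= u2del.
 */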
4105 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4106 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4108 int field;
4109 int l1;
4110 int besld = 0;
4111 int hirdm = 0;
4113 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4115 /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
4116 l1 = udev->l1_params.timeout / 256;
4118 /* device has preferred BESLD */
4119 if (field & USB_BESL_DEEP_VALID) {
4120 besld = USB_GET_BESL_DEEP(field);
4121 hirdm = 1;
4122 }
4124 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
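/*
 * Worked example (illustrative): a negotiated L1 timeout of 512 us
 * encodes as l1 = 512 / 256 = 2, since the PORTHLPMC L1 timeout field
 * counts in 256 us steps; BESLD and HIRDM are filled in only when the
 * device advertises a valid deep BESL value.
 */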
4127 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4128 struct usb_device *udev, int enable)
4130 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4131 __le32 __iomem **port_array;
4132 __le32 __iomem *pm_addr, *hlpm_addr;
4133 u32 pm_val, hlpm_val, field;
4134 unsigned int port_num;
4135 unsigned long flags;
4136 int hird, exit_latency;
4137 int ret;
4139 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4140 !udev->lpm_capable)
4141 return -EPERM;
4143 if (!udev->parent || udev->parent->parent ||
4144 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4145 return -EPERM;
4147 if (udev->usb2_hw_lpm_capable != 1)
4148 return -EPERM;
4150 spin_lock_irqsave(&xhci->lock, flags);
4152 port_array = xhci->usb2_ports;
4153 port_num = udev->portnum - 1;
4154 pm_addr = port_array[port_num] + PORTPMSC;
4155 pm_val = readl(pm_addr);
4156 hlpm_addr = port_array[port_num] + PORTHLPMC;
4157 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4159 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4160 enable ? "enable" : "disable", port_num + 1);
4162 if (enable) {
4163 /* Host supports BESL timeout instead of HIRD */
4164 if (udev->usb2_hw_lpm_besl_capable) {
4165 /* If the device doesn't have a preferred BESL value, use a
4166 * default that works with mixed HIRD and BESL systems. See
4167 * the XHCI_DEFAULT_BESL definition in xhci.h.
4169 if ((field & USB_BESL_SUPPORT) &&
4170 (field & USB_BESL_BASELINE_VALID))
4171 hird = USB_GET_BESL_BASELINE(field);
4172 else
4173 hird = udev->l1_params.besl;
4175 exit_latency = xhci_besl_encoding[hird];
4176 spin_unlock_irqrestore(&xhci->lock, flags);
4178 /* The USB 3.0 code dedicates one input context,
4179 * xhci->lpm_command->in_ctx, to link power management
4180 * evaluate context commands. It is protected by the
4181 * hcd->bandwidth mutex and shared by all devices. We need
4182 * to set the max exit latency in USB 2 BESL LPM as well,
4183 * so use the same mutex and xhci_change_max_exit_latency().
4184 */
4185 mutex_lock(hcd->bandwidth_mutex);
4186 ret = xhci_change_max_exit_latency(xhci, udev,
4187 exit_latency);
4188 mutex_unlock(hcd->bandwidth_mutex);
4189 if (ret < 0)
4190 return ret;
4192 spin_lock_irqsave(&xhci->lock, flags);
4194 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4195 writel(hlpm_val, hlpm_addr);
4196 /* flush write */
4197 readl(hlpm_addr);
4198 } else {
4199 hird = xhci_calculate_hird_besl(xhci, udev);
4200 }
4202 pm_val &= ~PORT_HIRD_MASK;
4203 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4204 writel(pm_val, pm_addr);
4205 pm_val = readl(pm_addr);
4206 pm_val |= PORT_HLE;
4207 writel(pm_val, pm_addr);
4210 } else {
4211 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4212 writel(pm_val, pm_addr);
4215 if (udev->usb2_hw_lpm_besl_capable) {
4216 spin_unlock_irqrestore(&xhci->lock, flags);
4217 mutex_lock(hcd->bandwidth_mutex);
4218 xhci_change_max_exit_latency(xhci, udev, 0);
4219 mutex_unlock(hcd->bandwidth_mutex);
4220 return 0;
4221 }
4222 }
4224 spin_unlock_irqrestore(&xhci->lock, flags);
4225 return 0;
4228 /* Check whether a USB2 port supports a given extended capability protocol.
4229 * Only USB2 ports' extended protocol capability values are cached.
4230 * Return 1 if the capability is supported.
4231 */
4232 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4233 unsigned capability)
4235 u32 port_offset, port_count;
4236 int i;
4238 for (i = 0; i < xhci->num_ext_caps; i++) {
4239 if (xhci->ext_caps[i] & capability) {
4240 /* port offsets start at 1 */
4241 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4242 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4243 if (port >= port_offset &&
4244 port < port_offset + port_count)
4245 return 1;
4246 }
4247 }
4249 return 0;
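/*
 * Worked example (illustrative): an extended capability declaring port
 * offset 3 and port count 4 covers zero-based ports 2..5, so a request
 * for port 4 matches because (4 >= 2 && 4 < 2 + 4).
 */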
4251 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4253 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4254 int portnum = udev->portnum - 1;
4256 if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
4257 !udev->lpm_capable)
4258 return 0;
4260 /* So far we only support LPM for non-hub devices connected to the root hub */
4261 if (!udev->parent || udev->parent->parent ||
4262 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4263 return 0;
4265 if (xhci->hw_lpm_support == 1 &&
4266 xhci_check_usb2_port_capability(
4267 xhci, portnum, XHCI_HLC)) {
4268 udev->usb2_hw_lpm_capable = 1;
4269 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4270 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4271 if (xhci_check_usb2_port_capability(xhci, portnum,
4272 XHCI_BLC))
4273 udev->usb2_hw_lpm_besl_capable = 1;
4274 }
4276 return 0;
4279 /*---------------------- USB 3.0 Link PM functions ------------------------*/
4281 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
4282 static unsigned long long xhci_service_interval_to_ns(
4283 struct usb_endpoint_descriptor *desc)
4285 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
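/*
 * Worked example (illustrative): an endpoint with bInterval = 4 has a
 * service interval of 2^(4-1) = 8 microframes, i.e. 8 * 125 us * 1000
 * = 1,000,000 ns (1 ms).
 */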
4288 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4289 enum usb3_link_state state)
4291 unsigned long long sel;
4292 unsigned long long pel;
4293 unsigned int max_sel_pel;
4294 char *state_name;
4296 switch (state) {
4297 case USB3_LPM_U1:
4298 /* Convert SEL and PEL stored in nanoseconds to microseconds */
4299 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4300 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4301 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4302 state_name = "U1";
4303 break;
4304 case USB3_LPM_U2:
4305 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4306 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4307 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4308 state_name = "U2";
4309 break;
4310 default:
4311 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4312 __func__);
4313 return USB3_LPM_DISABLED;
4314 }
4316 if (sel <= max_sel_pel && pel <= max_sel_pel)
4317 return USB3_LPM_DEVICE_INITIATED;
4319 if (sel > max_sel_pel)
4320 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4321 "due to long SEL %llu us\n",
4322 state_name, sel);
4323 else
4324 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4325 "due to long PEL %llu us\n",
4326 state_name, pel);
4327 return USB3_LPM_DISABLED;
4330 /* The U1 timeout should be the maximum of the following values:
4331 * - For control endpoints, U1 system exit latency (SEL) * 3
4332 * - For bulk endpoints, U1 SEL * 5
4333 * - For interrupt endpoints:
4334 * - Notification EPs, U1 SEL * 3
4335 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
4336 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
4338 static unsigned long long xhci_calculate_intel_u1_timeout(
4339 struct usb_device *udev,
4340 struct usb_endpoint_descriptor *desc)
4342 unsigned long long timeout_ns;
4343 int ep_type;
4344 int intr_type;
4346 ep_type = usb_endpoint_type(desc);
4347 switch (ep_type) {
4348 case USB_ENDPOINT_XFER_CONTROL:
4349 timeout_ns = udev->u1_params.sel * 3;
4350 break;
4351 case USB_ENDPOINT_XFER_BULK:
4352 timeout_ns = udev->u1_params.sel * 5;
4353 break;
4354 case USB_ENDPOINT_XFER_INT:
4355 intr_type = usb_endpoint_interrupt_type(desc);
4356 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4357 timeout_ns = udev->u1_params.sel * 3;
4358 break;
4359 }
4360 /* Otherwise the calculation is the same as isoc eps */
4361 case USB_ENDPOINT_XFER_ISOC:
4362 timeout_ns = xhci_service_interval_to_ns(desc);
4363 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4364 if (timeout_ns < udev->u1_params.sel * 2)
4365 timeout_ns = udev->u1_params.sel * 2;
4366 break;
4367 default:
4368 return 0;
4369 }
4371 return timeout_ns;
4374 /* Returns the hub-encoded U1 timeout value. */
4375 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4376 struct usb_device *udev,
4377 struct usb_endpoint_descriptor *desc)
4379 unsigned long long timeout_ns;
4381 if (xhci->quirks & XHCI_INTEL_HOST)
4382 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4383 else
4384 timeout_ns = udev->u1_params.sel;
4386 /* The U1 timeout is encoded in 1us intervals.
4387 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
4389 if (timeout_ns == USB3_LPM_DISABLED)
4390 timeout_ns = 1;
4391 else
4392 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4394 /* If the necessary timeout value is bigger than what we can set in the
4395 * USB 3.0 hub, we have to disable hub-initiated U1.
4397 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4398 return timeout_ns;
4399 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4400 "due to long timeout %llu us\n", timeout_ns);
4401 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
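/*
 * Worked example (illustrative): a bulk endpoint on an Intel host with
 * a U1 SEL of 20,000 ns gets timeout_ns = 20,000 * 5 = 100,000 ns,
 * which encodes to DIV_ROUND_UP_ULL(100000, 1000) = 100 in 1 us units,
 * within the 0x7F maximum a USB 3.0 hub U1 timeout field can hold.
 */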
4404 /* The U2 timeout should be the maximum of:
4405 * - 10 ms (to avoid the bandwidth impact on the scheduler)
4406 * - largest bInterval of any active periodic endpoint (to avoid going
4407 * into lower power link states between intervals).
4408 * - the U2 Exit Latency of the device
4410 static unsigned long long xhci_calculate_intel_u2_timeout(
4411 struct usb_device *udev,
4412 struct usb_endpoint_descriptor *desc)
4414 unsigned long long timeout_ns;
4415 unsigned long long u2_del_ns;
4417 timeout_ns = 10 * 1000 * 1000;
4419 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4420 (xhci_service_interval_to_ns(desc) > timeout_ns))
4421 timeout_ns = xhci_service_interval_to_ns(desc);
4423 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4424 if (u2_del_ns > timeout_ns)
4425 timeout_ns = u2_del_ns;
4430 /* Returns the hub-encoded U2 timeout value. */
4431 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4432 struct usb_device *udev,
4433 struct usb_endpoint_descriptor *desc)
4435 unsigned long long timeout_ns;
4437 if (xhci->quirks & XHCI_INTEL_HOST)
4438 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4439 else
4440 timeout_ns = udev->u2_params.sel;
4442 /* The U2 timeout is encoded in 256us intervals */
4443 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4444 /* If the necessary timeout value is bigger than what we can set in the
4445 * USB 3.0 hub, we have to disable hub-initiated U2.
4447 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4448 return timeout_ns;
4449 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4450 "due to long timeout %llu us\n", timeout_ns * 256);
4451 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
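/*
 * Worked example (illustrative): the 10 ms Intel floor (10,000,000 ns)
 * encodes to DIV_ROUND_UP_ULL(10000000, 256 * 1000) = 40 in 256 us
 * units, comfortably below USB3_LPM_U2_MAX_TIMEOUT (0xFE).
 */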
4454 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4455 struct usb_device *udev,
4456 struct usb_endpoint_descriptor *desc,
4457 enum usb3_link_state state,
4458 u16 *timeout)
4460 if (state == USB3_LPM_U1)
4461 return xhci_calculate_u1_timeout(xhci, udev, desc);
4462 else if (state == USB3_LPM_U2)
4463 return xhci_calculate_u2_timeout(xhci, udev, desc);
4465 return USB3_LPM_DISABLED;
4468 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4469 struct usb_device *udev,
4470 struct usb_endpoint_descriptor *desc,
4471 enum usb3_link_state state,
4472 u16 *timeout)
4474 u16 alt_timeout;
4476 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4477 desc, state, timeout);
4479 /* If we found we can't enable hub-initiated LPM, or
4480 * the U1 or U2 exit latency was too high to allow
4481 * device-initiated LPM as well, just stop searching.
4483 if (alt_timeout == USB3_LPM_DISABLED ||
4484 alt_timeout == USB3_LPM_DEVICE_INITIATED) {
4485 *timeout = alt_timeout;
4486 return -E2BIG;
4487 }
4488 if (alt_timeout > *timeout)
4489 *timeout = alt_timeout;
4491 return 0;
4493 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4494 struct usb_device *udev,
4495 struct usb_host_interface *alt,
4496 enum usb3_link_state state,
4497 u16 *timeout)
4499 int j;
4501 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4502 if (xhci_update_timeout_for_endpoint(xhci, udev,
4503 &alt->endpoint[j].desc, state, timeout))
4504 return -E2BIG;
4505 }
4507 return 0;
4510 static int xhci_check_intel_tier_policy(struct usb_device *udev,
4511 enum usb3_link_state state)
4513 struct usb_device *parent;
4514 unsigned int num_hubs;
4516 if (state == USB3_LPM_U2)
4517 return 0;
4519 /* Don't enable U1 if the device is on a 2nd tier hub or lower. */
4520 for (parent = udev->parent, num_hubs = 0; parent->parent;
4521 parent = parent->parent)
4522 num_hubs++;
4524 if (num_hubs < 2)
4525 return 0;
4527 dev_dbg(&udev->dev, "Disabling U1 link state for device"
4528 " below second-tier hub.\n");
4529 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4530 "to decrease power consumption.\n");
4534 static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4535 struct usb_device *udev,
4536 enum usb3_link_state state)
4538 if (xhci->quirks & XHCI_INTEL_HOST)
4539 return xhci_check_intel_tier_policy(udev, state);
4544 /* Returns the U1 or U2 timeout that should be enabled.
4545 * If the tier check or timeout setting functions return with a non-zero exit
4546 * code, that means the timeout value has been finalized and we shouldn't look
4547 * at any more endpoints.
4549 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4550 struct usb_device *udev, enum usb3_link_state state)
4552 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4553 struct usb_host_config *config;
4554 char *state_name;
4555 int i;
4556 u16 timeout = USB3_LPM_DISABLED;
4558 if (state == USB3_LPM_U1)
4559 state_name = "U1";
4560 else if (state == USB3_LPM_U2)
4561 state_name = "U2";
4562 else {
4563 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4564 state);
4565 return timeout;
4566 }
4568 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4569 return timeout;
4571 /* Gather some information about the currently installed configuration
4572 * and alternate interface settings.
4574 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4575 state, &timeout))
4576 return timeout;
4578 config = udev->actconfig;
4579 if (!config)
4580 return timeout;
4582 for (i = 0; i < config->desc.bNumInterfaces; i++) {
4583 struct usb_driver *driver;
4584 struct usb_interface *intf = config->interface[i];
4589 /* Check if any currently bound drivers want hub-initiated LPM
4592 if (intf->dev.driver) {
4593 driver = to_usb_driver(intf->dev.driver);
4594 if (driver && driver->disable_hub_initiated_lpm) {
4595 dev_dbg(&udev->dev, "Hub-initiated %s disabled "
4596 "at request of driver %s\n",
4597 state_name, driver->name);
4598 return xhci_get_timeout_no_hub_lpm(udev, state);
4602 /* Not sure how this could happen... */
4603 if (!intf->cur_altsetting)
4604 continue;
4606 if (xhci_update_timeout_for_interface(xhci, udev,
4607 intf->cur_altsetting,
4608 state, &timeout))
4609 return timeout;
4610 }
4612 return timeout;
4614 static int calculate_max_exit_latency(struct usb_device *udev,
4615 enum usb3_link_state state_changed,
4616 u16 hub_encoded_timeout)
4618 unsigned long long u1_mel_us = 0;
4619 unsigned long long u2_mel_us = 0;
4620 unsigned long long mel_us = 0;
4621 bool disabling_u1;
4622 bool disabling_u2;
4623 bool enabling_u1;
4624 bool enabling_u2;
4626 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4627 hub_encoded_timeout == USB3_LPM_DISABLED);
4628 disabling_u2 = (state_changed == USB3_LPM_U2 &&
4629 hub_encoded_timeout == USB3_LPM_DISABLED);
4631 enabling_u1 = (state_changed == USB3_LPM_U1 &&
4632 hub_encoded_timeout != USB3_LPM_DISABLED);
4633 enabling_u2 = (state_changed == USB3_LPM_U2 &&
4634 hub_encoded_timeout != USB3_LPM_DISABLED);
4636 /* If U1 was already enabled and we're not disabling it,
4637 * or we're going to enable U1, account for the U1 max exit latency.
4639 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4640 enabling_u1)
4641 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4642 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4643 enabling_u2)
4644 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4646 if (u1_mel_us > u2_mel_us)
4647 mel_us = u1_mel_us;
4648 else
4649 mel_us = u2_mel_us;
4650 /* xHCI host controller max exit latency field is only 16 bits wide. */
4651 if (mel_us > MAX_EXIT) {
4652 dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
4653 "is too big.\n", mel_us);
4654 return -E2BIG;
4655 }
4657 return mel_us;
4659 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
4660 int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4661 struct usb_device *udev, enum usb3_link_state state)
4663 struct xhci_hcd *xhci;
4664 u16 hub_encoded_timeout;
4665 int mel;
4666 int ret;
4668 xhci = hcd_to_xhci(hcd);
4669 /* The LPM timeout values are pretty host-controller specific, so don't
4670 * enable hub-initiated timeouts unless the vendor has provided
4671 * information about their timeout algorithm.
4673 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4674 !xhci->devs[udev->slot_id])
4675 return USB3_LPM_DISABLED;
4677 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
4678 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4679 if (mel < 0) {
4680 /* Max Exit Latency is too big, disable LPM. */
4681 hub_encoded_timeout = USB3_LPM_DISABLED;
4682 mel = 0;
4683 }
4685 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4686 if (ret)
4687 return ret;
4688 return hub_encoded_timeout;
4691 int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4692 struct usb_device *udev, enum usb3_link_state state)
4694 struct xhci_hcd *xhci;
4695 u16 mel;
4697 xhci = hcd_to_xhci(hcd);
4698 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4699 !xhci->devs[udev->slot_id])
4700 return 0;
4702 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
4703 return xhci_change_max_exit_latency(xhci, udev, mel);
4705 #else /* CONFIG_PM */
4707 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4708 struct usb_device *udev, int enable)
4709 {
4710 return 0;
4711 }
4713 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4714 {
4715 return 0;
4716 }
4718 int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4719 struct usb_device *udev, enum usb3_link_state state)
4720 {
4721 return USB3_LPM_DISABLED;
4722 }
4724 int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4725 struct usb_device *udev, enum usb3_link_state state)
4726 {
4727 return 0;
4728 }
4729 #endif /* CONFIG_PM */
4731 /*-------------------------------------------------------------------------*/
4733 /* Once a hub descriptor is fetched for a device, we need to update the xHC's
4734 * internal data structures for the device.
4736 int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
4737 struct usb_tt *tt, gfp_t mem_flags)
4739 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4740 struct xhci_virt_device *vdev;
4741 struct xhci_command *config_cmd;
4742 struct xhci_input_control_ctx *ctrl_ctx;
4743 struct xhci_slot_ctx *slot_ctx;
4744 unsigned long flags;
4745 unsigned think_time;
4748 /* Ignore root hubs */
4749 if (!hdev->parent)
4750 return 0;
4752 vdev = xhci->devs[hdev->slot_id];
4753 if (!vdev) {
4754 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
4755 return -EINVAL;
4756 }
4757 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
4758 if (!config_cmd) {
4759 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
4760 return -ENOMEM;
4761 }
4762 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
4763 if (!ctrl_ctx) {
4764 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4765 __func__);
4766 xhci_free_command(xhci, config_cmd);
4767 return -ENOMEM;
4768 }
4770 spin_lock_irqsave(&xhci->lock, flags);
4771 if (hdev->speed == USB_SPEED_HIGH &&
4772 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
4773 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
4774 xhci_free_command(xhci, config_cmd);
4775 spin_unlock_irqrestore(&xhci->lock, flags);
4776 return -ENOMEM;
4777 }
4779 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
4780 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4781 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
4782 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
4784 * refer to section 6.2.2: MTT should be 0 for a full-speed hub,
4785 * but it may already be set to 1 when setting up an xHCI virtual
4786 * device, so clear it anyway.
4787 */
4788 if (tt->multi)
4789 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
4790 else if (hdev->speed == USB_SPEED_FULL)
4791 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
4793 if (xhci->hci_version > 0x95) {
4794 xhci_dbg(xhci, "xHCI version %x needs hub "
4795 "TT think time and number of ports\n",
4796 (unsigned int) xhci->hci_version);
4797 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
4798 /* Set TT think time - convert from ns to FS bit times.
4799 * 0 = 8 FS bit times, 1 = 16 FS bit times,
4800 * 2 = 24 FS bit times, 3 = 32 FS bit times.
4802 * xHCI 1.0: this field shall be 0 if the device is not a
4803 * high-speed hub.
4804 */
4805 think_time = tt->think_time;
4806 if (think_time != 0)
4807 think_time = (think_time / 666) - 1;
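/*
 * Illustrative arithmetic: one full-speed bit time at 12 Mb/s is
 * ~83.3 ns, so 666 ns is ~8 FS bit times. A tt->think_time of 1332 ns
 * therefore becomes (1332 / 666) - 1 = 1, the encoding for
 * "16 FS bit times".
 */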
4808 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
4809 slot_ctx->tt_info |=
4810 cpu_to_le32(TT_THINK_TIME(think_time));
4812 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
4813 "TT think time or number of ports\n",
4814 (unsigned int) xhci->hci_version);
4816 slot_ctx->dev_state = 0;
4817 spin_unlock_irqrestore(&xhci->lock, flags);
4819 xhci_dbg(xhci, "Set up %s for hub device.\n",
4820 (xhci->hci_version > 0x95) ?
4821 "configure endpoint" : "evaluate context");
4822 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
4823 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
4825 /* Issue and wait for the configure endpoint or
4826 * evaluate context command.
4828 if (xhci->hci_version > 0x95)
4829 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4830 false, false);
4831 else
4832 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4833 true, false);
4835 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
4836 xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
4838 xhci_free_command(xhci, config_cmd);
4839 return ret;
4842 int xhci_get_frame(struct usb_hcd *hcd)
4844 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4845 /* EHCI mods by the periodic size. Why? */
4846 return readl(&xhci->run_regs->microframe_index) >> 3;
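/*
 * Illustrative note: MFINDEX counts 125 us microframes and eight of
 * them make one 1 ms frame, hence the right shift by 3; a microframe
 * index of 80, for example, corresponds to frame number 10.
 */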
4849 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4851 struct xhci_hcd *xhci;
4852 struct device *dev = hcd->self.controller;
4853 int retval;
4855 /* Accept arbitrarily long scatter-gather lists */
4856 hcd->self.sg_tablesize = ~0;
4858 /* support to build packet from discontinuous buffers */
4859 hcd->self.no_sg_constraint = 1;
4861 /* XHCI controllers don't stop the ep queue on short packets :| */
4862 hcd->self.no_stop_on_short = 1;
4864 xhci = hcd_to_xhci(hcd);
4866 if (usb_hcd_is_primary_hcd(hcd)) {
4867 xhci->main_hcd = hcd;
4868 /* Mark the first roothub as being USB 2.0.
4869 * The xHCI driver will register the USB 3.0 roothub.
4871 hcd->speed = HCD_USB2;
4872 hcd->self.root_hub->speed = USB_SPEED_HIGH;
4874 * USB 2.0 roothub under xHCI has an integrated TT,
4875 * (rate matching hub) as opposed to having an OHCI/UHCI
4876 * companion controller.
4877 */
4878 hcd->has_tt = 1;
4879 } else {
4880 if (xhci->sbrn == 0x31) {
4881 xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
4882 hcd->speed = HCD_USB31;
4883 }
4884 /* xHCI private pointer was set in xhci_pci_probe for the second
4885 * registered roothub.
4886 */
4887 return 0;
4888 }
4890 mutex_init(&xhci->mutex);
4891 xhci->cap_regs = hcd->regs;
4892 xhci->op_regs = hcd->regs +
4893 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
4894 xhci->run_regs = hcd->regs +
4895 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
4896 /* Cache read-only capability registers */
4897 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
4898 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
4899 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
4900 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
4901 xhci->hci_version = HC_VERSION(xhci->hcc_params);
4902 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
4903 if (xhci->hci_version > 0x100)
4904 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
4905 xhci_print_registers(xhci);
4907 xhci->quirks = quirks;
4909 get_quirks(dev, xhci);
4911 /* xHCI controllers that follow the xHCI 1.0 spec can give a spurious
4912 * success event after a short transfer. This quirk ignores such
4913 * spurious events.
4914 */
4915 if (xhci->hci_version > 0x96)
4916 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
4918 /* Make sure the HC is halted. */
4919 retval = xhci_halt(xhci);
4920 if (retval)
4921 return retval;
4923 xhci_dbg(xhci, "Resetting HCD\n");
4924 /* Reset the internal HC memory state and registers. */
4925 retval = xhci_reset(xhci);
4926 if (retval)
4927 return retval;
4928 xhci_dbg(xhci, "Reset complete\n");
4930 /* Set dma_mask and coherent_dma_mask to 64 bits
4931 * if the xHC supports 64-bit addressing */
4932 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
4933 !dma_set_mask(dev, DMA_BIT_MASK(64))) {
4934 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4935 dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
4936 } else {
4937 /*
4938 * This is to avoid error in cases where a 32-bit USB
4939 * controller is used on a 64-bit capable system.
4940 */
4941 retval = dma_set_mask(dev, DMA_BIT_MASK(32));
4942 if (retval)
4943 return retval;
4944 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
4945 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
4948 xhci_dbg(xhci, "Calling HCD init\n");
4949 /* Initialize HCD and host controller data structures. */
4950 retval = xhci_init(hcd);
4951 if (retval)
4952 return retval;
4953 xhci_dbg(xhci, "Called HCD init\n");
4955 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n",
4956 xhci->hcc_params, xhci->hci_version, xhci->quirks);
4958 return 0;
4960 EXPORT_SYMBOL_GPL(xhci_gen_setup);
4962 static const struct hc_driver xhci_hc_driver = {
4963 .description = "xhci-hcd",
4964 .product_desc = "xHCI Host Controller",
4965 .hcd_priv_size = sizeof(struct xhci_hcd *),
4968 * generic hardware linkage
4971 .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,
4974 * basic lifecycle operations
4976 .reset = NULL, /* set in xhci_init_driver() */
4979 .shutdown = xhci_shutdown,
4982 * managing i/o requests and associated device resources
4984 .urb_enqueue = xhci_urb_enqueue,
4985 .urb_dequeue = xhci_urb_dequeue,
4986 .alloc_dev = xhci_alloc_dev,
4987 .free_dev = xhci_free_dev,
4988 .alloc_streams = xhci_alloc_streams,
4989 .free_streams = xhci_free_streams,
4990 .add_endpoint = xhci_add_endpoint,
4991 .drop_endpoint = xhci_drop_endpoint,
4992 .endpoint_reset = xhci_endpoint_reset,
4993 .check_bandwidth = xhci_check_bandwidth,
4994 .reset_bandwidth = xhci_reset_bandwidth,
4995 .address_device = xhci_address_device,
4996 .enable_device = xhci_enable_device,
4997 .update_hub_device = xhci_update_hub_device,
4998 .reset_device = xhci_discover_or_reset_device,
5001 * scheduling support
5003 .get_frame_number = xhci_get_frame,
5008 .hub_control = xhci_hub_control,
5009 .hub_status_data = xhci_hub_status_data,
5010 .bus_suspend = xhci_bus_suspend,
5011 .bus_resume = xhci_bus_resume,
5014 * call back when device connected and addressed
5016 .update_device = xhci_update_device,
5017 .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
5018 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
5019 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
5020 .find_raw_port_number = xhci_find_raw_port_number,
5023 void xhci_init_driver(struct hc_driver *drv,
5024 const struct xhci_driver_overrides *over)
5028 /* Copy the generic table to drv then apply the overrides */
5029 *drv = xhci_hc_driver;
5031 if (over) {
5032 drv->hcd_priv_size += over->extra_priv_size;
5033 if (over->reset)
5034 drv->reset = over->reset;
5035 if (over->start)
5036 drv->start = over->start;
5037 }
5038 }
5039 EXPORT_SYMBOL_GPL(xhci_init_driver);
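/*
 * Illustrative sketch (an assumption, not part of this file): a glue
 * driver would typically consume xhci_init_driver() roughly as the
 * hypothetical example_* symbols below do, overriding only what it
 * needs and letting xhci_gen_setup() do the generic initialization.
 */
static void example_quirks(struct device *dev, struct xhci_hcd *xhci)
{
	/* A real glue driver would set xhci->quirks flags here. */
}

static int example_setup(struct usb_hcd *hcd)
{
	/* Halt, reset and initialize the xHC via the generic path. */
	return xhci_gen_setup(hcd, example_quirks);
}

static const struct xhci_driver_overrides example_overrides __initconst = {
	.extra_priv_size = sizeof(struct xhci_hcd),
	.reset = example_setup,
};

static struct hc_driver example_xhci_hc_driver;

static int __init example_driver_init(void)
{
	/* Start from the generic table, then apply the overrides. */
	xhci_init_driver(&example_xhci_hc_driver, &example_overrides);
	return 0;
}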
5041 MODULE_DESCRIPTION(DRIVER_DESC);
5042 MODULE_AUTHOR(DRIVER_AUTHOR);
5043 MODULE_LICENSE("GPL");
5045 static int __init xhci_hcd_init(void)
5048 * Check the compiler generated sizes of structures that must be laid
5049 * out in specific ways for hardware access.
5051 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
5052 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
5053 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
5054 /* xhci_device_control has eight fields, and also
5055 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
5057 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
5058 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
5059 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
5060 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
5061 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
5062 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
5063 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
5065 return 0;
5072 * If an init function is provided, an exit function must also be provided
5073 * to allow module unload.
5075 static void __exit xhci_hcd_fini(void) { }
5077 module_init(xhci_hcd_init);
5078 module_exit(xhci_hcd_fini);