/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned int quirks;
module_param(quirks, uint, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
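		/*
		 * The rest of the loop follows directly from the description
		 * above (a minimal sketch; EHCI's handshake() has the same
		 * shape, assuming a 1 us udelay() per iteration):
		 */
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}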
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret) {
		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	} else
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}
/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		/* Clear state flags, including dying, halted and removing. */
		xhci->xhc_state = 0;

	return ret;
}
/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = readl(&xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access.  Without this delay, the subsequent HC
	 * register access may very rarely result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake(&xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}
#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"failed to allocate MSI entry");
		return ret;
	}

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"disable MSI interrupt");
		pci_disable_msi(pdev);
	}

	return ret;
}
/*
 * Free all requested IRQs.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq > 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));
}
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the maximum number of interrupts the host can
	 *   handle, i.e. the maximum number of interrupters per the xhci
	 *   HCSPARAMS1 register.
	 * - num_online_cpus: one MSI-X vector per CPU core, plus one
	 *   additional vector to ensure an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
				GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->quirks & XHCI_PLAT)
		return;

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
}
static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}
static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev;
	int ret;

	/* The xhci platform device has set up IRQs through usb_add_hcd. */
	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is 0, we have MSI */
		return 0;

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

 legacy_irq:
	if (!strlen(hcd->irq_descr))
		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
				hcd->driver->description, hcd->self.busnum);

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}
#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif
static void compliance_mode_recovery(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	u32 temp;
	int i;

	xhci = (struct xhci_hcd *)arg;

	for (i = 0; i < xhci->num_usb3_ports; i++) {
		temp = readl(xhci->usb3_ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}
/*
 * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0
 * re-driver that sometimes causes ports behind that hardware to enter
 * compliance mode.  The quirk creates a timer that polls the link state of
 * each host controller port every 2 seconds and recovers a port by issuing
 * a Warm Reset if compliance mode is detected; otherwise the port becomes
 * "dead" (no device connections or disconnections will be detected anymore).
 * Because no status event is generated when entering compliance mode (per
 * the xhci spec), this quirk is needed on systems that have the failing
 * hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	setup_timer(&xhci->comp_mode_recovery_timer,
		    compliance_mode_recovery, (unsigned long)xhci);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	set_timer_slack(&xhci->comp_mode_recovery_timer,
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}
/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
}
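/*
 * Worked example (illustrative): with 4 USB3 ports, "all ports seen U0"
 * means port_status_u0 == (1 << 4) - 1 == 0xf, i.e. one bit per port.
 */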
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}
/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB3 roothub");
	return 0;
}
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (unsigned long) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	writel(temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enable interrupts, cmd = 0x%x.", temp);
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;
		command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;
		xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB2 roothub");
	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);
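/*
 * Call-order note: the USB core adds the primary (USB 2.0) HCD first and
 * the shared (USB 3.0) HCD second, so xhci_run() does the one-time MSI and
 * event ring interrupter setup on the first call and, per the
 * usb_hcd_is_primary_hcd() check above, defers the actual start
 * (xhci_run_finished(), which sets the run bit) to the second call.
 */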
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	mutex_lock(&xhci->mutex);

	if (!(xhci->xhc_state & XHCI_STATE_HALTED)) {
		spin_lock_irq(&xhci->lock);

		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		xhci_halt(xhci);
		xhci_reset(xhci);

		spin_unlock_irq(&xhci->lock);
	}

	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel(temp & ~STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	/* Workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));

	/* Yet another workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
}
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				       xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't preserve the command ring pointer across suspend, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}
static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
{
	int port_index;
	__le32 __iomem **port_array;
	unsigned long flags;
	u32 t1, t2;

	spin_lock_irqsave(&xhci->lock, flags);

	/* disable USB3 ports' wake bits */
	port_index = xhci->num_usb3_ports;
	port_array = xhci->usb3_ports;
	while (port_index--) {
		t1 = readl(port_array[port_index]);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, port_array[port_index]);
	}

	/* disable USB2 ports' wake bits */
	port_index = xhci->num_usb2_ports;
	port_array = xhci->usb2_ports;
	while (port_index--) {
		t1 = readl(port_array[port_index]);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, port_array[port_index]);
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
}
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	if (!do_wakeup)
		xhci_disable_port_wake_on_bits(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	del_timer_sync(&xhci->shared_hcd->rh_timer);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	if (xhci_handshake(&xhci->op_regs->status,
			STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);
/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0, status;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;

	if (!hcd->state)
		return 0;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		if (xhci_handshake(&xhci->op_regs->status,
				STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = readl(&xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel(temp & ~STS_EINT, &xhci->op_regs->status);
		temp = readl(&xhci->ir_set->irq_pending);
		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
			0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

 done:
	if (retval == 0) {
		/* Resume root hubs only when there are pending events. */
		status = readl(&xhci->op_regs->status);
		if (status & STS_EINT) {
			usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}

	/*
	 * If the system is subject to the quirk, the compliance mode timer
	 * always needs to be re-initialized after a system resume, since the
	 * ports may suffer the compliance mode issue again even if they
	 * previously entered U0 before the system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	usb_hcd_poll_rh_status(xhci->shared_hcd);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/
/*
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
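/*
 * Worked examples of the index formula above (illustrative):
 *   ep 0x81 (epnum 1, IN):  (1 * 2) + 1 - 1 = 2
 *   ep 0x02 (epnum 2, OUT): (2 * 2) + 0 - 1 = 3
 *   ep0 (control):          (0 * 2)         = 0
 */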
/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the XHCI endpoint index.
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
	return direction | number;
}
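/*
 * Worked example (illustrative): ep_index 2 gives number =
 * DIV_ROUND_UP(2, 2) = 1 and direction = USB_DIR_IN (since 2 % 2 == 0),
 * i.e. endpoint address 0x81 - the inverse of the example above.
 */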
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}
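/*
 * E.g. (illustrative): ep 0x81 has endpoint index 2, so its control
 * context flag is 1 << 3 = 0x8 (bit 0 being the slot, bit 1 being ep0).
 */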
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and "
					"virt_dev that do not match\n", func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);
/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */

		command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, command->in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv	*urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
	    urb->transfer_buffer_length > 0 &&
	    urb->transfer_flags & URB_ZERO_PACKET &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		size = 2;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = readl(&xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HW died, freeing TD.");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt;
		     i < urb_priv->length && xhci->devs[urb->dev->slot_id];
		     i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(urb_priv);
		return ret;
	}

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command) {
			ret = -ENOMEM;
			goto done;
		}
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
				ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
	     cpu_to_le32(EP_STATE_DISABLED)) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		/* Do not warn when called after a usb_device_reset */
		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
					__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx;
	unsigned int ep_index;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	u32 new_add_flags, new_drop_flags;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_CMD_ABORT:
	case COMP_CMD_STOP:
		xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
		ret = -ETIME;
		break;
	case COMP_ENOMEM:
		dev_warn(&udev->dev,
			 "Not enough host controller resources for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
	case COMP_2ND_BW_ERR:
		dev_warn(&udev->dev,
			 "Not enough bandwidth for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for endpoint configure command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful Endpoint Configure command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
				*cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_CMD_ABORT:
	case COMP_CMD_STOP:
		xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
		ret = -ETIME;
		break;
	case COMP_EINVAL:
		dev_warn(&udev->dev,
			 "WARN: xHCI driver setup invalid evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev,
			"WARN: slot not enabled for evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_CTX_STATE:
		dev_warn(&udev->dev,
			"WARN: invalid context state for evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev,
			"ERROR: Incompatible device for evaluate context command.\n");
		ret = -ENODEV;
		break;
	case COMP_MEL_ERR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful evaluate context command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
				*cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}
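/*
 * Worked example (illustrative): add_flags = 0xc (DCIs 2 and 3 added) and
 * drop_flags = 0x4 (DCI 2 also dropped, i.e. changed) give
 * valid_add_flags = 0x3 and valid_drop_flags = 0x1, so
 * hweight32(0x3) - hweight32(0x3 & 0x1) = 2 - 1 = 1 truly new endpoint.
 */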
static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}
/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though there aren't enough resources
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding %u ep ctxs, %u now active.", added_eps,
			xhci->num_active_eps);
	return 0;
}
/*
 * The configure endpoint command failed in the xHC for some other reason, so
 * we need to revert the resources that the failed configuration would have
 * used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Removing %u failed ep ctxs, %u now active.",
			num_failed_eps,
			xhci->num_active_eps);
}
/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Removing %u dropped ep ctxs, %u now active.",
				num_dropped_eps,
				xhci->num_active_eps);
}
2054 static unsigned int xhci_get_block_size(struct usb_device *udev)
2056 switch (udev->speed) {
2058 case USB_SPEED_FULL:
2060 case USB_SPEED_HIGH:
2062 case USB_SPEED_SUPER:
2063 case USB_SPEED_SUPER_PLUS:
2065 case USB_SPEED_UNKNOWN:
2066 case USB_SPEED_WIRELESS:
2068 /* Should never happen */
2074 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2076 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2078 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2083 /* If we are changing a LS/FS device under a HS hub,
2084 * make sure (if we are activating a new TT) that the HS bus has enough
2085 * bandwidth for this new TT.
2087 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2088 struct xhci_virt_device *virt_dev,
2091 struct xhci_interval_bw_table *bw_table;
2092 struct xhci_tt_bw_info *tt_info;
2094 /* Find the bandwidth table for the root port this TT is attached to. */
2095 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2096 tt_info = virt_dev->tt_info;
2097 /* If this TT already had active endpoints, the bandwidth for this TT
2098 * has already been added. Removing all periodic endpoints (and thus
2099 * making the TT inactive) will only decrease the bandwidth used.
2103 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2104 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2108 /* Not sure why we would have no new active endpoints...
2110 * Maybe because of an Evaluate Context change for a hub update or a
2111 * control endpoint 0 max packet size change?
2112 * FIXME: skip the bandwidth calculation in that case.
2117 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2118 struct xhci_virt_device *virt_dev)
2120 unsigned int bw_reserved;
2122 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2123 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2126 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2127 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
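/*
 * Illustrative reading of the checks above: SS_BW_RESERVED percent of
 * each direction's budget is held back for non-periodic transfers (a
 * value of 10 would leave 90% usable), and the IN and OUT budgets are
 * policed independently because SuperSpeed links are dual-simplex.
 */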
2134 * This algorithm is a very conservative estimate of the worst-case scheduling
2135 * scenario for any one interval. The hardware dynamically schedules the
2136 * packets, so we can't tell which microframe could be the limiting factor in
2137 * the bandwidth scheduling. This only takes into account periodic endpoints.
2139 * Obviously, we can't solve an NP complete problem to find the minimum worst
2140 * case scenario. Instead, we come up with an estimate that is no less than
2141 * the worst case bandwidth used for any one microframe, but may be an
2142 * over-estimate.
2144 * We walk the requirements for each endpoint by interval, starting with the
2145 * smallest interval, and place packets in the schedule where there is only one
2146 * possible way to schedule packets for that interval. In order to simplify
2147 * this algorithm, we record the largest max packet size for each interval, and
2148 * assume all packets will be that size.
2150 * For interval 0, we obviously must schedule all packets for each interval.
2151 * The bandwidth for interval 0 is just the amount of data to be transmitted
2152 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2153 * the number of packets).
2155 * For interval 1, we have two possible microframes to schedule those packets
2156 * in. For this algorithm, if we can schedule the same number of packets for
2157 * each possible scheduling opportunity (each microframe), we will do so. The
2158 * remaining number of packets will be saved to be transmitted in the gaps in
2159 * the next interval's scheduling sequence.
2161 * As we move those remaining packets to be scheduled with interval 2 packets,
2162 * we have to double the number of remaining packets to transmit. This is
2163 * because the intervals are actually powers of 2, and we would be transmitting
2164 * the previous interval's packets twice in this interval. We also have to be
2165 * sure that when we look at the largest max packet size for this interval, we
2166 * also look at the largest max packet size for the remaining packets and take
2167 * the greater of the two.
2169 * The algorithm continues to evenly distribute packets in each scheduling
2170 * opportunity, and push the remaining packets out, until we get to the last
2171 * interval. Then those packets and their associated overhead are just added
2172 * to the bandwidth used.
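/*
 * Worked example of the loop below (made-up packet counts): suppose
 * interval 1 has 3 packets pending and interval 2 has 1.  Interval 1
 * offers 1 << (1 + 1) = 4 scheduling slots, so 3 >> 2 = 0 packets are
 * placed and 3 % 4 = 3 carry over.  At interval 2 the carry-over
 * doubles: 2 * 3 + 1 = 7 packets for 1 << (2 + 1) = 8 slots, so again
 * 7 >> 3 = 0 are placed and all 7 ride along, doubling at each later
 * interval.  Whatever remains after interval 15 is over-scheduled as
 * if sent every microframe.  The largest max packet size and overhead
 * seen so far travel with the remainder, keeping the estimate
 * conservative.
 */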
2174 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2175 struct xhci_virt_device *virt_dev,
2178 unsigned int bw_reserved;
2179 unsigned int max_bandwidth;
2180 unsigned int bw_used;
2181 unsigned int block_size;
2182 struct xhci_interval_bw_table *bw_table;
2183 unsigned int packet_size = 0;
2184 unsigned int overhead = 0;
2185 unsigned int packets_transmitted = 0;
2186 unsigned int packets_remaining = 0;
2189 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2190 return xhci_check_ss_bw(xhci, virt_dev);
2192 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2193 max_bandwidth = HS_BW_LIMIT;
2194 /* Convert percent of bus BW reserved to blocks reserved */
2195 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2197 max_bandwidth = FS_BW_LIMIT;
2198 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2201 bw_table = virt_dev->bw_table;
2202 /* We need to translate the max packet size and max ESIT payloads into
2203 * the units the hardware uses.
2205 block_size = xhci_get_block_size(virt_dev->udev);
2207 /* If we are manipulating a LS/FS device under a HS hub, double check
2208 * that the HS bus has enough bandwidth if we are activating a new TT.
2210 if (virt_dev->tt_info) {
2211 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2212 "Recalculating BW for rootport %u",
2213 virt_dev->real_port);
2214 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2215 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2216 "newly activated TT.\n");
2219 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2220 "Recalculating BW for TT slot %u port %u",
2221 virt_dev->tt_info->slot_id,
2222 virt_dev->tt_info->ttport);
2224 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2225 "Recalculating BW for rootport %u",
2226 virt_dev->real_port);
2229 /* Add in how much bandwidth will be used for interval zero, or the
2230 * rounded max ESIT payload + number of packets * largest overhead.
2232 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2233 bw_table->interval_bw[0].num_packets *
2234 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2236 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2237 unsigned int bw_added;
2238 unsigned int largest_mps;
2239 unsigned int interval_overhead;
2242 * How many packets could we transmit in this interval?
2243 * If packets didn't fit in the previous interval, we will need
2244 * to transmit that many packets twice within this interval.
2246 packets_remaining = 2 * packets_remaining +
2247 bw_table->interval_bw[i].num_packets;
2249 /* Find the largest max packet size of this or the previous
2250 * interval.
2252 if (list_empty(&bw_table->interval_bw[i].endpoints))
2255 struct xhci_virt_ep *virt_ep;
2256 struct list_head *ep_entry;
2258 ep_entry = bw_table->interval_bw[i].endpoints.next;
2259 virt_ep = list_entry(ep_entry,
2260 struct xhci_virt_ep, bw_endpoint_list);
2261 /* Convert to blocks, rounding up */
2262 largest_mps = DIV_ROUND_UP(
2263 virt_ep->bw_info.max_packet_size,
2266 if (largest_mps > packet_size)
2267 packet_size = largest_mps;
2269 /* Use the larger overhead of this or the previous interval. */
2270 interval_overhead = xhci_get_largest_overhead(
2271 &bw_table->interval_bw[i]);
2272 if (interval_overhead > overhead)
2273 overhead = interval_overhead;
2275 /* How many packets can we evenly distribute across
2276 * (1 << (i + 1)) possible scheduling opportunities?
2278 packets_transmitted = packets_remaining >> (i + 1);
2280 /* Add in the bandwidth used for those scheduled packets */
2281 bw_added = packets_transmitted * (overhead + packet_size);
2283 /* How many packets do we have remaining to transmit? */
2284 packets_remaining = packets_remaining % (1 << (i + 1));
2286 /* What largest max packet size should those packets have? */
2287 /* If we've transmitted all packets, don't carry over the
2288 * largest packet size.
2290 if (packets_remaining == 0) {
2293 } else if (packets_transmitted > 0) {
2294 /* Otherwise if we do have remaining packets, and we've
2295 * scheduled some packets in this interval, take the
2296 * largest max packet size from endpoints with this
2297 * interval.
2299 packet_size = largest_mps;
2300 overhead = interval_overhead;
2302 /* Otherwise carry over packet_size and overhead from the last
2303 * time we had a remainder.
2305 bw_used += bw_added;
2306 if (bw_used > max_bandwidth) {
2307 xhci_warn(xhci, "Not enough bandwidth. "
2308 "Proposed: %u, Max: %u\n",
2309 bw_used, max_bandwidth);
2314 * Ok, we know we have some packets left over after even-handedly
2315 * scheduling interval 15. We don't know which microframes they will
2316 * fit into, so we over-schedule and say they will be scheduled every
2317 * microframe.
2319 if (packets_remaining > 0)
2320 bw_used += overhead + packet_size;
2322 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2323 unsigned int port_index = virt_dev->real_port - 1;
2325 /* OK, we're manipulating a HS device attached to a
2326 * root port bandwidth domain. Include the number of active TTs
2327 * in the bandwidth used.
2329 bw_used += TT_HS_OVERHEAD *
2330 xhci->rh_bw[port_index].num_active_tts;
2333 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2334 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2335 "Available: %u " "percent",
2336 bw_used, max_bandwidth, bw_reserved,
2337 (max_bandwidth - bw_used - bw_reserved) * 100 /
2340 bw_used += bw_reserved;
2341 if (bw_used > max_bandwidth) {
2342 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2343 bw_used, max_bandwidth);
2347 bw_table->bw_used = bw_used;
2351 static bool xhci_is_async_ep(unsigned int ep_type)
2353 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2354 ep_type != ISOC_IN_EP &&
2355 ep_type != INT_IN_EP);
2358 static bool xhci_is_sync_in_ep(unsigned int ep_type)
2360 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2363 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2365 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2367 if (ep_bw->ep_interval == 0)
2368 return SS_OVERHEAD_BURST +
2369 (ep_bw->mult * ep_bw->num_packets *
2370 (SS_OVERHEAD + mps));
2371 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2372 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2373 1 << ep_bw->ep_interval);
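/*
 * Informal reading of the formula above (SS_BLOCK, SS_OVERHEAD and
 * SS_OVERHEAD_BURST are constants from xhci.h): each ESIT moves
 * mult * num_packets packets of up to mps blocks, each paying a
 * per-packet overhead.  With ep_interval == 0 there is a service
 * opportunity every microframe, so the full cost plus one burst
 * overhead is charged per microframe; otherwise the cost is spread
 * over the 1 << ep_interval microframes of the interval, rounded up
 * so the estimate never under-accounts.
 */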
2377 void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2378 struct xhci_bw_info *ep_bw,
2379 struct xhci_interval_bw_table *bw_table,
2380 struct usb_device *udev,
2381 struct xhci_virt_ep *virt_ep,
2382 struct xhci_tt_bw_info *tt_info)
2384 struct xhci_interval_bw *interval_bw;
2385 int normalized_interval;
2387 if (xhci_is_async_ep(ep_bw->type))
2390 if (udev->speed >= USB_SPEED_SUPER) {
2391 if (xhci_is_sync_in_ep(ep_bw->type))
2392 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2393 xhci_get_ss_bw_consumed(ep_bw);
2395 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2396 xhci_get_ss_bw_consumed(ep_bw);
2400 /* SuperSpeed endpoints never get added to intervals in the table, so
2401 * this check is only valid for HS/FS/LS devices.
2403 if (list_empty(&virt_ep->bw_endpoint_list))
2405 /* For LS/FS devices, we need to translate the interval expressed in
2406 * microframes to frames.
2408 if (udev->speed == USB_SPEED_HIGH)
2409 normalized_interval = ep_bw->ep_interval;
2411 normalized_interval = ep_bw->ep_interval - 3;
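/*
 * Example (illustrative): a full-speed interrupt endpoint with
 * ep_interval == 5 has a period of 1 << 5 = 32 microframes.  FS/LS
 * bandwidth is tracked per frame (8 microframes), so subtracting 3
 * from the exponent gives normalized_interval == 2, i.e. 1 << 2 = 4
 * frames.  High-speed endpoints are already tracked in microframes
 * and keep their exponent unchanged.
 */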
2413 if (normalized_interval == 0)
2414 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2415 interval_bw = &bw_table->interval_bw[normalized_interval];
2416 interval_bw->num_packets -= ep_bw->num_packets;
2417 switch (udev->speed) {
2419 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2421 case USB_SPEED_FULL:
2422 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2424 case USB_SPEED_HIGH:
2425 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2427 case USB_SPEED_SUPER:
2428 case USB_SPEED_SUPER_PLUS:
2429 case USB_SPEED_UNKNOWN:
2430 case USB_SPEED_WIRELESS:
2431 /* Should never happen because only LS/FS/HS endpoints will get
2432 * added to the endpoint list.
2437 tt_info->active_eps -= 1;
2438 list_del_init(&virt_ep->bw_endpoint_list);
2441 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2442 struct xhci_bw_info *ep_bw,
2443 struct xhci_interval_bw_table *bw_table,
2444 struct usb_device *udev,
2445 struct xhci_virt_ep *virt_ep,
2446 struct xhci_tt_bw_info *tt_info)
2448 struct xhci_interval_bw *interval_bw;
2449 struct xhci_virt_ep *smaller_ep;
2450 int normalized_interval;
2452 if (xhci_is_async_ep(ep_bw->type))
2455 if (udev->speed >= USB_SPEED_SUPER) {
2456 if (xhci_is_sync_in_ep(ep_bw->type))
2457 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2458 xhci_get_ss_bw_consumed(ep_bw);
2460 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2461 xhci_get_ss_bw_consumed(ep_bw);
2465 /* For LS/FS devices, we need to translate the interval expressed in
2466 * microframes to frames.
2468 if (udev->speed == USB_SPEED_HIGH)
2469 normalized_interval = ep_bw->ep_interval;
2471 normalized_interval = ep_bw->ep_interval - 3;
2473 if (normalized_interval == 0)
2474 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2475 interval_bw = &bw_table->interval_bw[normalized_interval];
2476 interval_bw->num_packets += ep_bw->num_packets;
2477 switch (udev->speed) {
2479 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2481 case USB_SPEED_FULL:
2482 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2484 case USB_SPEED_HIGH:
2485 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2487 case USB_SPEED_SUPER:
2488 case USB_SPEED_SUPER_PLUS:
2489 case USB_SPEED_UNKNOWN:
2490 case USB_SPEED_WIRELESS:
2491 /* Should never happen because only LS/FS/HS endpoints will get
2492 * added to the endpoint list.
2498 tt_info->active_eps += 1;
2499 /* Insert the endpoint into the list, largest max packet size first. */
2500 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2502 if (ep_bw->max_packet_size >=
2503 smaller_ep->bw_info.max_packet_size) {
2504 /* Add the new ep before the smaller endpoint */
2505 list_add_tail(&virt_ep->bw_endpoint_list,
2506 &smaller_ep->bw_endpoint_list);
2510 /* Add the new endpoint at the end of the list. */
2511 list_add_tail(&virt_ep->bw_endpoint_list,
2512 &interval_bw->endpoints);
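/*
 * Example of the ordering above (hypothetical sizes): if the interval
 * list holds endpoints with max packet sizes 1024, 512 and 64, a new
 * endpoint with mps 512 is linked ahead of the existing 512 (the >=
 * test), giving 1024, 512 (new), 512, 64.  Keeping the list sorted
 * this way lets the bandwidth check read an interval's largest max
 * packet size from the head of the list.
 */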
2515 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2516 struct xhci_virt_device *virt_dev,
2519 struct xhci_root_port_bw_info *rh_bw_info;
2520 if (!virt_dev->tt_info)
2523 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2524 if (old_active_eps == 0 &&
2525 virt_dev->tt_info->active_eps != 0) {
2526 rh_bw_info->num_active_tts += 1;
2527 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2528 } else if (old_active_eps != 0 &&
2529 virt_dev->tt_info->active_eps == 0) {
2530 rh_bw_info->num_active_tts -= 1;
2531 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
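/*
 * Illustrative transitions for the bookkeeping above: a TT whose
 * endpoint count goes from zero to non-zero was just "turned on", so
 * one more active TT and TT_HS_OVERHEAD are charged to the root port;
 * the reverse transition refunds them.  A TT that stays active (or
 * stays idle) across the update costs nothing extra.
 */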
2535 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2536 struct xhci_virt_device *virt_dev,
2537 struct xhci_container_ctx *in_ctx)
2539 struct xhci_bw_info ep_bw_info[31];
2541 struct xhci_input_control_ctx *ctrl_ctx;
2542 int old_active_eps = 0;
2544 if (virt_dev->tt_info)
2545 old_active_eps = virt_dev->tt_info->active_eps;
2547 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2549 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2554 for (i = 0; i < 31; i++) {
2555 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2558 /* Make a copy of the BW info in case we need to revert this */
2559 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2560 sizeof(ep_bw_info[i]));
2561 /* Drop the endpoint from the interval table if the endpoint is
2562 * being dropped or changed.
2564 if (EP_IS_DROPPED(ctrl_ctx, i))
2565 xhci_drop_ep_from_interval_table(xhci,
2566 &virt_dev->eps[i].bw_info,
2572 /* Overwrite the information stored in the endpoints' bw_info */
2573 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2574 for (i = 0; i < 31; i++) {
2575 /* Add any changed or added endpoints to the interval table */
2576 if (EP_IS_ADDED(ctrl_ctx, i))
2577 xhci_add_ep_to_interval_table(xhci,
2578 &virt_dev->eps[i].bw_info,
2585 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2586 /* Ok, this fits in the bandwidth we have.
2587 * Update the number of active TTs.
2589 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2593 /* We don't have enough bandwidth for this, revert the stored info. */
2594 for (i = 0; i < 31; i++) {
2595 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2598 /* Drop the new copies of any added or changed endpoints from
2599 * the interval table.
2601 if (EP_IS_ADDED(ctrl_ctx, i)) {
2602 xhci_drop_ep_from_interval_table(xhci,
2603 &virt_dev->eps[i].bw_info,
2609 /* Revert the endpoint back to its old information */
2610 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2611 sizeof(ep_bw_info[i]));
2612 /* Add any changed or dropped endpoints back into the table */
2613 if (EP_IS_DROPPED(ctrl_ctx, i))
2614 xhci_add_ep_to_interval_table(xhci,
2615 &virt_dev->eps[i].bw_info,
2625 /* Issue a configure endpoint command or evaluate context command
2626 * and wait for it to finish.
2628 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2629 struct usb_device *udev,
2630 struct xhci_command *command,
2631 bool ctx_change, bool must_succeed)
2634 unsigned long flags;
2635 struct xhci_input_control_ctx *ctrl_ctx;
2636 struct xhci_virt_device *virt_dev;
2641 spin_lock_irqsave(&xhci->lock, flags);
2642 virt_dev = xhci->devs[udev->slot_id];
2644 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2646 spin_unlock_irqrestore(&xhci->lock, flags);
2647 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2652 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2653 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2654 spin_unlock_irqrestore(&xhci->lock, flags);
2655 xhci_warn(xhci, "Not enough host resources, "
2656 "active endpoint contexts = %u\n",
2657 xhci->num_active_eps);
2660 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2661 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2662 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2663 xhci_free_host_resources(xhci, ctrl_ctx);
2664 spin_unlock_irqrestore(&xhci->lock, flags);
2665 xhci_warn(xhci, "Not enough bandwidth\n");
2670 ret = xhci_queue_configure_endpoint(xhci, command,
2671 command->in_ctx->dma,
2672 udev->slot_id, must_succeed);
2674 ret = xhci_queue_evaluate_context(xhci, command,
2675 command->in_ctx->dma,
2676 udev->slot_id, must_succeed);
2678 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2679 xhci_free_host_resources(xhci, ctrl_ctx);
2680 spin_unlock_irqrestore(&xhci->lock, flags);
2681 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2682 "FIXME allocate a new ring segment");
2685 xhci_ring_cmd_db(xhci);
2686 spin_unlock_irqrestore(&xhci->lock, flags);
2688 /* Wait for the configure endpoint command to complete */
2689 wait_for_completion(command->completion);
2692 ret = xhci_configure_endpoint_result(xhci, udev,
2695 ret = xhci_evaluate_context_result(xhci, udev,
2698 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2699 spin_lock_irqsave(&xhci->lock, flags);
2700 /* If the command failed, remove the reserved resources.
2701 * Otherwise, clean up the estimate to include dropped eps.
2704 xhci_free_host_resources(xhci, ctrl_ctx);
2706 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2707 spin_unlock_irqrestore(&xhci->lock, flags);
2712 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2713 struct xhci_virt_device *vdev, int i)
2715 struct xhci_virt_ep *ep = &vdev->eps[i];
2717 if (ep->ep_state & EP_HAS_STREAMS) {
2718 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2719 xhci_get_endpoint_address(i));
2720 xhci_free_stream_info(xhci, ep->stream_info);
2721 ep->stream_info = NULL;
2722 ep->ep_state &= ~EP_HAS_STREAMS;
2726 /* Called after one or more calls to xhci_add_endpoint() or
2727 * xhci_drop_endpoint(). If this call fails, the USB core is expected
2728 * to call xhci_reset_bandwidth().
2730 * Since we are in the middle of changing either configuration or
2731 * installing a new alt setting, the USB core won't allow URBs to be
2732 * enqueued for any endpoint on the old config or interface. Nothing
2733 * else should be touching the xhci->devs[slot_id] structure, so we
2734 * don't need to take the xhci->lock for manipulating that.
2736 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2740 struct xhci_hcd *xhci;
2741 struct xhci_virt_device *virt_dev;
2742 struct xhci_input_control_ctx *ctrl_ctx;
2743 struct xhci_slot_ctx *slot_ctx;
2744 struct xhci_command *command;
2746 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2749 xhci = hcd_to_xhci(hcd);
2750 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2751 (xhci->xhc_state & XHCI_STATE_REMOVING))
2754 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2755 virt_dev = xhci->devs[udev->slot_id];
2757 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
2761 command->in_ctx = virt_dev->in_ctx;
2763 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2764 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2766 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2769 goto command_cleanup;
2771 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2772 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2773 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2775 /* Don't issue the command if there are no endpoints to update. */
2776 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2777 ctrl_ctx->drop_flags == 0) {
2779 goto command_cleanup;
2781 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
2782 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2783 for (i = 31; i >= 1; i--) {
2784 __le32 le32 = cpu_to_le32(BIT(i));
2786 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2787 || (ctrl_ctx->add_flags & le32) || i == 1) {
2788 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2789 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2793 xhci_dbg(xhci, "New Input Control Context:\n");
2794 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
2795 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2797 ret = xhci_configure_endpoint(xhci, udev, command,
2800 /* Callee should call reset_bandwidth() */
2801 goto command_cleanup;
2803 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
2804 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
2805 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2807 /* Free any rings that were dropped, but not changed. */
2808 for (i = 1; i < 31; ++i) {
2809 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2810 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2811 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2812 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2815 xhci_zero_in_ctx(xhci, virt_dev);
2817 * Install any rings for completely new endpoints or changed endpoints,
2818 * and free or cache any old rings from changed endpoints.
2820 for (i = 1; i < 31; ++i) {
2821 if (!virt_dev->eps[i].new_ring)
2823 /* Only cache or free the old ring if it exists.
2824 * It may not if this is the first add of an endpoint.
2826 if (virt_dev->eps[i].ring) {
2827 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2829 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2830 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2831 virt_dev->eps[i].new_ring = NULL;
2834 kfree(command->completion);
2840 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2842 struct xhci_hcd *xhci;
2843 struct xhci_virt_device *virt_dev;
2846 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2849 xhci = hcd_to_xhci(hcd);
2851 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2852 virt_dev = xhci->devs[udev->slot_id];
2853 /* Free any rings allocated for added endpoints */
2854 for (i = 0; i < 31; ++i) {
2855 if (virt_dev->eps[i].new_ring) {
2856 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2857 virt_dev->eps[i].new_ring = NULL;
2860 xhci_zero_in_ctx(xhci, virt_dev);
2863 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2864 struct xhci_container_ctx *in_ctx,
2865 struct xhci_container_ctx *out_ctx,
2866 struct xhci_input_control_ctx *ctrl_ctx,
2867 u32 add_flags, u32 drop_flags)
2869 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2870 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2871 xhci_slot_copy(xhci, in_ctx, out_ctx);
2872 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2874 xhci_dbg(xhci, "Input Context:\n");
2875 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
2878 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2879 unsigned int slot_id, unsigned int ep_index,
2880 struct xhci_dequeue_state *deq_state)
2882 struct xhci_input_control_ctx *ctrl_ctx;
2883 struct xhci_container_ctx *in_ctx;
2884 struct xhci_ep_ctx *ep_ctx;
2888 in_ctx = xhci->devs[slot_id]->in_ctx;
2889 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2891 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2896 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2897 xhci->devs[slot_id]->out_ctx, ep_index);
2898 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2899 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2900 deq_state->new_deq_ptr);
2902 xhci_warn(xhci, "WARN Cannot submit config ep after "
2903 "reset ep command\n");
2904 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2905 deq_state->new_deq_seg,
2906 deq_state->new_deq_ptr);
2909 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2911 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2912 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2913 xhci->devs[slot_id]->out_ctx, ctrl_ctx,
2914 added_ctxs, added_ctxs);
2917 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2918 unsigned int ep_index, struct xhci_td *td)
2920 struct xhci_dequeue_state deq_state;
2921 struct xhci_virt_ep *ep;
2922 struct usb_device *udev = td->urb->dev;
2924 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2925 "Cleaning up stalled endpoint ring");
2926 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2927 /* We need to move the HW's dequeue pointer past this TD,
2928 * or it will attempt to resend it on the next doorbell ring.
2930 xhci_find_new_dequeue_state(xhci, udev->slot_id,
2931 ep_index, ep->stopped_stream, td, &deq_state);
2933 if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
2936 /* HW with the reset endpoint quirk will use the saved dequeue state to
2937 * issue a configure endpoint command later.
2939 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2940 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2941 "Queueing new dequeue state");
2942 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2943 ep_index, ep->stopped_stream, &deq_state);
2945 /* Better hope no one uses the input context between now and the
2946 * reset endpoint completion!
2947 * XXX: No idea how this hardware will react when stream rings
2948 * are enabled.
2950 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2951 "Setting up input context for "
2952 "configure endpoint command");
2953 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2954 ep_index, &deq_state);
2958 /* Called when clearing halted device. The core should have sent the control
2959 * message to clear the device halt condition. The host side of the halt should
2960 * already be cleared with a reset endpoint command issued when the STALL tx
2961 * event was received.
2963 * Context: in_interrupt
2966 void xhci_endpoint_reset(struct usb_hcd *hcd,
2967 struct usb_host_endpoint *ep)
2969 struct xhci_hcd *xhci;
2971 xhci = hcd_to_xhci(hcd);
2974 * We might need to implement the config ep cmd in xhci 4.8.1 note:
2975 * The Reset Endpoint Command may only be issued to endpoints in the
2976 * Halted state. If software wishes to reset the Data Toggle or Sequence
2977 * Number of an endpoint that isn't in the Halted state, then software
2978 * may issue a Configure Endpoint Command with the Drop and Add bits set
2979 * for the target endpoint that is in the Stopped state.
2982 /* For now just print debug to follow the situation */
2983 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
2984 ep->desc.bEndpointAddress);
2987 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2988 struct usb_device *udev, struct usb_host_endpoint *ep,
2989 unsigned int slot_id)
2992 unsigned int ep_index;
2993 unsigned int ep_state;
2997 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3000 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3001 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3002 " descriptor for ep 0x%x does not support streams\n",
3003 ep->desc.bEndpointAddress);
3007 ep_index = xhci_get_endpoint_index(&ep->desc);
3008 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3009 if (ep_state & EP_HAS_STREAMS ||
3010 ep_state & EP_GETTING_STREAMS) {
3011 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3012 "already has streams set up.\n",
3013 ep->desc.bEndpointAddress);
3014 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3015 "dynamic stream context array reallocation.\n");
3018 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3019 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3020 "endpoint 0x%x; URBs are pending.\n",
3021 ep->desc.bEndpointAddress);
3027 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3028 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3030 unsigned int max_streams;
3032 /* The stream context array size must be a power of two */
3033 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3035 * Find out how many primary stream array entries the host controller
3036 * supports. Later we may use secondary stream arrays (similar to 2nd
3037 * level page entries), but that's an optional feature for xHCI host
3038 * controllers. xHCs must support at least 4 stream IDs.
3040 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3041 if (*num_stream_ctxs > max_streams) {
3042 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3044 *num_stream_ctxs = max_streams;
3045 *num_streams = max_streams;
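/*
 * Example (illustrative numbers): a caller asking for 13 stream IDs
 * needs roundup_pow_of_two(13) == 16 stream context entries.  If the
 * host's MaxPSASize only allows, say, 8 primary stream array entries,
 * both values are clamped to 8 and the caller simply receives fewer
 * streams than requested.
 */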
3049 /* Returns an error code if one of the endpoints already has streams.
3050 * This does not change any data structures, it only checks and gathers
3051 * information.
3053 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3054 struct usb_device *udev,
3055 struct usb_host_endpoint **eps, unsigned int num_eps,
3056 unsigned int *num_streams, u32 *changed_ep_bitmask)
3058 unsigned int max_streams;
3059 unsigned int endpoint_flag;
3063 for (i = 0; i < num_eps; i++) {
3064 ret = xhci_check_streams_endpoint(xhci, udev,
3065 eps[i], udev->slot_id);
3069 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3070 if (max_streams < (*num_streams - 1)) {
3071 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3072 eps[i]->desc.bEndpointAddress,
3074 *num_streams = max_streams+1;
3077 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3078 if (*changed_ep_bitmask & endpoint_flag)
3080 *changed_ep_bitmask |= endpoint_flag;
3085 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3086 struct usb_device *udev,
3087 struct usb_host_endpoint **eps, unsigned int num_eps)
3089 u32 changed_ep_bitmask = 0;
3090 unsigned int slot_id;
3091 unsigned int ep_index;
3092 unsigned int ep_state;
3095 slot_id = udev->slot_id;
3096 if (!xhci->devs[slot_id])
3099 for (i = 0; i < num_eps; i++) {
3100 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3101 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3102 /* Are streams already being freed for the endpoint? */
3103 if (ep_state & EP_GETTING_NO_STREAMS) {
3104 xhci_warn(xhci, "WARN Can't disable streams for "
3106 "streams are being disabled already\n",
3107 eps[i]->desc.bEndpointAddress);
3110 /* Are there actually any streams to free? */
3111 if (!(ep_state & EP_HAS_STREAMS) &&
3112 !(ep_state & EP_GETTING_STREAMS)) {
3113 xhci_warn(xhci, "WARN Can't disable streams for "
3115 "streams are already disabled!\n",
3116 eps[i]->desc.bEndpointAddress);
3117 xhci_warn(xhci, "WARN xhci_free_streams() called "
3118 "with non-streams endpoint\n");
3121 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3123 return changed_ep_bitmask;
3127 * The USB device drivers use this function (through the HCD interface in USB
3128 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
3129 * coordinate mass storage command queueing across multiple endpoints (basically
3130 * a stream ID == a task ID).
3132 * Setting up streams involves allocating the same size stream context array
3133 * for each endpoint and issuing a configure endpoint command for all endpoints.
3135 * Don't allow the call to succeed if one endpoint only supports one stream
3136 * (which means it doesn't support streams at all).
3138 * Drivers may get fewer stream IDs than they asked for, if the host controller
3139 * hardware or endpoints claim they can't support the number of requested
3140 * streams.
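/*
 * Typical use (illustrative; UAS is one in-tree user): a driver hands
 * all of its bulk endpoints to usb_alloc_streams(), which reaches this
 * routine through the HCD's alloc_streams hook.  The return value is
 * the number of usable stream IDs, which may be fewer than requested.
 */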
3142 int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3143 struct usb_host_endpoint **eps, unsigned int num_eps,
3144 unsigned int num_streams, gfp_t mem_flags)
3147 struct xhci_hcd *xhci;
3148 struct xhci_virt_device *vdev;
3149 struct xhci_command *config_cmd;
3150 struct xhci_input_control_ctx *ctrl_ctx;
3151 unsigned int ep_index;
3152 unsigned int num_stream_ctxs;
3153 unsigned long flags;
3154 u32 changed_ep_bitmask = 0;
3159 /* Add one to the number of streams requested to account for
3160 * stream 0 that is reserved for xHCI usage.
3163 xhci = hcd_to_xhci(hcd);
3164 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3167 /* MaxPSASize value 0 (2 streams) means streams are not supported */
3168 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3169 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3170 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3174 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
3176 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
3179 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3181 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3183 xhci_free_command(xhci, config_cmd);
3187 /* Check to make sure all endpoints are not already configured for
3188 * streams. While we're at it, find the maximum number of streams that
3189 * all the endpoints will support and check for duplicate endpoints.
3191 spin_lock_irqsave(&xhci->lock, flags);
3192 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3193 num_eps, &num_streams, &changed_ep_bitmask);
3195 xhci_free_command(xhci, config_cmd);
3196 spin_unlock_irqrestore(&xhci->lock, flags);
3199 if (num_streams <= 1) {
3200 xhci_warn(xhci, "WARN: endpoints can't handle "
3201 "more than one stream.\n");
3202 xhci_free_command(xhci, config_cmd);
3203 spin_unlock_irqrestore(&xhci->lock, flags);
3206 vdev = xhci->devs[udev->slot_id];
3207 /* Mark each endpoint as being in transition, so
3208 * xhci_urb_enqueue() will reject all URBs.
3210 for (i = 0; i < num_eps; i++) {
3211 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3212 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3214 spin_unlock_irqrestore(&xhci->lock, flags);
3216 /* Setup internal data structures and allocate HW data structures for
3217 * streams (but don't install the HW structures in the input context
3218 * until we're sure all memory allocation succeeded).
3220 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3221 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3222 num_stream_ctxs, num_streams);
3224 for (i = 0; i < num_eps; i++) {
3225 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3226 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3228 num_streams, mem_flags);
3229 if (!vdev->eps[ep_index].stream_info)
3231 /* Set maxPstreams in endpoint context and update deq ptr to
3232 * point to stream context array. FIXME
3236 /* Set up the input context for a configure endpoint command. */
3237 for (i = 0; i < num_eps; i++) {
3238 struct xhci_ep_ctx *ep_ctx;
3240 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3241 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3243 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3244 vdev->out_ctx, ep_index);
3245 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3246 vdev->eps[ep_index].stream_info);
3248 /* Tell the HW to drop its old copy of the endpoint context info
3249 * and add the updated copy from the input context.
3251 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3252 vdev->out_ctx, ctrl_ctx,
3253 changed_ep_bitmask, changed_ep_bitmask);
3255 /* Issue and wait for the configure endpoint command */
3256 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3259 /* xHC rejected the configure endpoint command for some reason, so we
3260 * leave the old ring intact and free our internal streams data
3261 * structure.
3266 spin_lock_irqsave(&xhci->lock, flags);
3267 for (i = 0; i < num_eps; i++) {
3268 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3269 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3270 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3271 udev->slot_id, ep_index);
3272 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3274 xhci_free_command(xhci, config_cmd);
3275 spin_unlock_irqrestore(&xhci->lock, flags);
3277 /* Subtract 1 for stream 0, which drivers can't use */
3278 return num_streams - 1;
3281 /* If it didn't work, free the streams! */
3282 for (i = 0; i < num_eps; i++) {
3283 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3284 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3285 vdev->eps[ep_index].stream_info = NULL;
3286 /* FIXME Unset maxPstreams in endpoint context and
3287 * update deq ptr to point to the normal ring.
3289 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3290 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3291 xhci_endpoint_zero(xhci, vdev, eps[i]);
3293 xhci_free_command(xhci, config_cmd);
3297 /* Transition the endpoint from using streams to being a "normal" endpoint
3298 * without streams.
3300 * Modify the endpoint context state, submit a configure endpoint command,
3301 * and free all endpoint rings for streams if that completes successfully.
3303 int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3304 struct usb_host_endpoint **eps, unsigned int num_eps,
3308 struct xhci_hcd *xhci;
3309 struct xhci_virt_device *vdev;
3310 struct xhci_command *command;
3311 struct xhci_input_control_ctx *ctrl_ctx;
3312 unsigned int ep_index;
3313 unsigned long flags;
3314 u32 changed_ep_bitmask;
3316 xhci = hcd_to_xhci(hcd);
3317 vdev = xhci->devs[udev->slot_id];
3319 /* Set up a configure endpoint command to remove the streams rings */
3320 spin_lock_irqsave(&xhci->lock, flags);
3321 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3322 udev, eps, num_eps);
3323 if (changed_ep_bitmask == 0) {
3324 spin_unlock_irqrestore(&xhci->lock, flags);
3328 /* Use the xhci_command structure from the first endpoint. We may have
3329 * allocated too many, but the driver may call xhci_free_streams() for
3330 * each endpoint it grouped into one call to xhci_alloc_streams().
3332 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3333 command = vdev->eps[ep_index].stream_info->free_streams_command;
3334 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3336 spin_unlock_irqrestore(&xhci->lock, flags);
3337 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3342 for (i = 0; i < num_eps; i++) {
3343 struct xhci_ep_ctx *ep_ctx;
3345 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3346 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3347 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3348 EP_GETTING_NO_STREAMS;
3350 xhci_endpoint_copy(xhci, command->in_ctx,
3351 vdev->out_ctx, ep_index);
3352 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3353 &vdev->eps[ep_index]);
3355 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3356 vdev->out_ctx, ctrl_ctx,
3357 changed_ep_bitmask, changed_ep_bitmask);
3358 spin_unlock_irqrestore(&xhci->lock, flags);
3360 /* Issue and wait for the configure endpoint command,
3361 * which must succeed.
3363 ret = xhci_configure_endpoint(xhci, udev, command,
3366 /* xHC rejected the configure endpoint command for some reason, so we
3367 * leave the stream rings intact.
3372 spin_lock_irqsave(&xhci->lock, flags);
3373 for (i = 0; i < num_eps; i++) {
3374 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3375 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3376 vdev->eps[ep_index].stream_info = NULL;
3377 /* FIXME Unset maxPstreams in endpoint context and
3378 * update deq ptr to point to the normal ring.
3380 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3381 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3383 spin_unlock_irqrestore(&xhci->lock, flags);
3389 * Deletes endpoint resources for endpoints that were active before a Reset
3390 * Device command, or a Disable Slot command. The Reset Device command leaves
3391 * the control endpoint intact, whereas the Disable Slot command deletes it.
3393 * Must be called with xhci->lock held.
3395 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3396 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3399 unsigned int num_dropped_eps = 0;
3400 unsigned int drop_flags = 0;
3402 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3403 if (virt_dev->eps[i].ring) {
3404 drop_flags |= 1 << i;
3408 xhci->num_active_eps -= num_dropped_eps;
3409 if (num_dropped_eps)
3410 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3411 "Dropped %u ep ctxs, flags = 0x%x, "
3413 num_dropped_eps, drop_flags,
3414 xhci->num_active_eps);
3418 * This submits a Reset Device Command, which will set the device state to 0,
3419 * set the device address to 0, and disable all the endpoints except the default
3420 * control endpoint. The USB core should come back and call
3421 * xhci_address_device(), and then re-set up the configuration. If this is
3422 * called because of a usb_reset_and_verify_device(), then the old alternate
3423 * settings will be re-installed through the normal bandwidth allocation
3424 * functions.
3426 * Wait for the Reset Device command to finish. Remove all structures
3427 * associated with the endpoints that were disabled. Clear the input device
3428 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
3430 * If the virt_dev to be reset does not exist or does not match the udev,
3431 * it means the device is lost, possibly due to the xHC restore error and
3432 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3433 * re-allocate the device.
3435 int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3438 unsigned long flags;
3439 struct xhci_hcd *xhci;
3440 unsigned int slot_id;
3441 struct xhci_virt_device *virt_dev;
3442 struct xhci_command *reset_device_cmd;
3443 int last_freed_endpoint;
3444 struct xhci_slot_ctx *slot_ctx;
3445 int old_active_eps = 0;
3447 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3450 xhci = hcd_to_xhci(hcd);
3451 slot_id = udev->slot_id;
3452 virt_dev = xhci->devs[slot_id];
3454 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3455 "not exist. Re-allocate the device\n", slot_id);
3456 ret = xhci_alloc_dev(hcd, udev);
3463 if (virt_dev->tt_info)
3464 old_active_eps = virt_dev->tt_info->active_eps;
3466 if (virt_dev->udev != udev) {
3467 /* If the virt_dev and the udev do not match, this virt_dev
3468 * may belong to another udev.
3469 * Re-allocate the device.
3471 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3472 "not match the udev. Re-allocate the device\n",
3474 ret = xhci_alloc_dev(hcd, udev);
3481 /* If device is not setup, there is no point in resetting it */
3482 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3483 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3484 SLOT_STATE_DISABLED)
3487 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3488 /* Allocate the command structure that holds the struct completion.
3489 * Assume we're in process context, since the normal device reset
3490 * process has to wait for the device anyway. Storage devices are
3491 * reset as part of error handling, so use GFP_NOIO instead of
3492 * GFP_KERNEL.
3494 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3495 if (!reset_device_cmd) {
3496 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3500 /* Attempt to submit the Reset Device command to the command ring */
3501 spin_lock_irqsave(&xhci->lock, flags);
3503 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3505 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3506 spin_unlock_irqrestore(&xhci->lock, flags);
3507 goto command_cleanup;
3509 xhci_ring_cmd_db(xhci);
3510 spin_unlock_irqrestore(&xhci->lock, flags);
3512 /* Wait for the Reset Device command to finish */
3513 wait_for_completion(reset_device_cmd->completion);
3515 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3516 * unless we tried to reset a slot ID that wasn't enabled,
3517 * or the device wasn't in the addressed or configured state.
3519 ret = reset_device_cmd->status;
3521 case COMP_CMD_ABORT:
3523 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3525 goto command_cleanup;
3526 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
3527 case COMP_CTX_STATE: /* 0.96 completion code for same thing */
3528 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3530 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3531 xhci_dbg(xhci, "Not freeing device rings.\n");
3532 /* Don't treat this as an error. May change my mind later. */
3534 goto command_cleanup;
3536 xhci_dbg(xhci, "Successful reset device command.\n");
3539 if (xhci_is_vendor_info_code(xhci, ret))
3541 xhci_warn(xhci, "Unknown completion code %u for "
3542 "reset device command.\n", ret);
3544 goto command_cleanup;
3547 /* Free up host controller endpoint resources */
3548 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3549 spin_lock_irqsave(&xhci->lock, flags);
3550 /* Don't delete the default control endpoint resources */
3551 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3552 spin_unlock_irqrestore(&xhci->lock, flags);
3555 /* Everything but endpoint 0 is disabled, so free or cache the rings. */
3556 last_freed_endpoint = 1;
3557 for (i = 1; i < 31; ++i) {
3558 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3560 if (ep->ep_state & EP_HAS_STREAMS) {
3561 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3562 xhci_get_endpoint_address(i));
3563 xhci_free_stream_info(xhci, ep->stream_info);
3564 ep->stream_info = NULL;
3565 ep->ep_state &= ~EP_HAS_STREAMS;
3569 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3570 last_freed_endpoint = i;
3572 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3573 xhci_drop_ep_from_interval_table(xhci,
3574 &virt_dev->eps[i].bw_info,
3579 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3581 /* If necessary, update the number of active TTs on this root port */
3582 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3584 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3585 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3589 xhci_free_command(xhci, reset_device_cmd);
3594 * At this point, the struct usb_device is about to go away, the device has
3595 * disconnected, and all traffic has been stopped and the endpoints have been
3596 * disabled. Free any HC data structures associated with that device.
3598 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3600 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3601 struct xhci_virt_device *virt_dev;
3602 unsigned long flags;
3605 struct xhci_command *command;
3607 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3611 #ifndef CONFIG_USB_DEFAULT_PERSIST
3613 * We called pm_runtime_get_noresume when the device was attached.
3614 * Decrement the counter here to allow the controller to runtime suspend
3615 * if no devices remain.
3617 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3618 pm_runtime_put_noidle(hcd->self.controller);
3621 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3622 /* If the host is halted due to driver unload, we still need to free the
3623 * virt_device structure.
3625 if (ret <= 0 && ret != -ENODEV) {
3630 virt_dev = xhci->devs[udev->slot_id];
3632 /* Stop any wayward timer functions (which may grab the lock) */
3633 for (i = 0; i < 31; ++i) {
3634 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
3635 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3638 spin_lock_irqsave(&xhci->lock, flags);
3639 /* Don't disable the slot if the host controller is dead. */
3640 state = readl(&xhci->op_regs->status);
3641 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3642 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3643 xhci_free_virt_device(xhci, udev->slot_id);
3644 spin_unlock_irqrestore(&xhci->lock, flags);
3649 if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3651 spin_unlock_irqrestore(&xhci->lock, flags);
3652 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3655 xhci_ring_cmd_db(xhci);
3656 spin_unlock_irqrestore(&xhci->lock, flags);
3659 * Event command completion handler will free any data structures
3660 * associated with the slot. XXX Can free sleep?
3665 * Checks if we have enough host controller resources for the default control
3666 * endpoint.
3668 * Must be called with xhci->lock held.
3670 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3672 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3673 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3674 "Not enough ep ctxs: "
3675 "%u active, need to add 1, limit is %u.",
3676 xhci->num_active_eps, xhci->limit_active_eps);
3679 xhci->num_active_eps += 1;
3680 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3681 "Adding 1 ep ctx, %u now active.",
3682 xhci->num_active_eps);
3688 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3689 * timed out, or allocating memory failed. Returns 1 on success.
3691 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3693 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3694 unsigned long flags;
3696 struct xhci_command *command;
3698 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3702 /* xhci->slot_id and xhci->addr_dev are not thread-safe */
3703 mutex_lock(&xhci->mutex);
3704 spin_lock_irqsave(&xhci->lock, flags);
3705 command->completion = &xhci->addr_dev;
3706 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3708 spin_unlock_irqrestore(&xhci->lock, flags);
3709 mutex_unlock(&xhci->mutex);
3710 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3714 xhci_ring_cmd_db(xhci);
3715 spin_unlock_irqrestore(&xhci->lock, flags);
3717 wait_for_completion(command->completion);
3718 slot_id = xhci->slot_id;
3719 mutex_unlock(&xhci->mutex);
3721 if (!slot_id || command->status != COMP_SUCCESS) {
3722 xhci_err(xhci, "Error while assigning device slot ID\n");
3723 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3725 readl(&xhci->cap_regs->hcs_params1)));
3730 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3731 spin_lock_irqsave(&xhci->lock, flags);
3732 ret = xhci_reserve_host_control_ep_resources(xhci);
3734 spin_unlock_irqrestore(&xhci->lock, flags);
3735 xhci_warn(xhci, "Not enough host resources, "
3736 "active endpoint contexts = %u\n",
3737 xhci->num_active_eps);
3740 spin_unlock_irqrestore(&xhci->lock, flags);
3742 /* Use GFP_NOIO, since this function can be called from
3743 * xhci_discover_or_reset_device(), which may be called as part of
3744 * mass storage driver error handling.
3746 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
3747 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3750 udev->slot_id = slot_id;
3752 #ifndef CONFIG_USB_DEFAULT_PERSIST
3754 * If resetting upon resume, we can't put the controller into runtime
3755 * suspend if there is a device attached.
3757 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3758 pm_runtime_get_noresume(hcd->self.controller);
3763 /* Is this a LS or FS device under a HS hub? */
3764 /* Hub or peripheral? */
3768 /* Disable slot, if we can do it without mem alloc */
3769 spin_lock_irqsave(&xhci->lock, flags);
3770 command->completion = NULL;
3771 command->status = 0;
3772 if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3774 xhci_ring_cmd_db(xhci);
3775 spin_unlock_irqrestore(&xhci->lock, flags);
3780 * Issue an Address Device command and optionally send a corresponding
3781 * SetAddress request to the device.
3783 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3784 enum xhci_setup_dev setup)
3786 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
3787 unsigned long flags;
3788 struct xhci_virt_device *virt_dev;
3790 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3791 struct xhci_slot_ctx *slot_ctx;
3792 struct xhci_input_control_ctx *ctrl_ctx;
3794 struct xhci_command *command = NULL;
3796 mutex_lock(&xhci->mutex);
3798 if (xhci->xhc_state) { /* dying, removing or halted */
3803 if (!udev->slot_id) {
3804 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3805 "Bad Slot ID %d", udev->slot_id);
3810 virt_dev = xhci->devs[udev->slot_id];
3812 if (WARN_ON(!virt_dev)) {
3814 * In a plug/unplug torture test with an NEC controller,
3815 * a zero-dereference was observed once due to virt_dev = 0.
3816 * Print useful debug rather than crash if it is observed again!
3818 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3824 if (setup == SETUP_CONTEXT_ONLY) {
3825 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3826 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3827 SLOT_STATE_DEFAULT) {
3828 xhci_dbg(xhci, "Slot already in default state\n");
3833 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3839 command->in_ctx = virt_dev->in_ctx;
3840 command->completion = &xhci->addr_dev;
3842 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3843 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
3845 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3851 * If this is the first Set Address since device plug-in or
3852 * virt_device reallocation after a resume with an xHCI power loss,
3853 * then set up the slot context.
3855 if (!slot_ctx->dev_info)
3856 xhci_setup_addressable_virt_dev(xhci, udev);
3857 /* Otherwise, update the control endpoint ring enqueue pointer. */
3859 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3860 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3861 ctrl_ctx->drop_flags = 0;
3863 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3864 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3865 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3866 le32_to_cpu(slot_ctx->dev_info) >> 27);
3868 spin_lock_irqsave(&xhci->lock, flags);
3869 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
3870 udev->slot_id, setup);
3872 spin_unlock_irqrestore(&xhci->lock, flags);
3873 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3874 "FIXME: allocate a command ring segment");
3877 xhci_ring_cmd_db(xhci);
3878 spin_unlock_irqrestore(&xhci->lock, flags);
3880 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
3881 wait_for_completion(command->completion);
3883 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
3884 * the SetAddress() "recovery interval" required by USB and aborting the
3885 * command on a timeout.
3887 switch (command->status) {
3888 case COMP_CMD_ABORT:
3890 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
3893 case COMP_CTX_STATE:
3895 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
3896 act, udev->slot_id);
3900 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
3904 dev_warn(&udev->dev,
3905 "ERROR: Incompatible device for setup %s command\n", act);
3909 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3910 "Successful setup %s command", act);
3914 "ERROR: unexpected setup %s command completion code 0x%x.\n",
3915 act, command->status);
3916 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3917 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3918 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
3924 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3925 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3926 "Op regs DCBAA ptr = %#016llx", temp_64);
3927 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3928 "Slot ID %d dcbaa entry @%p = %#016llx",
3930 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3931 (unsigned long long)
3932 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
3933 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3934 "Output Context DMA address = %#08llx",
3935 (unsigned long long)virt_dev->out_ctx->dma);
3936 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3937 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3938 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3939 le32_to_cpu(slot_ctx->dev_info) >> 27);
3940 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3941 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3943 * USB core uses address 1 for the roothubs, so we add one to the
3944 * address given back to us by the HC.
3946 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3947 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
3948 le32_to_cpu(slot_ctx->dev_info) >> 27);
3949 /* Zero the input context control for later use */
3950 ctrl_ctx->add_flags = 0;
3951 ctrl_ctx->drop_flags = 0;
3953 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3954 "Internal device address = %d",
3955 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
3957 mutex_unlock(&xhci->mutex);
3962 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3964 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
3967 int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
3969 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
3973 * Translate the logical port index into the raw port index in the HW port
3974 * status registers: calculate the offset between the port's PORTSC register
3975 * and the port status base, then divide by the number of per-port registers
3976 * to get the real index. Raw port numbers are 1-based.
3978 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
3980 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3981 __le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
3982 __le32 __iomem *addr;
3985 if (hcd->speed < HCD_USB3)
3986 addr = xhci->usb2_ports[port1 - 1];
3988 addr = xhci->usb3_ports[port1 - 1];
3990 raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
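/*
 * Worked example (a sketch, assuming NUM_PORT_REGS is 4, i.e. PORTSC,
 * PORTPMSC, PORTLI and PORTHLPMC per port): if this port's PORTSC sits
 * eight 32-bit registers past the port status base, then
 * raw_port = 8 / 4 + 1 = 3.
 */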
3995 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
3996 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
3998 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
3999 struct usb_device *udev, u16 max_exit_latency)
4001 struct xhci_virt_device *virt_dev;
4002 struct xhci_command *command;
4003 struct xhci_input_control_ctx *ctrl_ctx;
4004 struct xhci_slot_ctx *slot_ctx;
4005 unsigned long flags;
4008 spin_lock_irqsave(&xhci->lock, flags);
4010 virt_dev = xhci->devs[udev->slot_id];
4013 * virt_dev might not exist yet if the xHC resumed from hibernate (S4) and
4014 * the xHC was re-initialized. The exit latency will be set later, after
4015 * hub_port_finish_reset() is done and xhci->devs[] is re-allocated
4018 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4019 spin_unlock_irqrestore(&xhci->lock, flags);
4023 /* Attempt to issue an Evaluate Context command to change the MEL. */
4024 command = xhci->lpm_command;
4025 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4027 spin_unlock_irqrestore(&xhci->lock, flags);
4028 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4033 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4034 spin_unlock_irqrestore(&xhci->lock, flags);
4036 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4037 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4038 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4039 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4040 slot_ctx->dev_state = 0;
4042 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4043 "Set up evaluate context for LPM MEL change.");
4044 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
4045 xhci_dbg_ctx(xhci, command->in_ctx, 0);
4047 /* Issue and wait for the evaluate context command. */
4048 ret = xhci_configure_endpoint(xhci, udev, command,
4050 xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
4051 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
4054 spin_lock_irqsave(&xhci->lock, flags);
4055 virt_dev->current_mel = max_exit_latency;
4056 spin_unlock_irqrestore(&xhci->lock, flags);
4063 /* BESL to HIRD Encoding array for USB2 LPM */
4064 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4065 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4067 /* Calculate HIRD/BESL for USB2 PORTPMSC */
4068 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4069 struct usb_device *udev)
4071 int u2del, besl, besl_host;
4072 int besl_device = 0;
4075 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4076 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4078 if (field & USB_BESL_SUPPORT) {
4079 for (besl_host = 0; besl_host < 16; besl_host++) {
4080 if (xhci_besl_encoding[besl_host] >= u2del)
break;
}
4083 /* Use baseline BESL value as default */
4084 if (field & USB_BESL_BASELINE_VALID)
4085 besl_device = USB_GET_BESL_BASELINE(field);
4086 else if (field & USB_BESL_DEEP_VALID)
4087 besl_device = USB_GET_BESL_DEEP(field);
4092 besl_host = (u2del - 51) / 75 + 1;
4095 besl = besl_host + besl_device;
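/*
 * Worked example (illustrative values only): with u2del = 300 us and a
 * BESL-capable device, the loop above stops at besl_host = 3, the lowest
 * index with xhci_besl_encoding[i] >= 300. For a HIRD-only device the
 * formula gives besl_host = (300 - 51) / 75 + 1 = 4.
 */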
4102 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4103 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4110 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4112 /* The xHCI L1 timeout is programmed in steps of 256 us; xHCI 1.0 section 5.4.11.2 */
4113 l1 = udev->l1_params.timeout / 256;
4115 /* device has preferred BESLD */
4116 if (field & USB_BESL_DEEP_VALID) {
4117 besld = USB_GET_BESL_DEEP(field);
4121 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
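/*
 * Worked example (a sketch; it assumes the elided BESLD branch above sets
 * hirdm = 1): an L1 timeout of 512 us is programmed as l1 = 512 / 256 = 2,
 * so a device-preferred deep BESL of 5 yields
 * PORT_BESLD(5) | PORT_L1_TIMEOUT(2) | PORT_HIRDM(1) in PORTHLPMC.
 */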
4124 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4125 struct usb_device *udev, int enable)
4127 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4128 __le32 __iomem **port_array;
4129 __le32 __iomem *pm_addr, *hlpm_addr;
4130 u32 pm_val, hlpm_val, field;
4131 unsigned int port_num;
4132 unsigned long flags;
4133 int hird, exit_latency;
4136 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4140 if (!udev->parent || udev->parent->parent ||
4141 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4144 if (udev->usb2_hw_lpm_capable != 1)
4147 spin_lock_irqsave(&xhci->lock, flags);
4149 port_array = xhci->usb2_ports;
4150 port_num = udev->portnum - 1;
4151 pm_addr = port_array[port_num] + PORTPMSC;
4152 pm_val = readl(pm_addr);
4153 hlpm_addr = port_array[port_num] + PORTHLPMC;
4154 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4156 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4157 enable ? "enable" : "disable", port_num + 1);
4160 /* Host supports BESL timeout instead of HIRD */
4161 if (udev->usb2_hw_lpm_besl_capable) {
4162 /* If the device doesn't have a preferred BESL value, use a
4163 * default one that works with mixed HIRD and BESL
4164 * systems. See the XHCI_DEFAULT_BESL definition in xhci.h.
4166 if ((field & USB_BESL_SUPPORT) &&
4167 (field & USB_BESL_BASELINE_VALID))
4168 hird = USB_GET_BESL_BASELINE(field);
4170 hird = udev->l1_params.besl;
4172 exit_latency = xhci_besl_encoding[hird];
4173 spin_unlock_irqrestore(&xhci->lock, flags);
4175 /* The USB 3.0 code dedicates one input context,
4176 * xhci->lpm_command->in_ctx, to link power management
4177 * evaluate context commands. It is protected by the
4178 * hcd->bandwidth mutex and is shared by all devices. We need
4179 * to set the max exit latency in USB 2 BESL LPM as well, so
4180 * use the same mutex and xhci_change_max_exit_latency().
4182 mutex_lock(hcd->bandwidth_mutex);
4183 ret = xhci_change_max_exit_latency(xhci, udev,
4185 mutex_unlock(hcd->bandwidth_mutex);
4189 spin_lock_irqsave(&xhci->lock, flags);
4191 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4192 writel(hlpm_val, hlpm_addr);
4196 hird = xhci_calculate_hird_besl(xhci, udev);
4199 pm_val &= ~PORT_HIRD_MASK;
4200 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4201 writel(pm_val, pm_addr);
4202 pm_val = readl(pm_addr);
pm_val |= PORT_HLE;
4204 writel(pm_val, pm_addr);
4208 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4209 writel(pm_val, pm_addr);
4212 if (udev->usb2_hw_lpm_besl_capable) {
4213 spin_unlock_irqrestore(&xhci->lock, flags);
4214 mutex_lock(hcd->bandwidth_mutex);
4215 xhci_change_max_exit_latency(xhci, udev, 0);
4216 mutex_unlock(hcd->bandwidth_mutex);
4221 spin_unlock_irqrestore(&xhci->lock, flags);
4225 /* Check whether a USB2 port supports a given extended capability protocol.
4226 * Only USB2 ports' extended protocol capability values are cached.
4227 * Returns 1 if the capability is supported, 0 otherwise.
4229 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4230 unsigned capability)
4232 u32 port_offset, port_count;
4235 for (i = 0; i < xhci->num_ext_caps; i++) {
4236 if (xhci->ext_caps[i] & capability) {
4237 /* port offsets start at 1 */
4238 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4239 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4240 if (port >= port_offset &&
4241 port < port_offset + port_count)
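/*
 * Worked example (illustrative): an extended capability entry with a port
 * offset field of 1 and a port count of 4 covers zero-based ports 0..3
 * after the "- 1" adjustment above, so port 2 would report the capability.
 */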
4248 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4250 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4251 int portnum = udev->portnum - 1;
4253 if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
4257 /* so far we only support LPM for non-hub devices connected to the root hub */
4258 if (!udev->parent || udev->parent->parent ||
4259 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4262 if (xhci->hw_lpm_support == 1 &&
4263 xhci_check_usb2_port_capability(
4264 xhci, portnum, XHCI_HLC)) {
4265 udev->usb2_hw_lpm_capable = 1;
4266 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4267 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4268 if (xhci_check_usb2_port_capability(xhci, portnum,
4270 udev->usb2_hw_lpm_besl_capable = 1;
4276 /*---------------------- USB 3.0 Link PM functions ------------------------*/
4278 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
4279 static unsigned long long xhci_service_interval_to_ns(
4280 struct usb_endpoint_descriptor *desc)
4282 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
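/*
 * Worked example: a periodic endpoint with bInterval = 4 has a service
 * interval of 2^(4 - 1) * 125 us = 1 ms, returned here as 1,000,000 ns.
 */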
4285 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4286 enum usb3_link_state state)
4288 unsigned long long sel;
4289 unsigned long long pel;
4290 unsigned int max_sel_pel;
4295 /* Convert SEL and PEL stored in nanoseconds to microseconds */
4296 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4297 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4298 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4302 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4303 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4304 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4308 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4310 return USB3_LPM_DISABLED;
4313 if (sel <= max_sel_pel && pel <= max_sel_pel)
4314 return USB3_LPM_DEVICE_INITIATED;
4316 if (sel > max_sel_pel)
4317 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4318 "due to long SEL %llu us\n",
state_name, sel);
else
4321 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4322 "due to long PEL %llu us\n",
state_name, pel);
4324 return USB3_LPM_DISABLED;
4327 /* The U1 timeout should be the maximum of the following values:
4328 * - For control endpoints, U1 system exit latency (SEL) * 3
4329 * - For bulk endpoints, U1 SEL * 5
4330 * - For interrupt endpoints:
4331 * - Notification EPs, U1 SEL * 3
4332 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
4333 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
4335 static unsigned long long xhci_calculate_intel_u1_timeout(
4336 struct usb_device *udev,
4337 struct usb_endpoint_descriptor *desc)
4339 unsigned long long timeout_ns;
4343 ep_type = usb_endpoint_type(desc);
4345 case USB_ENDPOINT_XFER_CONTROL:
4346 timeout_ns = udev->u1_params.sel * 3;
4348 case USB_ENDPOINT_XFER_BULK:
4349 timeout_ns = udev->u1_params.sel * 5;
4351 case USB_ENDPOINT_XFER_INT:
4352 intr_type = usb_endpoint_interrupt_type(desc);
4353 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4354 timeout_ns = udev->u1_params.sel * 3;
4357 /* Otherwise the calculation is the same as for isoc eps - fall through */
4358 case USB_ENDPOINT_XFER_ISOC:
4359 timeout_ns = xhci_service_interval_to_ns(desc);
4360 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4361 if (timeout_ns < udev->u1_params.sel * 2)
4362 timeout_ns = udev->u1_params.sel * 2;
4371 /* Returns the hub-encoded U1 timeout value. */
4372 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4373 struct usb_device *udev,
4374 struct usb_endpoint_descriptor *desc)
4376 unsigned long long timeout_ns;
4378 if (xhci->quirks & XHCI_INTEL_HOST)
4379 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4381 timeout_ns = udev->u1_params.sel;
4383 /* The U1 timeout is encoded in 1us intervals.
4384 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
4386 if (timeout_ns == USB3_LPM_DISABLED)
4389 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4391 /* If the necessary timeout value is bigger than what we can set in the
4392 * USB 3.0 hub, we have to disable hub-initiated U1.
4394 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4396 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4397 "due to long timeout %llu us\n", timeout_ns);
4398 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
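/*
 * Worked U1 example (illustrative values, assuming the usual 0x7F upper
 * bound for USB3_LPM_U1_MAX_TIMEOUT): an Intel host with a bulk endpoint
 * and a U1 SEL of 4800 ns gets timeout_ns = 4800 * 5 = 24000 ns, which
 * encodes to DIV_ROUND_UP(24000, 1000) = 24 us, well under the limit, so
 * hub-initiated U1 stays enabled.
 */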
4401 /* The U2 timeout should be the maximum of:
4402 * - 10 ms (to avoid the bandwidth impact on the scheduler)
4403 * - largest bInterval of any active periodic endpoint (to avoid going
4404 * into lower power link states between intervals).
4405 * - the U2 Exit Latency of the device
4407 static unsigned long long xhci_calculate_intel_u2_timeout(
4408 struct usb_device *udev,
4409 struct usb_endpoint_descriptor *desc)
4411 unsigned long long timeout_ns;
4412 unsigned long long u2_del_ns;
4414 timeout_ns = 10 * 1000 * 1000;
4416 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4417 (xhci_service_interval_to_ns(desc) > timeout_ns))
4418 timeout_ns = xhci_service_interval_to_ns(desc);
4420 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4421 if (u2_del_ns > timeout_ns)
4422 timeout_ns = u2_del_ns;
4427 /* Returns the hub-encoded U2 timeout value. */
4428 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4429 struct usb_device *udev,
4430 struct usb_endpoint_descriptor *desc)
4432 unsigned long long timeout_ns;
4434 if (xhci->quirks & XHCI_INTEL_HOST)
4435 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4437 timeout_ns = udev->u2_params.sel;
4439 /* The U2 timeout is encoded in 256us intervals */
4440 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4441 /* If the necessary timeout value is bigger than what we can set in the
4442 * USB 3.0 hub, we have to disable hub-initiated U2.
4444 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4446 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4447 "due to long timeout of %llu * 256 us\n", timeout_ns);
4448 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
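/*
 * Worked U2 example: the 10 ms Intel minimum encodes to
 * DIV_ROUND_UP_ULL(10 * 1000 * 1000, 256 * 1000) = 40, i.e. the hub is
 * programmed with a 40 * 256 us = 10.24 ms inactivity timeout.
 */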
4451 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4452 struct usb_device *udev,
4453 struct usb_endpoint_descriptor *desc,
4454 enum usb3_link_state state,
4457 if (state == USB3_LPM_U1)
4458 return xhci_calculate_u1_timeout(xhci, udev, desc);
4459 else if (state == USB3_LPM_U2)
4460 return xhci_calculate_u2_timeout(xhci, udev, desc);
4462 return USB3_LPM_DISABLED;
4465 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4466 struct usb_device *udev,
4467 struct usb_endpoint_descriptor *desc,
4468 enum usb3_link_state state,
4473 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4474 desc, state, timeout);
4476 /* If we found we can't enable hub-initiated LPM, or
4477 * the U1 or U2 exit latency was too high to allow
4478 * device-initiated LPM as well, just stop searching.
4480 if (alt_timeout == USB3_LPM_DISABLED ||
4481 alt_timeout == USB3_LPM_DEVICE_INITIATED) {
4482 *timeout = alt_timeout;
4485 if (alt_timeout > *timeout)
4486 *timeout = alt_timeout;
4490 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4491 struct usb_device *udev,
4492 struct usb_host_interface *alt,
4493 enum usb3_link_state state,
4498 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4499 if (xhci_update_timeout_for_endpoint(xhci, udev,
4500 &alt->endpoint[j].desc, state, timeout))
4507 static int xhci_check_intel_tier_policy(struct usb_device *udev,
4508 enum usb3_link_state state)
4510 struct usb_device *parent;
4511 unsigned int num_hubs;
4513 if (state == USB3_LPM_U2)
4516 /* Don't enable U1 if the device is on a 2nd tier hub or lower. */
4517 for (parent = udev->parent, num_hubs = 0; parent->parent;
4518 parent = parent->parent)
num_hubs++;

if (num_hubs < 2)
return 0;
4524 dev_dbg(&udev->dev, "Disabling U1 link state for device"
4525 " below second-tier hub.\n");
4526 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4527 "to decrease power consumption.\n");
4531 static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4532 struct usb_device *udev,
4533 enum usb3_link_state state)
4535 if (xhci->quirks & XHCI_INTEL_HOST)
4536 return xhci_check_intel_tier_policy(udev, state);
4541 /* Returns the U1 or U2 timeout that should be enabled.
4542 * If the tier check or timeout setting functions return with a non-zero exit
4543 * code, that means the timeout value has been finalized and we shouldn't look
4544 * at any more endpoints.
4546 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4547 struct usb_device *udev, enum usb3_link_state state)
4549 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4550 struct usb_host_config *config;
4553 u16 timeout = USB3_LPM_DISABLED;
4555 if (state == USB3_LPM_U1)
4557 else if (state == USB3_LPM_U2)
4560 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4565 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4568 /* Gather some information about the currently installed configuration
4569 * and alternate interface settings.
4571 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4575 config = udev->actconfig;
4579 for (i = 0; i < config->desc.bNumInterfaces; i++) {
4580 struct usb_driver *driver;
4581 struct usb_interface *intf = config->interface[i];
4586 /* Check if any currently bound drivers want hub-initiated LPM
* disabled.
*/
4589 if (intf->dev.driver) {
4590 driver = to_usb_driver(intf->dev.driver);
4591 if (driver && driver->disable_hub_initiated_lpm) {
4592 dev_dbg(&udev->dev, "Hub-initiated %s disabled "
4593 "at request of driver %s\n",
4594 state_name, driver->name);
4595 return xhci_get_timeout_no_hub_lpm(udev, state);
4599 /* Not sure how this could happen... */
4600 if (!intf->cur_altsetting)
4603 if (xhci_update_timeout_for_interface(xhci, udev,
4604 intf->cur_altsetting,
4611 static int calculate_max_exit_latency(struct usb_device *udev,
4612 enum usb3_link_state state_changed,
4613 u16 hub_encoded_timeout)
4615 unsigned long long u1_mel_us = 0;
4616 unsigned long long u2_mel_us = 0;
4617 unsigned long long mel_us = 0;
4623 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4624 hub_encoded_timeout == USB3_LPM_DISABLED);
4625 disabling_u2 = (state_changed == USB3_LPM_U2 &&
4626 hub_encoded_timeout == USB3_LPM_DISABLED);
4628 enabling_u1 = (state_changed == USB3_LPM_U1 &&
4629 hub_encoded_timeout != USB3_LPM_DISABLED);
4630 enabling_u2 = (state_changed == USB3_LPM_U2 &&
4631 hub_encoded_timeout != USB3_LPM_DISABLED);
4633 /* If U1 was already enabled and we're not disabling it,
4634 * or we're going to enable U1, account for the U1 max exit latency.
4636 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4638 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4639 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4641 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4643 if (u1_mel_us > u2_mel_us)
4647 /* xHCI host controller max exit latency field is only 16 bits wide. */
4648 if (mel_us > MAX_EXIT) {
4649 dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
4650 "is too big.\n", mel_us);
4656 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
4657 int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4658 struct usb_device *udev, enum usb3_link_state state)
4660 struct xhci_hcd *xhci;
4661 u16 hub_encoded_timeout;
4665 xhci = hcd_to_xhci(hcd);
4666 /* The LPM timeout values are pretty host-controller specific, so don't
4667 * enable hub-initiated timeouts unless the vendor has provided
4668 * information about their timeout algorithm.
4670 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4671 !xhci->devs[udev->slot_id])
4672 return USB3_LPM_DISABLED;
4674 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
4675 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4677 /* Max Exit Latency is too big, disable LPM. */
4678 hub_encoded_timeout = USB3_LPM_DISABLED;
4682 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4685 return hub_encoded_timeout;
4688 int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4689 struct usb_device *udev, enum usb3_link_state state)
4691 struct xhci_hcd *xhci;
4694 xhci = hcd_to_xhci(hcd);
4695 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4696 !xhci->devs[udev->slot_id])
4699 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
4700 return xhci_change_max_exit_latency(xhci, udev, mel);
4702 #else /* CONFIG_PM */
4704 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4705 struct usb_device *udev, int enable)
4710 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4715 int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4716 struct usb_device *udev, enum usb3_link_state state)
4718 return USB3_LPM_DISABLED;
4721 int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4722 struct usb_device *udev, enum usb3_link_state state)
4726 #endif /* CONFIG_PM */
4728 /*-------------------------------------------------------------------------*/
4730 /* Once a hub descriptor is fetched for a device, we need to update the xHC's
4731 * internal data structures for the device.
4733 int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
4734 struct usb_tt *tt, gfp_t mem_flags)
4736 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4737 struct xhci_virt_device *vdev;
4738 struct xhci_command *config_cmd;
4739 struct xhci_input_control_ctx *ctrl_ctx;
4740 struct xhci_slot_ctx *slot_ctx;
4741 unsigned long flags;
4742 unsigned think_time;
4745 /* Ignore root hubs */
4749 vdev = xhci->devs[hdev->slot_id];
4751 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
4754 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
4756 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
4759 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
4761 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4763 xhci_free_command(xhci, config_cmd);
4767 spin_lock_irqsave(&xhci->lock, flags);
4768 if (hdev->speed == USB_SPEED_HIGH &&
4769 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
4770 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
4771 xhci_free_command(xhci, config_cmd);
4772 spin_unlock_irqrestore(&xhci->lock, flags);
4776 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
4777 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4778 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
4779 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
4781 * Refer to section 6.2.2: MTT should be 0 for a full-speed hub,
4782 * but it may already have been set to 1 when the xHCI virtual
4783 * device was set up, so clear it anyway.
4786 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
4787 else if (hdev->speed == USB_SPEED_FULL)
4788 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
4790 if (xhci->hci_version > 0x95) {
4791 xhci_dbg(xhci, "xHCI version %x needs hub "
4792 "TT think time and number of ports\n",
4793 (unsigned int) xhci->hci_version);
4794 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
4795 /* Set TT think time - convert from ns to FS bit times.
4796 * 0 = 8 FS bit times, 1 = 16 FS bit times,
4797 * 2 = 24 FS bit times, 3 = 32 FS bit times.
4799 * xHCI 1.0: this field shall be 0 if the device is not a
* high-speed hub.
*/
4802 think_time = tt->think_time;
4803 if (think_time != 0)
4804 think_time = (think_time / 666) - 1;
4805 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
4806 slot_ctx->tt_info |=
4807 cpu_to_le32(TT_THINK_TIME(think_time));
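/*
 * Worked example: tt->think_time is in ns, where 666 ns is roughly 8 FS
 * bit times at 12 Mb/s. A hub reporting 1332 ns is therefore encoded as
 * 1332 / 666 - 1 = 1, i.e. "16 FS bit times", in the slot context.
 */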
4809 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
4810 "TT think time or number of ports\n",
4811 (unsigned int) xhci->hci_version);
4813 slot_ctx->dev_state = 0;
4814 spin_unlock_irqrestore(&xhci->lock, flags);
4816 xhci_dbg(xhci, "Set up %s for hub device.\n",
4817 (xhci->hci_version > 0x95) ?
4818 "configure endpoint" : "evaluate context");
4819 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
4820 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
4822 /* Issue and wait for the configure endpoint or
4823 * evaluate context command.
4825 if (xhci->hci_version > 0x95)
4826 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4829 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4832 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
4833 xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
4835 xhci_free_command(xhci, config_cmd);
4839 int xhci_get_frame(struct usb_hcd *hcd)
4841 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4842 /* EHCI mods by the periodic size. Why? */
4843 return readl(&xhci->run_regs->microframe_index) >> 3;
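/*
 * The microframe index counts 125 us microframes; shifting right by 3
 * divides by 8 to yield the 1 ms frame number, e.g. microframe index 17
 * maps to frame 2.
 */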
4846 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4848 struct xhci_hcd *xhci;
4849 struct device *dev = hcd->self.controller;
4852 /* Accept arbitrarily long scatter-gather lists */
4853 hcd->self.sg_tablesize = ~0;
4855 /* support to build packet from discontinuous buffers */
4856 hcd->self.no_sg_constraint = 1;
4858 /* XHCI controllers don't stop the ep queue on short packets :| */
4859 hcd->self.no_stop_on_short = 1;
4861 xhci = hcd_to_xhci(hcd);
4863 if (usb_hcd_is_primary_hcd(hcd)) {
4864 xhci->main_hcd = hcd;
4865 /* Mark the first roothub as being USB 2.0.
4866 * The xHCI driver will register the USB 3.0 roothub.
4868 hcd->speed = HCD_USB2;
4869 hcd->self.root_hub->speed = USB_SPEED_HIGH;
4871 * The USB 2.0 roothub under xHCI has an integrated TT
4872 * (rate matching hub), as opposed to having an OHCI/UHCI
4873 * companion controller.
4877 if (xhci->sbrn == 0x31) {
4878 xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
4879 hcd->speed = HCD_USB31;
4881 /* xHCI private pointer was set in xhci_pci_probe for the second
4882 * registered roothub.
4887 mutex_init(&xhci->mutex);
4888 xhci->cap_regs = hcd->regs;
4889 xhci->op_regs = hcd->regs +
4890 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
4891 xhci->run_regs = hcd->regs +
4892 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
4893 /* Cache read-only capability registers */
4894 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
4895 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
4896 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
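/*
 * hc_capbase carries HCIVERSION in its upper 16 bits, so it is read first
 * to extract the interface version; hcc_params is then overwritten with
 * the real HCC capability register below.
 */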
4897 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
4898 xhci->hci_version = HC_VERSION(xhci->hcc_params);
4899 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
4900 if (xhci->hci_version > 0x100)
4901 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
4902 xhci_print_registers(xhci);
4904 xhci->quirks = quirks;
4906 get_quirks(dev, xhci);
4908 /* xHCI controllers that follow the xHCI 1.0 spec give a spurious
4909 * success event after a short transfer. This quirk makes the driver
* ignore such events.
*/
4912 if (xhci->hci_version > 0x96)
4913 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
4915 /* Make sure the HC is halted. */
4916 retval = xhci_halt(xhci);
4920 xhci_dbg(xhci, "Resetting HCD\n");
4921 /* Reset the internal HC memory state and registers. */
4922 retval = xhci_reset(xhci);
4925 xhci_dbg(xhci, "Reset complete\n");
4927 /* Set dma_mask and coherent_dma_mask to 64 bits,
4928 * if the xHC supports 64-bit addressing */
4929 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
4930 !dma_set_mask(dev, DMA_BIT_MASK(64))) {
4931 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4932 dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
4935 * This is to avoid errors in cases where a 32-bit USB
4936 * controller is used on a 64-bit capable system.
4938 retval = dma_set_mask(dev, DMA_BIT_MASK(32));
4941 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
4942 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
4945 xhci_dbg(xhci, "Calling HCD init\n");
4946 /* Initialize HCD and host controller data structures. */
4947 retval = xhci_init(hcd);
4950 xhci_dbg(xhci, "Called HCD init\n");
4952 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n",
4953 xhci->hcc_params, xhci->hci_version, xhci->quirks);
4957 EXPORT_SYMBOL_GPL(xhci_gen_setup);
4959 static const struct hc_driver xhci_hc_driver = {
4960 .description = "xhci-hcd",
4961 .product_desc = "xHCI Host Controller",
4962 .hcd_priv_size = sizeof(struct xhci_hcd *),
4965 * generic hardware linkage
4968 .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,
4971 * basic lifecycle operations
4973 .reset = NULL, /* set in xhci_init_driver() */
4976 .shutdown = xhci_shutdown,
4979 * managing i/o requests and associated device resources
4981 .urb_enqueue = xhci_urb_enqueue,
4982 .urb_dequeue = xhci_urb_dequeue,
4983 .alloc_dev = xhci_alloc_dev,
4984 .free_dev = xhci_free_dev,
4985 .alloc_streams = xhci_alloc_streams,
4986 .free_streams = xhci_free_streams,
4987 .add_endpoint = xhci_add_endpoint,
4988 .drop_endpoint = xhci_drop_endpoint,
4989 .endpoint_reset = xhci_endpoint_reset,
4990 .check_bandwidth = xhci_check_bandwidth,
4991 .reset_bandwidth = xhci_reset_bandwidth,
4992 .address_device = xhci_address_device,
4993 .enable_device = xhci_enable_device,
4994 .update_hub_device = xhci_update_hub_device,
4995 .reset_device = xhci_discover_or_reset_device,
4998 * scheduling support
5000 .get_frame_number = xhci_get_frame,
5005 .hub_control = xhci_hub_control,
5006 .hub_status_data = xhci_hub_status_data,
5007 .bus_suspend = xhci_bus_suspend,
5008 .bus_resume = xhci_bus_resume,
5011 * call back when device connected and addressed
5013 .update_device = xhci_update_device,
5014 .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
5015 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
5016 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
5017 .find_raw_port_number = xhci_find_raw_port_number,
5020 void xhci_init_driver(struct hc_driver *drv,
5021 const struct xhci_driver_overrides *over)
5025 /* Copy the generic table to drv then apply the overrides */
5026 *drv = xhci_hc_driver;
5029 drv->hcd_priv_size += over->extra_priv_size;
5031 drv->reset = over->reset;
5033 drv->start = over->start;
5036 EXPORT_SYMBOL_GPL(xhci_init_driver);
5038 MODULE_DESCRIPTION(DRIVER_DESC);
5039 MODULE_AUTHOR(DRIVER_AUTHOR);
5040 MODULE_LICENSE("GPL");
5042 static int __init xhci_hcd_init(void)
5045 * Check the compiler-generated sizes of structures that must be laid
5046 * out in specific ways for hardware access.
5048 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
5049 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
5050 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
5051 /* xhci_device_control has eight fields, and also
5052 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
5054 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
5055 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
5056 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
5057 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
5058 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
5059 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
5060 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
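/*
 * In the arithmetic above, each "32/8" term is one 32-bit register in
 * bytes: e.g. struct xhci_slot_ctx must be 8 * 4 = 32 bytes and the
 * doorbell array 256 * 4 = 1024 bytes.
 */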
5069 * If an init function is provided, an exit function must also be provided
5070 * to allow module unload.
5072 static void __exit xhci_hcd_fini(void) { }
5074 module_init(xhci_hcd_init);
5075 module_exit(xhci_hcd_fini);