usb: ehci: add rockchip relinquishing port quirk support
[firefly-linux-kernel-4.4.55.git] / drivers / usb / host / xhci-dbg.c
1 /*
2  * xHCI host controller driver
3  *
4  * Copyright (C) 2008 Intel Corp.
5  *
6  * Author: Sarah Sharp
7  * Some code borrowed from the Linux EHCI driver.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16  * for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software Foundation,
20  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22
23 #include "xhci.h"
24
25 #define XHCI_INIT_VALUE 0x0
26
27 /* Add verbose debugging later, just print everything for now */
28
29 void xhci_dbg_regs(struct xhci_hcd *xhci)
30 {
31         u32 temp;
32
33         xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
34                         xhci->cap_regs);
35         temp = readl(&xhci->cap_regs->hc_capbase);
36         xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
37                         &xhci->cap_regs->hc_capbase, temp);
38         xhci_dbg(xhci, "//   CAPLENGTH: 0x%x\n",
39                         (unsigned int) HC_LENGTH(temp));
40 #if 0
41         xhci_dbg(xhci, "//   HCIVERSION: 0x%x\n",
42                         (unsigned int) HC_VERSION(temp));
43 #endif
44
45         xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
46
47         temp = readl(&xhci->cap_regs->run_regs_off);
48         xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
49                         &xhci->cap_regs->run_regs_off,
50                         (unsigned int) temp & RTSOFF_MASK);
51         xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
52
53         temp = readl(&xhci->cap_regs->db_off);
54         xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
55         xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
56 }
57
58 static void xhci_print_cap_regs(struct xhci_hcd *xhci)
59 {
60         u32 temp;
61         u32 hci_version;
62
63         xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
64
65         temp = readl(&xhci->cap_regs->hc_capbase);
66         hci_version = HC_VERSION(temp);
67         xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
68                         (unsigned int) temp);
69         xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
70                         (unsigned int) HC_LENGTH(temp));
71         xhci_dbg(xhci, "HCIVERSION: 0x%x\n", hci_version);
72
73         temp = readl(&xhci->cap_regs->hcs_params1);
74         xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
75                         (unsigned int) temp);
76         xhci_dbg(xhci, "  Max device slots: %u\n",
77                         (unsigned int) HCS_MAX_SLOTS(temp));
78         xhci_dbg(xhci, "  Max interrupters: %u\n",
79                         (unsigned int) HCS_MAX_INTRS(temp));
80         xhci_dbg(xhci, "  Max ports: %u\n",
81                         (unsigned int) HCS_MAX_PORTS(temp));
82
83         temp = readl(&xhci->cap_regs->hcs_params2);
84         xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
85                         (unsigned int) temp);
86         xhci_dbg(xhci, "  Isoc scheduling threshold: %u\n",
87                         (unsigned int) HCS_IST(temp));
88         xhci_dbg(xhci, "  Maximum allowed segments in event ring: %u\n",
89                         (unsigned int) HCS_ERST_MAX(temp));
90
91         temp = readl(&xhci->cap_regs->hcs_params3);
92         xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
93                         (unsigned int) temp);
94         xhci_dbg(xhci, "  Worst case U1 device exit latency: %u\n",
95                         (unsigned int) HCS_U1_LATENCY(temp));
96         xhci_dbg(xhci, "  Worst case U2 device exit latency: %u\n",
97                         (unsigned int) HCS_U2_LATENCY(temp));
98
99         temp = readl(&xhci->cap_regs->hcc_params);
100         xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
101         xhci_dbg(xhci, "  HC generates %s bit addresses\n",
102                         HCC_64BIT_ADDR(temp) ? "64" : "32");
103         xhci_dbg(xhci, "  HC %s Contiguous Frame ID Capability\n",
104                         HCC_CFC(temp) ? "has" : "hasn't");
105         xhci_dbg(xhci, "  HC %s generate Stopped - Short Package event\n",
106                         HCC_SPC(temp) ? "can" : "can't");
107         /* FIXME */
108         xhci_dbg(xhci, "  FIXME: more HCCPARAMS debugging\n");
109
110         temp = readl(&xhci->cap_regs->run_regs_off);
111         xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
112
113         /* xhci 1.1 controllers have the HCCPARAMS2 register */
114         if (hci_version > 100) {
115                 temp = readl(&xhci->cap_regs->hcc_params2);
116                 xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp);
117                 xhci_dbg(xhci, "  HC %s Force save context capability",
118                          HCC2_FSC(temp) ? "supports" : "doesn't support");
119                 xhci_dbg(xhci, "  HC %s Large ESIT Payload Capability",
120                          HCC2_LEC(temp) ? "supports" : "doesn't support");
121                 xhci_dbg(xhci, "  HC %s Extended TBC capability",
122                          HCC2_ETC(temp) ? "supports" : "doesn't support");
123         }
124 }
125
126 static void xhci_print_command_reg(struct xhci_hcd *xhci)
127 {
128         u32 temp;
129
130         temp = readl(&xhci->op_regs->command);
131         xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
132         xhci_dbg(xhci, "  HC is %s\n",
133                         (temp & CMD_RUN) ? "running" : "being stopped");
134         xhci_dbg(xhci, "  HC has %sfinished hard reset\n",
135                         (temp & CMD_RESET) ? "not " : "");
136         xhci_dbg(xhci, "  Event Interrupts %s\n",
137                         (temp & CMD_EIE) ? "enabled " : "disabled");
138         xhci_dbg(xhci, "  Host System Error Interrupts %s\n",
139                         (temp & CMD_HSEIE) ? "enabled " : "disabled");
140         xhci_dbg(xhci, "  HC has %sfinished light reset\n",
141                         (temp & CMD_LRESET) ? "not " : "");
142 }
143
144 static void xhci_print_status(struct xhci_hcd *xhci)
145 {
146         u32 temp;
147
148         temp = readl(&xhci->op_regs->status);
149         xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
150         xhci_dbg(xhci, "  Event ring is %sempty\n",
151                         (temp & STS_EINT) ? "not " : "");
152         xhci_dbg(xhci, "  %sHost System Error\n",
153                         (temp & STS_FATAL) ? "WARNING: " : "No ");
154         xhci_dbg(xhci, "  HC is %s\n",
155                         (temp & STS_HALT) ? "halted" : "running");
156 }
157
158 static void xhci_print_op_regs(struct xhci_hcd *xhci)
159 {
160         xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
161         xhci_print_command_reg(xhci);
162         xhci_print_status(xhci);
163 }
164
165 static void xhci_print_ports(struct xhci_hcd *xhci)
166 {
167         __le32 __iomem *addr;
168         int i, j;
169         int ports;
170         char *names[NUM_PORT_REGS] = {
171                 "status",
172                 "power",
173                 "link",
174                 "reserved",
175         };
176
177         ports = HCS_MAX_PORTS(xhci->hcs_params1);
178         addr = &xhci->op_regs->port_status_base;
179         for (i = 0; i < ports; i++) {
180                 for (j = 0; j < NUM_PORT_REGS; ++j) {
181                         xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
182                                         addr, names[j],
183                                         (unsigned int) readl(addr));
184                         addr++;
185                 }
186         }
187 }
188
189 void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
190 {
191         struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
192         void __iomem *addr;
193         u32 temp;
194         u64 temp_64;
195
196         addr = &ir_set->irq_pending;
197         temp = readl(addr);
198         if (temp == XHCI_INIT_VALUE)
199                 return;
200
201         xhci_dbg(xhci, "  %p: ir_set[%i]\n", ir_set, set_num);
202
203         xhci_dbg(xhci, "  %p: ir_set.pending = 0x%x\n", addr,
204                         (unsigned int)temp);
205
206         addr = &ir_set->irq_control;
207         temp = readl(addr);
208         xhci_dbg(xhci, "  %p: ir_set.control = 0x%x\n", addr,
209                         (unsigned int)temp);
210
211         addr = &ir_set->erst_size;
212         temp = readl(addr);
213         xhci_dbg(xhci, "  %p: ir_set.erst_size = 0x%x\n", addr,
214                         (unsigned int)temp);
215
216         addr = &ir_set->rsvd;
217         temp = readl(addr);
218         if (temp != XHCI_INIT_VALUE)
219                 xhci_dbg(xhci, "  WARN: %p: ir_set.rsvd = 0x%x\n",
220                                 addr, (unsigned int)temp);
221
222         addr = &ir_set->erst_base;
223         temp_64 = xhci_read_64(xhci, addr);
224         xhci_dbg(xhci, "  %p: ir_set.erst_base = @%08llx\n",
225                         addr, temp_64);
226
227         addr = &ir_set->erst_dequeue;
228         temp_64 = xhci_read_64(xhci, addr);
229         xhci_dbg(xhci, "  %p: ir_set.erst_dequeue = @%08llx\n",
230                         addr, temp_64);
231 }
232
233 void xhci_print_run_regs(struct xhci_hcd *xhci)
234 {
235         u32 temp;
236         int i;
237
238         xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
239         temp = readl(&xhci->run_regs->microframe_index);
240         xhci_dbg(xhci, "  %p: Microframe index = 0x%x\n",
241                         &xhci->run_regs->microframe_index,
242                         (unsigned int) temp);
243         for (i = 0; i < 7; ++i) {
244                 temp = readl(&xhci->run_regs->rsvd[i]);
245                 if (temp != XHCI_INIT_VALUE)
246                         xhci_dbg(xhci, "  WARN: %p: Rsvd[%i] = 0x%x\n",
247                                         &xhci->run_regs->rsvd[i],
248                                         i, (unsigned int) temp);
249         }
250 }
251
/* Full register dump: capability registers, operational registers, ports. */
void xhci_print_registers(struct xhci_hcd *xhci)
{
	xhci_print_cap_regs(xhci);
	xhci_print_op_regs(xhci);
	xhci_print_ports(xhci);
}
258
259 void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
260 {
261         int i;
262         for (i = 0; i < 4; ++i)
263                 xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
264                                 i*4, trb->generic.field[i]);
265 }
266
267 /**
268  * Debug a transfer request block (TRB).
269  */
270 void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
271 {
272         u64     address;
273         u32     type = le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK;
274
275         switch (type) {
276         case TRB_TYPE(TRB_LINK):
277                 xhci_dbg(xhci, "Link TRB:\n");
278                 xhci_print_trb_offsets(xhci, trb);
279
280                 address = le64_to_cpu(trb->link.segment_ptr);
281                 xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
282
283                 xhci_dbg(xhci, "Interrupter target = 0x%x\n",
284                          GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target)));
285                 xhci_dbg(xhci, "Cycle bit = %u\n",
286                          le32_to_cpu(trb->link.control) & TRB_CYCLE);
287                 xhci_dbg(xhci, "Toggle cycle bit = %u\n",
288                          le32_to_cpu(trb->link.control) & LINK_TOGGLE);
289                 xhci_dbg(xhci, "No Snoop bit = %u\n",
290                          le32_to_cpu(trb->link.control) & TRB_NO_SNOOP);
291                 break;
292         case TRB_TYPE(TRB_TRANSFER):
293                 address = le64_to_cpu(trb->trans_event.buffer);
294                 /*
295                  * FIXME: look at flags to figure out if it's an address or if
296                  * the data is directly in the buffer field.
297                  */
298                 xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
299                 break;
300         case TRB_TYPE(TRB_COMPLETION):
301                 address = le64_to_cpu(trb->event_cmd.cmd_trb);
302                 xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
303                 xhci_dbg(xhci, "Completion status = %u\n",
304                          GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status)));
305                 xhci_dbg(xhci, "Flags = 0x%x\n",
306                          le32_to_cpu(trb->event_cmd.flags));
307                 break;
308         default:
309                 xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
310                                 (unsigned int) type>>10);
311                 xhci_print_trb_offsets(xhci, trb);
312                 break;
313         }
314 }
315
316 /**
317  * Debug a segment with an xHCI ring.
318  *
319  * @return The Link TRB of the segment, or NULL if there is no Link TRB
320  * (which is a bug, since all segments must have a Link TRB).
321  *
322  * Prints out all TRBs in the segment, even those after the Link TRB.
323  *
324  * XXX: should we print out TRBs that the HC owns?  As long as we don't
325  * write, that should be fine...  We shouldn't expect that the memory pointed to
326  * by the TRB is valid at all.  Do we care about ones the HC owns?  Probably,
327  * for HC debugging.
328  */
329 void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
330 {
331         int i;
332         u64 addr = seg->dma;
333         union xhci_trb *trb = seg->trbs;
334
335         for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
336                 trb = &seg->trbs[i];
337                 xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr,
338                          lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
339                          upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
340                          le32_to_cpu(trb->link.intr_target),
341                          le32_to_cpu(trb->link.control));
342                 addr += sizeof(*trb);
343         }
344 }
345
346 void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
347 {
348         xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
349                         ring->dequeue,
350                         (unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
351                                                             ring->dequeue));
352         xhci_dbg(xhci, "Ring deq updated %u times\n",
353                         ring->deq_updates);
354         xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
355                         ring->enqueue,
356                         (unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
357                                                             ring->enqueue));
358         xhci_dbg(xhci, "Ring enq updated %u times\n",
359                         ring->enq_updates);
360 }
361
362 /**
363  * Debugging for an xHCI ring, which is a queue broken into multiple segments.
364  *
365  * Print out each segment in the ring.  Check that the DMA address in
366  * each link segment actually matches the segment's stored DMA address.
367  * Check that the link end bit is only set at the end of the ring.
368  * Check that the dequeue and enqueue pointers point to real data in this ring
369  * (not some other ring).
370  */
371 void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
372 {
373         /* FIXME: Throw an error if any segment doesn't have a Link TRB */
374         struct xhci_segment *seg;
375         struct xhci_segment *first_seg = ring->first_seg;
376         xhci_debug_segment(xhci, first_seg);
377
378         if (!ring->enq_updates && !ring->deq_updates) {
379                 xhci_dbg(xhci, "  Ring has not been updated\n");
380                 return;
381         }
382         for (seg = first_seg->next; seg != first_seg; seg = seg->next)
383                 xhci_debug_segment(xhci, seg);
384 }
385
386 void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
387                 unsigned int slot_id, unsigned int ep_index,
388                 struct xhci_virt_ep *ep)
389 {
390         int i;
391         struct xhci_ring *ring;
392
393         if (ep->ep_state & EP_HAS_STREAMS) {
394                 for (i = 1; i < ep->stream_info->num_streams; i++) {
395                         ring = ep->stream_info->stream_rings[i];
396                         xhci_dbg(xhci, "Dev %d endpoint %d stream ID %d:\n",
397                                 slot_id, ep_index, i);
398                         xhci_debug_segment(xhci, ring->deq_seg);
399                 }
400         } else {
401                 ring = ep->ring;
402                 if (!ring)
403                         return;
404                 xhci_dbg(xhci, "Dev %d endpoint ring %d:\n",
405                                 slot_id, ep_index);
406                 xhci_debug_segment(xhci, ring->deq_seg);
407         }
408 }
409
410 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
411 {
412         u64 addr = erst->erst_dma_addr;
413         int i;
414         struct xhci_erst_entry *entry;
415
416         for (i = 0; i < erst->num_entries; ++i) {
417                 entry = &erst->entries[i];
418                 xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n",
419                          addr,
420                          lower_32_bits(le64_to_cpu(entry->seg_addr)),
421                          upper_32_bits(le64_to_cpu(entry->seg_addr)),
422                          le32_to_cpu(entry->seg_size),
423                          le32_to_cpu(entry->rsvd));
424                 addr += sizeof(*entry);
425         }
426 }
427
428 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
429 {
430         u64 val;
431
432         val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
433         xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
434                         lower_32_bits(val));
435         xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
436                         upper_32_bits(val));
437 }
438
439 /* Print the last 32 bytes for 64-byte contexts */
440 static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
441 {
442         int i;
443         for (i = 0; i < 4; ++i) {
444                 xhci_dbg(xhci, "@%p (virt) @%08llx "
445                          "(dma) %#08llx - rsvd64[%d]\n",
446                          &ctx[4 + i], (unsigned long long)dma,
447                          ctx[4 + i], i);
448                 dma += 8;
449         }
450 }
451
/*
 * Return a human-readable name for the slot state encoded in the slot
 * context's dev_state field.  Enabled and disabled share one encoding,
 * hence the combined "enabled/disabled" string; any value outside the
 * known states maps to "reserved".
 */
char *xhci_get_slot_state(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);

	switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) {
	case SLOT_STATE_ENABLED:
		return "enabled/disabled";
	case SLOT_STATE_DEFAULT:
		return "default";
	case SLOT_STATE_ADDRESSED:
		return "addressed";
	case SLOT_STATE_CONFIGURED:
		return "configured";
	default:
		return "reserved";
	}
}
470
/*
 * Dump a device's slot context field by field, printing both the kernel
 * virtual address and the DMA address of each 32-bit field.  For
 * controllers using 64-byte contexts (CSZ set in HCCPARAMS) the
 * trailing reserved 32 bytes are dumped as well via dbg_rsvd64().
 */
static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
	/* Fields are 32 bits wide, DMA addresses are in bytes */
	int field_size = 32 / 8;
	int i;

	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
	/* DMA address of the slot context = container base + its offset within */
	dma_addr_t dma = ctx->dma +
		((unsigned long)slot_ctx - (unsigned long)ctx->bytes);
	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

	xhci_dbg(xhci, "Slot Context:\n");
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
			&slot_ctx->dev_info,
			(unsigned long long)dma, slot_ctx->dev_info);
	dma += field_size;
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
			&slot_ctx->dev_info2,
			(unsigned long long)dma, slot_ctx->dev_info2);
	dma += field_size;
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
			&slot_ctx->tt_info,
			(unsigned long long)dma, slot_ctx->tt_info);
	dma += field_size;
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
			&slot_ctx->dev_state,
			(unsigned long long)dma, slot_ctx->dev_state);
	dma += field_size;
	for (i = 0; i < 4; ++i) {
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
				&slot_ctx->reserved[i], (unsigned long long)dma,
				slot_ctx->reserved[i], i);
		dma += field_size;
	}

	/* 64-byte contexts carry an extra reserved area past the 32-byte layout */
	if (csz)
		dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
}
509
/*
 * Dump endpoint contexts 0..last_ep of a device context (capped at all
 * 31 endpoint contexts), one 32-bit field per line with both virtual
 * and DMA addresses.  For 64-byte context controllers each endpoint
 * context's trailing reserved area is dumped too.
 */
static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
		     struct xhci_container_ctx *ctx,
		     unsigned int last_ep)
{
	int i, j;
	int last_ep_ctx = 31;
	/* Fields are 32 bits wide, DMA addresses are in bytes */
	int field_size = 32 / 8;
	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

	if (last_ep < 31)
		last_ep_ctx = last_ep + 1;
	for (i = 0; i < last_ep_ctx; ++i) {
		unsigned int epaddr = xhci_get_endpoint_address(i);
		struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
		/* DMA address of this endpoint context within the container */
		dma_addr_t dma = ctx->dma +
			((unsigned long)ep_ctx - (unsigned long)ctx->bytes);

		xhci_dbg(xhci, "%s Endpoint %02d Context (ep_index %02d):\n",
				usb_endpoint_out(epaddr) ? "OUT" : "IN",
				epaddr & USB_ENDPOINT_NUMBER_MASK, i);
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
				&ep_ctx->ep_info,
				(unsigned long long)dma, ep_ctx->ep_info);
		dma += field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
				&ep_ctx->ep_info2,
				(unsigned long long)dma, ep_ctx->ep_info2);
		dma += field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
				&ep_ctx->deq,
				(unsigned long long)dma, ep_ctx->deq);
		/* deq is a 64-bit field, so it spans two 32-bit slots */
		dma += 2*field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
				&ep_ctx->tx_info,
				(unsigned long long)dma, ep_ctx->tx_info);
		dma += field_size;
		for (j = 0; j < 3; ++j) {
			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
					&ep_ctx->reserved[j],
					(unsigned long long)dma,
					ep_ctx->reserved[j], j);
			dma += field_size;
		}

		if (csz)
			dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
	}
}
559
/*
 * Dump a whole container context.  For input contexts this first prints
 * the input control context (drop/add flags plus reserved words), then
 * in all cases prints the slot context and endpoint contexts 0..last_ep.
 */
void xhci_dbg_ctx(struct xhci_hcd *xhci,
		  struct xhci_container_ctx *ctx,
		  unsigned int last_ep)
{
	int i;
	/* Fields are 32 bits wide, DMA addresses are in bytes */
	int field_size = 32 / 8;
	dma_addr_t dma = ctx->dma;
	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

	if (ctx->type == XHCI_CTX_TYPE_INPUT) {
		struct xhci_input_control_ctx *ctrl_ctx =
			xhci_get_input_control_ctx(ctx);
		/* NULL means ctx wasn't actually an input context; bail out */
		if (!ctrl_ctx) {
			xhci_warn(xhci, "Could not get input context, bad type.\n");
			return;
		}

		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
			 &ctrl_ctx->drop_flags, (unsigned long long)dma,
			 ctrl_ctx->drop_flags);
		dma += field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
			 &ctrl_ctx->add_flags, (unsigned long long)dma,
			 ctrl_ctx->add_flags);
		dma += field_size;
		for (i = 0; i < 6; ++i) {
			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
				 &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
				 ctrl_ctx->rsvd2[i], i);
			dma += field_size;
		}

		/* 64-byte contexts have a trailing reserved area to dump too */
		if (csz)
			dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
	}

	xhci_dbg_slot_ctx(xhci, ctx);
	xhci_dbg_ep_ctx(xhci, ctx, last_ep);
}
600
/*
 * xhci_dbg_trace - emit one formatted debug message to both the kernel
 * log (via xhci_dbg's %pV) and a caller-supplied trace callback.
 *
 * NOTE(review): the same va_format (and thus the same va_list) is
 * consumed twice — once by %pV and once by trace().  The kernel's %pV
 * handling is documented to va_copy() internally, and trace() is
 * expected to do the same; worth confirming for any new callback.
 */
void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
			const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	xhci_dbg(xhci, "%pV\n", &vaf);
	trace(&vaf);
	va_end(args);
}
EXPORT_SYMBOL_GPL(xhci_dbg_trace);