3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2015 Intel Corporation.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
21 * Copyright(c) 2015 Intel Corporation.
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
 * ftrace tracepoint definitions for the hfi1 driver.
 *
 * NOTE(review): this copy of the file has original line numbers embedded
 * at the start of each line and is missing many lines (the numbering is
 * non-contiguous), so several definitions below are visibly incomplete.
 * Verify against the full source tree before building.
 */
50 #undef TRACE_SYSTEM_VAR
51 #define TRACE_SYSTEM_VAR hfi1
53 #if !defined(__HFI1_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
54 #define __HFI1_TRACE_H
56 #include <linux/tracepoint.h>
57 #include <linux/trace_seq.h>
/*
 * Record the PCI device name string in a trace entry: DD_DEV_ENTRY
 * declares the string field, DD_DEV_ASSIGN fills it in TP_fast_assign.
 */
63 #define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
64 #define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev))
/*
 * Map an RHF_RCV_TYPE_* receive-type value to its symbolic name for use
 * with __print_symbolic() in TP_printk.
 */
66 #define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
67 #define show_packettype(etype) \
68 __print_symbolic(etype, \
69 packettype_name(EXPECTED), \
70 packettype_name(EAGER), \
71 packettype_name(IB), \
72 packettype_name(ERROR), \
73 packettype_name(BYPASS))
76 #define TRACE_SYSTEM hfi1_rx
/*
 * Trace one received packet header: receive context, RHF error flags,
 * packet type (printed symbolically via show_packettype), header/total
 * lengths, and eager-buffer update info.
 * NOTE(review): the TP_STRUCT__entry and TP_fast_assign wrappers are
 * not visible in this copy of the file.
 */
78 TRACE_EVENT(hfi1_rcvhdr,
79 TP_PROTO(struct hfi1_devdata *dd,
87 TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail),
100 __entry->eflags = eflags;
101 __entry->ctxt = ctxt;
102 __entry->etype = etype;
103 __entry->hlen = hlen;
104 __entry->tlen = tlen;
105 __entry->updegr = updegr;
106 __entry->etail = etail;
109 "[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
113 __entry->etype, show_packettype(__entry->etype),
/*
 * Classify which receive-interrupt handler a context is currently
 * using: slow_path=1 for the generic handler, otherwise dma_rtail
 * records whether the DMA-rtail fast-path handler is installed.
 */
121 TRACE_EVENT(hfi1_receive_interrupt,
122 TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
127 __field(u8, slow_path)
128 __field(u8, dma_rtail)
132 __entry->ctxt = ctxt;
133 if (dd->rcd[ctxt]->do_interrupt ==
134 &handle_receive_interrupt) {
135 __entry->slow_path = 1;
/* 0xFF appears to mean "rtail state not applicable" here -- confirm */
136 __entry->dma_rtail = 0xFF;
137 } else if (dd->rcd[ctxt]->do_interrupt ==
138 &handle_receive_interrupt_dma_rtail){
139 __entry->dma_rtail = 1;
140 __entry->slow_path = 0;
141 } else if (dd->rcd[ctxt]->do_interrupt ==
142 &handle_receive_interrupt_nodma_rtail) {
143 __entry->dma_rtail = 0;
144 __entry->slow_path = 0;
148 "[%s] ctxt %d SlowPath: %d DmaRtail: %d",
/* Implemented in trace.c: render a u64 array into the trace seq buffer. */
156 const char *print_u64_array(struct trace_seq *, u64 *, int);
/*
 * Dump the expected-TID bitmaps for a (ctxt, subctxt) pair; the maps
 * are copied into a dynamic array and printed via print_u64_array().
 * dir selects the direction marker printed ('>' or '<').
 */
158 TRACE_EVENT(hfi1_exp_tid_map,
159 TP_PROTO(unsigned ctxt, u16 subctxt, int dir,
160 unsigned long *maps, u16 count),
161 TP_ARGS(ctxt, subctxt, dir, maps, count),
163 __field(unsigned, ctxt)
164 __field(u16, subctxt)
167 __dynamic_array(unsigned long, maps, sizeof(*maps) * count)
170 __entry->ctxt = ctxt;
171 __entry->subctxt = subctxt;
173 __entry->count = count;
174 memcpy(__get_dynamic_array(maps), maps,
175 sizeof(*maps) * count);
177 TP_printk("[%3u:%02u] %s tidmaps %s",
180 (__entry->dir ? ">" : "<"),
181 print_u64_array(p, __get_dynamic_array(maps),
/*
 * Trace programming of one expected-receive TID entry: the TID index,
 * user virtual address, physical address, and backing page pointer.
 */
186 TRACE_EVENT(hfi1_exp_rcv_set,
187 TP_PROTO(unsigned ctxt, u16 subctxt, u32 tid,
188 unsigned long vaddr, u64 phys_addr, void *page),
189 TP_ARGS(ctxt, subctxt, tid, vaddr, phys_addr, page),
191 __field(unsigned, ctxt)
192 __field(u16, subctxt)
194 __field(unsigned long, vaddr)
195 __field(u64, phys_addr)
196 __field(void *, page)
199 __entry->ctxt = ctxt;
200 __entry->subctxt = subctxt;
202 __entry->vaddr = vaddr;
203 __entry->phys_addr = phys_addr;
204 __entry->page = page;
206 TP_printk("[%u:%u] TID %u, vaddrs 0x%lx, physaddr 0x%llx, pgp %p",
/*
 * Trace teardown of one expected-receive TID entry (counterpart of
 * hfi1_exp_rcv_set above).
 */
216 TRACE_EVENT(hfi1_exp_rcv_free,
217 TP_PROTO(unsigned ctxt, u16 subctxt, u32 tid,
218 unsigned long phys, void *page),
219 TP_ARGS(ctxt, subctxt, tid, phys, page),
221 __field(unsigned, ctxt)
222 __field(u16, subctxt)
224 __field(unsigned long, phys)
225 __field(void *, page)
228 __entry->ctxt = ctxt;
229 __entry->subctxt = subctxt;
231 __entry->phys = phys;
232 __entry->page = page;
234 TP_printk("[%u:%u] freeing TID %u, 0x%lx, pgp %p",
243 #define TRACE_SYSTEM hfi1_tx
/*
 * PIO send-context credit return: identifies the send context by
 * software index and hardware context number.
 */
245 TRACE_EVENT(hfi1_piofree,
246 TP_PROTO(struct send_context *sc, int extra),
250 __field(u32, sw_index)
251 __field(u32, hw_context)
255 DD_DEV_ASSIGN(sc->dd);
256 __entry->sw_index = sc->sw_index;
257 __entry->hw_context = sc->hw_context;
258 __entry->extra = extra;
261 "[%s] ctxt %u(%u) extra %d",
/*
 * Trace enabling/disabling of the PIO credit-return interrupt for a
 * send context, along with the raw credit_ctrl CSR value.
 */
269 TRACE_EVENT(hfi1_wantpiointr,
270 TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
271 TP_ARGS(sc, needint, credit_ctrl),
274 __field(u32, sw_index)
275 __field(u32, hw_context)
276 __field(u32, needint)
277 __field(u64, credit_ctrl)
280 DD_DEV_ASSIGN(sc->dd);
281 __entry->sw_index = sc->sw_index;
282 __entry->hw_context = sc->hw_context;
283 __entry->needint = needint;
284 __entry->credit_ctrl = credit_ctrl;
287 "[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
292 (unsigned long long)__entry->credit_ctrl
/*
 * Event class for QP sleep/wakeup: records the QP number, the flags
 * argument, and the QP's current s_flags at trace time.
 */
296 DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
297 TP_PROTO(struct hfi1_qp *qp, u32 flags),
300 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
303 __field(u32, s_flags)
306 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
307 __entry->flags = flags;
308 __entry->qpn = qp->ibqp.qp_num;
309 __entry->s_flags = qp->s_flags;
312 "[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
320 DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
321 TP_PROTO(struct hfi1_qp *qp, u32 flags),
324 DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
325 TP_PROTO(struct hfi1_qp *qp, u32 flags),
329 #define TRACE_SYSTEM hfi1_qphash
/*
 * Event class for QP hash-table insert/remove: QP number plus the
 * bucket it lands in.
 */
330 DECLARE_EVENT_CLASS(hfi1_qphash_template,
331 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
334 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
339 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
340 __entry->qpn = qp->ibqp.qp_num;
341 __entry->bucket = bucket;
344 "[%s] qpn 0x%x bucket %u",
351 DEFINE_EVENT(hfi1_qphash_template, hfi1_qpinsert,
352 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
353 TP_ARGS(qp, bucket));
355 DEFINE_EVENT(hfi1_qphash_template, hfi1_qpremove,
356 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
357 TP_ARGS(qp, bucket));
360 #define TRACE_SYSTEM hfi1_ibhdrs
/*
 * Helpers implemented in trace.c for decoding IB headers and SDMA
 * descriptor flag bits into the trace seq buffer ('p' is the implicit
 * trace_seq available inside TP_printk).
 */
362 u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr);
363 const char *parse_everbs_hdrs(
368 #define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
370 const char *parse_sdma_flags(
372 u64 desc0, u64 desc1);
374 #define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
/* Symbolic name tables for the LRH lnh field and IB transport opcodes. */
377 #define lrh_name(lrh) { HFI1_##lrh, #lrh }
378 #define show_lnh(lrh) \
379 __print_symbolic(lrh, \
383 #define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode }
384 #define show_ib_opcode(opcode) \
385 __print_symbolic(opcode, \
386 ib_opcode_name(RC_SEND_FIRST), \
387 ib_opcode_name(RC_SEND_MIDDLE), \
388 ib_opcode_name(RC_SEND_LAST), \
389 ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \
390 ib_opcode_name(RC_SEND_ONLY), \
391 ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \
392 ib_opcode_name(RC_RDMA_WRITE_FIRST), \
393 ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \
394 ib_opcode_name(RC_RDMA_WRITE_LAST), \
395 ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
396 ib_opcode_name(RC_RDMA_WRITE_ONLY), \
397 ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
398 ib_opcode_name(RC_RDMA_READ_REQUEST), \
399 ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \
400 ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \
401 ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \
402 ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \
403 ib_opcode_name(RC_ACKNOWLEDGE), \
404 ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
405 ib_opcode_name(RC_COMPARE_SWAP), \
406 ib_opcode_name(RC_FETCH_ADD), \
407 ib_opcode_name(UC_SEND_FIRST), \
408 ib_opcode_name(UC_SEND_MIDDLE), \
409 ib_opcode_name(UC_SEND_LAST), \
410 ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \
411 ib_opcode_name(UC_SEND_ONLY), \
412 ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \
413 ib_opcode_name(UC_RDMA_WRITE_FIRST), \
414 ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \
415 ib_opcode_name(UC_RDMA_WRITE_LAST), \
416 ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
417 ib_opcode_name(UC_RDMA_WRITE_ONLY), \
418 ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
419 ib_opcode_name(UD_SEND_ONLY), \
420 ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE))
/* TP_printk format fragments for LRH, BTH and extended headers. */
423 #define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x"
425 "op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \
426 "f %d b %d qpn 0x%.6x a %d psn 0x%.8x"
427 #define EHDR_PRN "%s"
/*
 * Event class decoding a full IB header (LRH + BTH + extended headers)
 * into individual trace fields.  Field extraction below shifts/masks
 * big-endian LRH and BTH words per the IB wire format.
 */
429 DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
430 TP_PROTO(struct hfi1_devdata *dd,
431 struct hfi1_ib_header *hdr),
455 /* extended headers */
456 __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
459 struct hfi1_other_headers *ohdr;
464 (u8)(be16_to_cpu(hdr->lrh[0]) >> 12);
466 (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf;
468 (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
470 (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
472 be16_to_cpu(hdr->lrh[1]);
473 /* allow for larger len */
475 be16_to_cpu(hdr->lrh[2]);
477 be16_to_cpu(hdr->lrh[3]);
479 if (__entry->lnh == HFI1_LRH_BTH)
482 ohdr = &hdr->u.l.oth;
484 (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
486 (be32_to_cpu(ohdr->bth[0]) >> 23) & 1;
488 (be32_to_cpu(ohdr->bth[0]) >> 22) & 1;
490 (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
492 (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf;
494 be32_to_cpu(ohdr->bth[0]) & 0xffff;
496 (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT)
499 (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT)
502 be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
504 (be32_to_cpu(ohdr->bth[2]) >> 31) & 1;
505 /* allow for larger PSN */
507 be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
508 /* extended headers */
510 __get_dynamic_array(ehdrs),
512 ibhdr_exhdr_len(hdr));
514 TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
520 __entry->lnh, show_lnh(__entry->lnh),
525 __entry->opcode, show_ib_opcode(__entry->opcode),
536 /* extended headers */
539 (void *)__get_dynamic_array(ehdrs))
/* Instantiations for the receive and transmit directions. */
543 DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr,
544 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
547 DEFINE_EVENT(hfi1_ibhdr_template, output_ibhdr,
548 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
552 "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
553 "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
556 #define TRACE_SYSTEM hfi1_snoop
/*
 * Snoop capture: records a decoded summary of the IB header plus raw
 * copies of the full header and payload in dynamic arrays.
 */
559 TRACE_EVENT(snoop_capture,
560 TP_PROTO(struct hfi1_devdata *dd,
562 struct hfi1_ib_header *hdr,
565 TP_ARGS(dd, hdr_len, hdr, data_len, data),
574 __field(u32, hdr_len)
575 __field(u32, data_len)
577 __dynamic_array(u8, raw_hdr, hdr_len)
578 __dynamic_array(u8, raw_pkt, data_len)
581 struct hfi1_other_headers *ohdr;
583 __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
584 if (__entry->lnh == HFI1_LRH_BTH)
587 ohdr = &hdr->u.l.oth;
589 __entry->slid = be16_to_cpu(hdr->lrh[3]);
590 __entry->dlid = be16_to_cpu(hdr->lrh[1]);
591 __entry->qpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
592 __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
593 __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
594 __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
595 __entry->hdr_len = hdr_len;
596 __entry->data_len = data_len;
597 memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
598 memcpy(__get_dynamic_array(raw_pkt), data, data_len);
600 TP_printk("[%s] " SNOOP_PRN,
606 show_ib_opcode(__entry->opcode),
615 #define TRACE_SYSTEM hfi1_ctxts
618 "cred:%u, credaddr:0x%llx, piobase:0x%llx, rcvhdr_cnt:%u, " \
619 "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx"
/*
 * Snapshot of a user context's resources at setup: PIO credits and
 * base addresses, receive-header queue geometry, and eager buffers.
 */
620 TRACE_EVENT(hfi1_uctxtdata,
621 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt),
625 __field(unsigned, ctxt)
626 __field(u32, credits)
627 __field(u64, hw_free)
628 __field(u64, piobase)
629 __field(u16, rcvhdrq_cnt)
630 __field(u64, rcvhdrq_phys)
631 __field(u32, eager_cnt)
632 __field(u64, rcvegr_phys)
636 __entry->ctxt = uctxt->ctxt;
637 __entry->credits = uctxt->sc->credits;
638 __entry->hw_free = (u64)uctxt->sc->hw_free;
639 __entry->piobase = (u64)uctxt->sc->base_addr;
640 __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
641 __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
642 __entry->eager_cnt = uctxt->egrbufs.alloced;
643 __entry->rcvegr_phys = uctxt->egrbufs.rcvtids[0].phys;
646 "[%s] ctxt %u " UCTXT_FMT,
652 __entry->rcvhdrq_cnt,
653 __entry->rcvhdrq_phys,
660 "egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
/*
 * Trace the hfi1_ctxt_info structure returned to user space for a
 * (ctxt, subctxt); note cinfo is passed by value.
 */
661 TRACE_EVENT(hfi1_ctxt_info,
662 TP_PROTO(struct hfi1_devdata *dd, unsigned ctxt, unsigned subctxt,
663 struct hfi1_ctxt_info cinfo),
664 TP_ARGS(dd, ctxt, subctxt, cinfo),
667 __field(unsigned, ctxt)
668 __field(unsigned, subctxt)
669 __field(u16, egrtids)
670 __field(u16, rcvhdrq_cnt)
671 __field(u16, rcvhdrq_size)
672 __field(u16, sdma_ring_size)
673 __field(u32, rcvegr_size)
677 __entry->ctxt = ctxt;
678 __entry->subctxt = subctxt;
679 __entry->egrtids = cinfo.egrtids;
680 __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt;
681 __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize;
682 __entry->sdma_ring_size = cinfo.sdma_ring_size;
683 __entry->rcvegr_size = cinfo.rcvegr_size;
686 "[%s] ctxt %u:%u " CINFO_FMT,
691 __entry->rcvegr_size,
692 __entry->rcvhdrq_cnt,
693 __entry->rcvhdrq_size,
694 __entry->sdma_ring_size
699 #define TRACE_SYSTEM hfi1_sma
/*
 * Buffer-control (BCT) tracing: the whole struct buffer_control is
 * copied into a dynamic array and individual fields are pulled back
 * out with the BCT() accessor for printing.
 */
702 "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
706 ((struct buffer_control *)__get_dynamic_array(bct))->field \
709 DECLARE_EVENT_CLASS(hfi1_bct_template,
710 TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
714 __dynamic_array(u8, bct, sizeof(*bc))
719 __get_dynamic_array(bct),
723 TP_printk(BCT_FORMAT,
724 BCT(overall_shared_limit),
726 BCT(vl[0].dedicated),
729 BCT(vl[1].dedicated),
732 BCT(vl[2].dedicated),
735 BCT(vl[3].dedicated),
738 BCT(vl[4].dedicated),
741 BCT(vl[5].dedicated),
744 BCT(vl[6].dedicated),
747 BCT(vl[7].dedicated),
750 BCT(vl[15].dedicated),
756 DEFINE_EVENT(hfi1_bct_template, bct_set,
757 TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
760 DEFINE_EVENT(hfi1_bct_template, bct_get,
761 TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
765 #define TRACE_SYSTEM hfi1_sdma
/*
 * Trace one SDMA descriptor as it is posted: raw desc0/desc1 qwords
 * plus decoded flags, physical address, generation and byte count
 * (decoded in TP_printk via shift/mask against SDMA_DESC* constants).
 */
767 TRACE_EVENT(hfi1_sdma_descriptor,
769 struct sdma_engine *sde,
774 TP_ARGS(sde, desc0, desc1, e, descp),
776 DD_DEV_ENTRY(sde->dd)
777 __field(void *, descp)
784 DD_DEV_ASSIGN(sde->dd);
785 __entry->desc0 = desc0;
786 __entry->desc1 = desc1;
787 __entry->idx = sde->this_idx;
788 __entry->descp = descp;
792 "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
795 __parse_sdma_flags(__entry->desc0, __entry->desc1),
796 (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT)
797 & SDMA_DESC0_PHY_ADDR_MASK,
798 (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT)
799 & SDMA_DESC1_GENERATION_MASK),
800 (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT)
801 & SDMA_DESC0_BYTE_COUNT_MASK),
/* Trace which SDMA engine was selected for a given selector/VL. */
809 TRACE_EVENT(hfi1_sdma_engine_select,
810 TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
811 TP_ARGS(dd, sel, vl, idx),
825 "[%s] selecting SDE %u sel 0x%x vl %u",
/*
 * Event class for SDMA engine interrupt/progress: engine index plus a
 * 64-bit status value.
 */
833 DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
835 struct sdma_engine *sde,
838 TP_ARGS(sde, status),
840 DD_DEV_ENTRY(sde->dd)
845 DD_DEV_ASSIGN(sde->dd);
846 __entry->status = status;
847 __entry->idx = sde->this_idx;
850 "[%s] SDE(%u) status %llx",
853 (unsigned long long)__entry->status
857 DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
859 struct sdma_engine *sde,
865 DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
867 struct sdma_engine *sde,
/*
 * Event class for AHG (automatic header generation) index
 * allocate/deallocate on an SDMA engine.
 */
873 DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
875 struct sdma_engine *sde,
880 DD_DEV_ENTRY(sde->dd)
885 DD_DEV_ASSIGN(sde->dd);
886 __entry->idx = sde->this_idx;
887 __entry->aidx = aidx;
890 "[%s] SDE(%u) aidx %d",
897 DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
899 struct sdma_engine *sde,
904 DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
906 struct sdma_engine *sde,
/*
 * Two variants of hfi1_sdma_progress: the CONFIG_HFI1_DEBUG_SDMA_ORDER
 * build additionally records the txreq sequence number (sn).
 * NOTE(review): the #else separating the variants is not visible in
 * this copy of the file.
 */
911 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
912 TRACE_EVENT(hfi1_sdma_progress,
914 struct sdma_engine *sde,
917 struct sdma_txreq *txp
919 TP_ARGS(sde, hwhead, swhead, txp),
921 DD_DEV_ENTRY(sde->dd)
926 __field(u16, tx_tail)
927 __field(u16, tx_head)
931 DD_DEV_ASSIGN(sde->dd);
932 __entry->hwhead = hwhead;
933 __entry->swhead = swhead;
934 __entry->tx_tail = sde->tx_tail;
935 __entry->tx_head = sde->tx_head;
936 __entry->txnext = txp ? txp->next_descq_idx : ~0;
937 __entry->idx = sde->this_idx;
938 __entry->sn = txp ? txp->sn : ~0;
941 "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
953 TRACE_EVENT(hfi1_sdma_progress,
955 struct sdma_engine *sde,
958 struct sdma_txreq *txp
960 TP_ARGS(sde, hwhead, swhead, txp),
962 DD_DEV_ENTRY(sde->dd)
966 __field(u16, tx_tail)
967 __field(u16, tx_head)
971 DD_DEV_ASSIGN(sde->dd);
972 __entry->hwhead = hwhead;
973 __entry->swhead = swhead;
974 __entry->tx_tail = sde->tx_tail;
975 __entry->tx_head = sde->tx_head;
976 __entry->txnext = txp ? txp->next_descq_idx : ~0;
977 __entry->idx = sde->this_idx;
980 "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
/* Event class tracing SDMA sequence numbers in/out of an engine. */
992 DECLARE_EVENT_CLASS(hfi1_sdma_sn,
994 struct sdma_engine *sde,
999 DD_DEV_ENTRY(sde->dd)
1004 DD_DEV_ASSIGN(sde->dd);
1006 __entry->idx = sde->this_idx;
1009 "[%s] SDE(%u) sn %llu",
1016 DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
1018 struct sdma_engine *sde,
1024 DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
1026 struct sdma_engine *sde,
1032 #define USDMA_HDR_FORMAT \
1033 "[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
/*
 * Dump the full user-SDMA packet header (PBC, LRH, BTH, KDETH words,
 * TID value) for one request.  LRH/BTH words are byte-swapped from
 * big-endian; PBC/KDETH words are kept in little-endian form.
 */
1035 TRACE_EVENT(hfi1_sdma_user_header,
1036 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
1037 struct hfi1_pkt_header *hdr, u32 tidval),
1038 TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
1042 __field(u8, subctxt)
1044 __field(__le32, pbc0)
1045 __field(__le32, pbc1)
1046 __field(__be32, lrh0)
1047 __field(__be32, lrh1)
1048 __field(__be32, bth0)
1049 __field(__be32, bth1)
1050 __field(__be32, bth2)
1051 __field(__le32, kdeth0)
1052 __field(__le32, kdeth1)
1053 __field(__le32, kdeth2)
1054 __field(__le32, kdeth3)
1055 __field(__le32, kdeth4)
1056 __field(__le32, kdeth5)
1057 __field(__le32, kdeth6)
1058 __field(__le32, kdeth7)
1059 __field(__le32, kdeth8)
1060 __field(u32, tidval)
1063 __le32 *pbc = (__le32 *)hdr->pbc;
1064 __be32 *lrh = (__be32 *)hdr->lrh;
1065 __be32 *bth = (__be32 *)hdr->bth;
1066 __le32 *kdeth = (__le32 *)&hdr->kdeth;
1069 __entry->ctxt = ctxt;
1070 __entry->subctxt = subctxt;
1072 __entry->pbc0 = pbc[0];
1073 __entry->pbc1 = pbc[1];
/* NOTE(review): storing be32_to_cpu() results into __be32 fields looks
 * like a sparse endianness-annotation mismatch -- verify upstream. */
1074 __entry->lrh0 = be32_to_cpu(lrh[0]);
1075 __entry->lrh1 = be32_to_cpu(lrh[1]);
1076 __entry->bth0 = be32_to_cpu(bth[0]);
1077 __entry->bth1 = be32_to_cpu(bth[1]);
1078 __entry->bth2 = be32_to_cpu(bth[2]);
1079 __entry->kdeth0 = kdeth[0];
1080 __entry->kdeth1 = kdeth[1];
1081 __entry->kdeth2 = kdeth[2];
1082 __entry->kdeth3 = kdeth[3];
1083 __entry->kdeth4 = kdeth[4];
1084 __entry->kdeth5 = kdeth[5];
1085 __entry->kdeth6 = kdeth[6];
1086 __entry->kdeth7 = kdeth[7];
1087 __entry->kdeth8 = kdeth[8];
1088 __entry->tidval = tidval;
1090 TP_printk(USDMA_HDR_FORMAT,
1115 #define SDMA_UREQ_FMT \
1116 "[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
/*
 * Decode the user-SDMA request-info words (i[0..3]) submitted from
 * user space: version/opcode, iovec count, packet count, fragment
 * size, and completion-ring index.
 */
1117 TRACE_EVENT(hfi1_sdma_user_reqinfo,
1118 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
1119 TP_ARGS(dd, ctxt, subctxt, i),
1123 __field(u8, subctxt)
1124 __field(u8, ver_opcode)
1127 __field(u16, fragsize)
1128 __field(u16, comp_idx)
1132 __entry->ctxt = ctxt;
1133 __entry->subctxt = subctxt;
1134 __entry->ver_opcode = i[0] & 0xff;
1135 __entry->iovcnt = (i[0] >> 8) & 0xff;
1136 __entry->npkts = i[1];
1137 __entry->fragsize = i[2];
1138 __entry->comp_idx = i[3];
1140 TP_printk(SDMA_UREQ_FMT,
1144 __entry->ver_opcode,
/* Symbolic names for user-SDMA completion states. */
1152 #define usdma_complete_name(st) { st, #st }
1153 #define show_usdma_complete_state(st) \
1154 __print_symbolic(st, \
1155 usdma_complete_name(FREE), \
1156 usdma_complete_name(QUEUED), \
1157 usdma_complete_name(COMPLETE), \
1158 usdma_complete_name(ERROR))
/* Trace a user-SDMA request completion: state plus error/status code. */
1160 TRACE_EVENT(hfi1_sdma_user_completion,
1161 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
1162 u8 state, int code),
1163 TP_ARGS(dd, ctxt, subctxt, idx, state, code),
1167 __field(u8, subctxt)
1174 __entry->ctxt = ctxt;
1175 __entry->subctxt = subctxt;
1177 __entry->state = state;
1178 __entry->code = code;
1180 TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
1181 __get_str(dev), __entry->ctxt, __entry->subctxt,
1182 __entry->idx, show_usdma_complete_state(__entry->state),
/* Implemented in trace.c: render a u32 array into the trace seq buffer. */
1186 const char *print_u32_array(struct trace_seq *, u32 *, int);
1187 #define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
/*
 * Trace the AHG (automatic header generation) words programmed for a
 * user-SDMA request; up to 10 u32s are copied into a fixed array.
 */
1189 TRACE_EVENT(hfi1_sdma_user_header_ahg,
1190 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
1191 u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
1192 TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
1196 __field(u8, subctxt)
1201 __field(u32, tidval)
1202 __array(u32, ahg, 10)
1206 __entry->ctxt = ctxt;
1207 __entry->subctxt = subctxt;
1210 __entry->idx = ahgidx;
1212 __entry->tidval = tidval;
/* NOTE(review): no visible bound check that len <= 10 before this copy
 * -- presumably guaranteed by the caller; confirm. */
1213 memcpy(__entry->ahg, ahg, len * sizeof(u32));
1215 TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
1223 __print_u32_hex(__entry->ahg, __entry->len),
/*
 * Trace an SDMA engine state-machine transition: current and new
 * state names as strings.
 */
1228 TRACE_EVENT(hfi1_sdma_state,
1230 struct sdma_engine *sde,
1234 TP_ARGS(sde, cstate, nstate),
1236 DD_DEV_ENTRY(sde->dd)
1237 __string(curstate, cstate)
1238 __string(newstate, nstate)
1241 DD_DEV_ASSIGN(sde->dd);
1242 __assign_str(curstate, cstate);
1243 __assign_str(newstate, nstate);
1245 TP_printk("[%s] current state %s new state %s",
1247 __get_str(curstate),
1253 #define TRACE_SYSTEM hfi1_rc
/*
 * Event class for RC send-completion tracing: QP number, s_flags and
 * PSN progress markers.
 */
1255 DECLARE_EVENT_CLASS(hfi1_sdma_rc,
1256 TP_PROTO(struct hfi1_qp *qp, u32 psn),
1259 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
1263 __field(u32, sending_psn)
1264 __field(u32, sending_hpsn)
1267 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
1268 __entry->qpn = qp->ibqp.qp_num;
1269 __entry->flags = qp->s_flags;
1271 __entry->sending_psn = qp->s_sending_psn;
1272 __entry->sending_hpsn = qp->s_sending_hpsn;
1275 "[%s] qpn 0x%x flags 0x%x psn 0x%x sending_psn 0x%x sending_hpsn 0x%x",
1280 __entry->sending_psn,
/* NOTE(review): sending_psn is printed twice -- the second one looks
 * like it should be sending_hpsn; verify upstream. */
1281 __entry->sending_psn
1285 DEFINE_EVENT(hfi1_sdma_rc, hfi1_rc_sendcomplete,
1286 TP_PROTO(struct hfi1_qp *qp, u32 psn),
1291 #define TRACE_SYSTEM hfi1_misc
/*
 * Trace an interrupt source: the is_table entry formats the source
 * name into a fixed 64-byte buffer.
 */
1293 TRACE_EVENT(hfi1_interrupt,
1294 TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
1296 TP_ARGS(dd, is_entry, src),
1299 __array(char, buf, 64)
1304 is_entry->is_name(__entry->buf, 64, src - is_entry->start);
1307 TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
1313 * This produces a REALLY ugly trace in the console output when the string is
1318 #define TRACE_SYSTEM hfi1_trace
1320 #define MAX_MSG_LEN 512
/*
 * Generic printf-style debug tracepoint: captures the calling function
 * name and a pre-formatted message (vsnprintf'd from a va_format, with
 * a WARN if the message would have been truncated at MAX_MSG_LEN).
 */
1322 DECLARE_EVENT_CLASS(hfi1_trace_template,
1323 TP_PROTO(const char *function, struct va_format *vaf),
1324 TP_ARGS(function, vaf),
1326 __string(function, function)
1327 __dynamic_array(char, msg, MAX_MSG_LEN)
1330 __assign_str(function, function);
1331 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
1332 MAX_MSG_LEN, vaf->fmt,
1333 *vaf->va) >= MAX_MSG_LEN);
1335 TP_printk("(%s) %s",
1336 __get_str(function),
1341 * It may be nice to macroize the __hfi1_trace but the va_* stuff requires an
1342 * actual function to work and can not be in a macro.
/*
 * __hfi1_trace_def declares the per-level trace function and its
 * DEFINE_EVENT; __hfi1_trace_fn (expanded in trace.c) provides the
 * varargs wrapper body.
 */
1344 #define __hfi1_trace_def(lvl) \
1345 void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
1347 DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
1348 TP_PROTO(const char *function, struct va_format *vaf), \
1349 TP_ARGS(function, vaf))
1351 #define __hfi1_trace_fn(lvl) \
1352 void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
1354 struct va_format vaf = { \
1359 va_start(args, fmt); \
1361 trace_hfi1_ ##lvl(func, &vaf); \
1367 * To create a new trace level simply define it below and as a __hfi1_trace_fn
1368 * in trace.c. This will create all the hooks for calling
1369 * hfi1_cdbg(LVL, fmt, ...); as well as take care of all
1370 * the debugfs stuff.
1372 __hfi1_trace_def(PKT);
1373 __hfi1_trace_def(PROC);
1374 __hfi1_trace_def(SDMA);
1375 __hfi1_trace_def(LINKVERB);
1376 __hfi1_trace_def(DEBUG);
1377 __hfi1_trace_def(SNOOP);
1378 __hfi1_trace_def(CNTR);
1379 __hfi1_trace_def(PIO);
1380 __hfi1_trace_def(DC8051);
1381 __hfi1_trace_def(FIRMWARE);
1382 __hfi1_trace_def(RCVCTRL);
1383 __hfi1_trace_def(TID);
/* Per-level and default debug entry points used throughout the driver. */
1385 #define hfi1_cdbg(which, fmt, ...) \
1386 __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
1388 #define hfi1_dbg(fmt, ...) \
1389 hfi1_cdbg(DEBUG, fmt, ##__VA_ARGS__)
1392 * Define HFI1_EARLY_DBG at compile time or here to enable early trace
1393 * messages. Do not check in an enablement for this.
1396 #ifdef HFI1_EARLY_DBG
1397 #define hfi1_dbg_early(fmt, ...) \
1398 trace_printk(fmt, ##__VA_ARGS__)
1400 #define hfi1_dbg_early(fmt, ...)
1403 #endif /* __HFI1_TRACE_H */
/* Boilerplate required by the ftrace framework to generate the events. */
1405 #undef TRACE_INCLUDE_PATH
1406 #undef TRACE_INCLUDE_FILE
1407 #define TRACE_INCLUDE_PATH .
1408 #define TRACE_INCLUDE_FILE trace
1409 #include <trace/define_trace.h>