2 * Pretty printing Support for iptables xt_qtaguid module.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
12 * Most of the functions in this file just waste time if DEBUG is not defined.
13 * The matching xt_qtaguid_print.h will static inline empty funcs if the needed
14 * debug flags are not defined.
15 * Those funcs that fail to allocate memory will panic as there is no need to
16 * hobble along just pretending to do the requested work.
22 #include <linux/gfp.h>
23 #include <linux/net.h>
24 #include <linux/rbtree.h>
25 #include <linux/slab.h>
26 #include <linux/spinlock_types.h>
29 #include "xt_qtaguid_internal.h"
30 #include "xt_qtaguid_print.h"
/*
 * Panic if a debug-string allocation failed.  Per the file header there is
 * no point hobbling along just pretending to do the requested work.
 */
static void _bug_on_err_or_null(void *ptr)
{
	if (IS_ERR_OR_NULL(ptr)) {
		pr_err("qtaguid: kmalloc failed\n");
		BUG();
	}
}
42 char *pp_tag_t(tag_t *tag)
47 res = kasprintf(GFP_ATOMIC, "tag_t@null{}");
49 res = kasprintf(GFP_ATOMIC,
50 "tag_t@%p{tag=0x%llx, uid=%u}",
51 tag, *tag, get_uid_from_tag(*tag));
52 _bug_on_err_or_null(res);
56 char *pp_data_counters(struct data_counters *dc, bool showValues)
61 res = kasprintf(GFP_ATOMIC, "data_counters@null{}");
64 GFP_ATOMIC, "data_counters@%p{"
67 "tcp{b=%llu, p=%llu}, "
68 "udp{b=%llu, p=%llu},"
69 "other{b=%llu, p=%llu}}, "
71 "tcp{b=%llu, p=%llu}, "
72 "udp{b=%llu, p=%llu},"
73 "other{b=%llu, p=%llu}}}, "
76 "tcp{b=%llu, p=%llu}, "
77 "udp{b=%llu, p=%llu},"
78 "other{b=%llu, p=%llu}}, "
80 "tcp{b=%llu, p=%llu}, "
81 "udp{b=%llu, p=%llu},"
82 "other{b=%llu, p=%llu}}}}",
84 dc->bpc[0][IFS_RX][IFS_TCP].bytes,
85 dc->bpc[0][IFS_RX][IFS_TCP].packets,
86 dc->bpc[0][IFS_RX][IFS_UDP].bytes,
87 dc->bpc[0][IFS_RX][IFS_UDP].packets,
88 dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes,
89 dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets,
90 dc->bpc[0][IFS_TX][IFS_TCP].bytes,
91 dc->bpc[0][IFS_TX][IFS_TCP].packets,
92 dc->bpc[0][IFS_TX][IFS_UDP].bytes,
93 dc->bpc[0][IFS_TX][IFS_UDP].packets,
94 dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes,
95 dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets,
96 dc->bpc[1][IFS_RX][IFS_TCP].bytes,
97 dc->bpc[1][IFS_RX][IFS_TCP].packets,
98 dc->bpc[1][IFS_RX][IFS_UDP].bytes,
99 dc->bpc[1][IFS_RX][IFS_UDP].packets,
100 dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes,
101 dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets,
102 dc->bpc[1][IFS_TX][IFS_TCP].bytes,
103 dc->bpc[1][IFS_TX][IFS_TCP].packets,
104 dc->bpc[1][IFS_TX][IFS_UDP].bytes,
105 dc->bpc[1][IFS_TX][IFS_UDP].packets,
106 dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes,
107 dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets);
109 res = kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc);
110 _bug_on_err_or_null(res);
114 char *pp_tag_node(struct tag_node *tn)
120 res = kasprintf(GFP_ATOMIC, "tag_node@null{}");
121 _bug_on_err_or_null(res);
124 tag_str = pp_tag_t(&tn->tag);
125 res = kasprintf(GFP_ATOMIC,
126 "tag_node@%p{tag=%s}",
128 _bug_on_err_or_null(res);
133 char *pp_tag_ref(struct tag_ref *tr)
139 res = kasprintf(GFP_ATOMIC, "tag_ref@null{}");
140 _bug_on_err_or_null(res);
143 tn_str = pp_tag_node(&tr->tn);
144 res = kasprintf(GFP_ATOMIC,
145 "tag_ref@%p{%s, num_sock_tags=%d}",
146 tr, tn_str, tr->num_sock_tags);
147 _bug_on_err_or_null(res);
152 char *pp_tag_stat(struct tag_stat *ts)
156 char *parent_counters_str;
160 res = kasprintf(GFP_ATOMIC, "tag_stat@null{}");
161 _bug_on_err_or_null(res);
164 tn_str = pp_tag_node(&ts->tn);
165 counters_str = pp_data_counters(&ts->counters, true);
166 parent_counters_str = pp_data_counters(ts->parent_counters, false);
167 res = kasprintf(GFP_ATOMIC,
168 "tag_stat@%p{%s, counters=%s, parent_counters=%s}",
169 ts, tn_str, counters_str, parent_counters_str);
170 _bug_on_err_or_null(res);
173 kfree(parent_counters_str);
177 char *pp_iface_stat(struct iface_stat *is)
181 res = kasprintf(GFP_ATOMIC, "iface_stat@null{}");
183 struct data_counters *cnts = &is->totals_via_skb;
184 res = kasprintf(GFP_ATOMIC, "iface_stat@%p{"
185 "list=list_head{...}, "
187 "total_dev={rx={bytes=%llu, "
191 "total_skb={rx={bytes=%llu, "
195 "last_known_valid=%d, "
196 "last_known={rx={bytes=%llu, "
203 "tag_stat_tree=rb_root{...}}",
206 is->totals_via_dev[IFS_RX].bytes,
207 is->totals_via_dev[IFS_RX].packets,
208 is->totals_via_dev[IFS_TX].bytes,
209 is->totals_via_dev[IFS_TX].packets,
210 dc_sum_bytes(cnts, 0, IFS_RX),
211 dc_sum_packets(cnts, 0, IFS_RX),
212 dc_sum_bytes(cnts, 0, IFS_TX),
213 dc_sum_packets(cnts, 0, IFS_TX),
214 is->last_known_valid,
215 is->last_known[IFS_RX].bytes,
216 is->last_known[IFS_RX].packets,
217 is->last_known[IFS_TX].bytes,
218 is->last_known[IFS_TX].packets,
223 _bug_on_err_or_null(res);
227 char *pp_sock_tag(struct sock_tag *st)
233 res = kasprintf(GFP_ATOMIC, "sock_tag@null{}");
234 _bug_on_err_or_null(res);
237 tag_str = pp_tag_t(&st->tag);
238 res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
239 "sock_node=rb_node{...}, "
240 "sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
242 st, st->sk, st->socket, atomic_long_read(
243 &st->socket->file->f_count),
245 _bug_on_err_or_null(res);
250 char *pp_uid_tag_data(struct uid_tag_data *utd)
255 res = kasprintf(GFP_ATOMIC, "uid_tag_data@null{}");
257 res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{"
258 "uid=%u, num_active_acct_tags=%d, "
260 "tag_node_tree=rb_root{...}, "
261 "proc_qtu_data_tree=rb_root{...}}",
263 utd->num_active_tags, utd->num_pqd);
264 _bug_on_err_or_null(res);
268 char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
270 char *parent_tag_data_str;
274 res = kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}");
275 _bug_on_err_or_null(res);
278 parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data);
279 res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{"
280 "node=rb_node{...}, pid=%u, "
281 "parent_tag_data=%s, "
282 "sock_tag_list=list_head{...}}",
283 pqd, pqd->pid, parent_tag_data_str
285 _bug_on_err_or_null(res);
286 kfree(parent_tag_data_str);
290 /*------------------------------------------*/
291 void prdebug_sock_tag_tree(int indent_level,
292 struct rb_root *sock_tag_tree)
294 struct rb_node *node;
295 struct sock_tag *sock_tag_entry;
298 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
301 if (RB_EMPTY_ROOT(sock_tag_tree)) {
302 str = "sock_tag_tree=rb_root{}";
303 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
307 str = "sock_tag_tree=rb_root{";
308 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
310 for (node = rb_first(sock_tag_tree);
312 node = rb_next(node)) {
313 sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
314 str = pp_sock_tag(sock_tag_entry);
315 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
320 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
323 void prdebug_sock_tag_list(int indent_level,
324 struct list_head *sock_tag_list)
326 struct sock_tag *sock_tag_entry;
329 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
332 if (list_empty(sock_tag_list)) {
333 str = "sock_tag_list=list_head{}";
334 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
338 str = "sock_tag_list=list_head{";
339 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
341 list_for_each_entry(sock_tag_entry, sock_tag_list, list) {
342 str = pp_sock_tag(sock_tag_entry);
343 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
348 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
351 void prdebug_proc_qtu_data_tree(int indent_level,
352 struct rb_root *proc_qtu_data_tree)
355 struct rb_node *node;
356 struct proc_qtu_data *proc_qtu_data_entry;
358 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
361 if (RB_EMPTY_ROOT(proc_qtu_data_tree)) {
362 str = "proc_qtu_data_tree=rb_root{}";
363 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
367 str = "proc_qtu_data_tree=rb_root{";
368 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
370 for (node = rb_first(proc_qtu_data_tree);
372 node = rb_next(node)) {
373 proc_qtu_data_entry = rb_entry(node,
374 struct proc_qtu_data,
376 str = pp_proc_qtu_data(proc_qtu_data_entry);
377 pr_debug("%*d: %s,\n", indent_level*2, indent_level,
381 prdebug_sock_tag_list(indent_level,
382 &proc_qtu_data_entry->sock_tag_list);
388 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
391 void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
394 struct rb_node *node;
395 struct tag_ref *tag_ref_entry;
397 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
400 if (RB_EMPTY_ROOT(tag_ref_tree)) {
401 str = "tag_ref_tree{}";
402 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
406 str = "tag_ref_tree{";
407 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
409 for (node = rb_first(tag_ref_tree);
411 node = rb_next(node)) {
412 tag_ref_entry = rb_entry(node,
415 str = pp_tag_ref(tag_ref_entry);
416 pr_debug("%*d: %s,\n", indent_level*2, indent_level,
422 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
425 void prdebug_uid_tag_data_tree(int indent_level,
426 struct rb_root *uid_tag_data_tree)
429 struct rb_node *node;
430 struct uid_tag_data *uid_tag_data_entry;
432 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
435 if (RB_EMPTY_ROOT(uid_tag_data_tree)) {
436 str = "uid_tag_data_tree=rb_root{}";
437 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
441 str = "uid_tag_data_tree=rb_root{";
442 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
444 for (node = rb_first(uid_tag_data_tree);
446 node = rb_next(node)) {
447 uid_tag_data_entry = rb_entry(node, struct uid_tag_data,
449 str = pp_uid_tag_data(uid_tag_data_entry);
450 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
452 if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) {
454 prdebug_tag_ref_tree(indent_level,
455 &uid_tag_data_entry->tag_ref_tree);
461 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
464 void prdebug_tag_stat_tree(int indent_level,
465 struct rb_root *tag_stat_tree)
468 struct rb_node *node;
469 struct tag_stat *ts_entry;
471 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
474 if (RB_EMPTY_ROOT(tag_stat_tree)) {
475 str = "tag_stat_tree{}";
476 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
480 str = "tag_stat_tree{";
481 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
483 for (node = rb_first(tag_stat_tree);
485 node = rb_next(node)) {
486 ts_entry = rb_entry(node, struct tag_stat, tn.node);
487 str = pp_tag_stat(ts_entry);
488 pr_debug("%*d: %s\n", indent_level*2, indent_level,
494 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
497 void prdebug_iface_stat_list(int indent_level,
498 struct list_head *iface_stat_list)
501 struct iface_stat *iface_entry;
503 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
506 if (list_empty(iface_stat_list)) {
507 str = "iface_stat_list=list_head{}";
508 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
512 str = "iface_stat_list=list_head{";
513 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
515 list_for_each_entry(iface_entry, iface_stat_list, list) {
516 str = pp_iface_stat(iface_entry);
517 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
520 spin_lock_bh(&iface_entry->tag_stat_list_lock);
521 if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) {
523 prdebug_tag_stat_tree(indent_level,
524 &iface_entry->tag_stat_tree);
527 spin_unlock_bh(&iface_entry->tag_stat_list_lock);
531 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
534 #endif /* ifdef DDEBUG */
535 /*------------------------------------------*/
/*
 * Printable names for the NETDEV_* notifier events, indexed by event
 * number.  Index 0 is not a real event, hence the "netdev_unknown" filler.
 */
static const char * const netdev_event_strings[] = {
	"netdev_unknown",
	"NETDEV_UP",
	"NETDEV_DOWN",
	"NETDEV_REBOOT",
	"NETDEV_CHANGE",
	"NETDEV_REGISTER",
	"NETDEV_UNREGISTER",
	"NETDEV_CHANGEMTU",
	"NETDEV_CHANGEADDR",
	"NETDEV_GOING_DOWN",
	"NETDEV_CHANGENAME",
	"NETDEV_FEAT_CHANGE",
	"NETDEV_BONDING_FAILOVER",
	"NETDEV_PRE_UP",
	"NETDEV_PRE_TYPE_CHANGE",
	"NETDEV_POST_TYPE_CHANGE",
	"NETDEV_POST_INIT",
	"NETDEV_UNREGISTER_BATCH",
	"NETDEV_RELEASE",
	"NETDEV_NOTIFY_PEERS",
	"NETDEV_JOIN",
};

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#endif

/*
 * Map a netdev notifier event number to its printable name.
 * Out-of-range values (negative or past the table) yield "bad event num".
 */
const char *netdev_evt_str(int netdev_event)
{
	/* Reject negatives before the unsigned ARRAY_SIZE comparison. */
	if (netdev_event < 0
	    || (size_t)netdev_event >= ARRAY_SIZE(netdev_event_strings))
		return "bad event num";
	return netdev_event_strings[netdev_event];
}