2 * Pretty printing Support for iptables xt_qtaguid module.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
12 * Most of the functions in this file just waste time if DEBUG is not defined.
13 * The matching xt_qtaguid_print.h will static inline empty funcs if the needed
14 * debug flags are not defined.
15 * Those funcs that fail to allocate memory will panic as there is no need to
16 * hobble along just pretending to do the requested work.
22 #include <linux/gfp.h>
23 #include <linux/net.h>
24 #include <linux/rbtree.h>
25 #include <linux/slab.h>
26 #include <linux/spinlock_types.h>
29 #include "xt_qtaguid_internal.h"
30 #include "xt_qtaguid_print.h"
/*
 * Panic if a debug-print allocation failed.  These helpers are
 * debug-only, so there is no point limping along without memory
 * (see the comment block at the top of this file).
 */
static void _bug_on_err_or_null(void *ptr)
{
	if (IS_ERR_OR_NULL(ptr)) {
		pr_err("qtaguid: kmalloc failed\n");
		BUG();
	}
}
42 char *pp_tag_t(tag_t *tag)
47 res = kasprintf(GFP_ATOMIC, "tag_t@null{}");
49 res = kasprintf(GFP_ATOMIC,
50 "tag_t@%p{tag=0x%llx, uid=%u}",
51 tag, *tag, get_uid_from_tag(*tag));
52 _bug_on_err_or_null(res);
56 char *pp_data_counters(struct data_counters *dc, bool showValues)
61 res = kasprintf(GFP_ATOMIC, "data_counters@null{}");
64 GFP_ATOMIC, "data_counters@%p{"
67 "tcp{b=%llu, p=%llu}, "
68 "udp{b=%llu, p=%llu},"
69 "other{b=%llu, p=%llu}}, "
71 "tcp{b=%llu, p=%llu}, "
72 "udp{b=%llu, p=%llu},"
73 "other{b=%llu, p=%llu}}}, "
76 "tcp{b=%llu, p=%llu}, "
77 "udp{b=%llu, p=%llu},"
78 "other{b=%llu, p=%llu}}, "
80 "tcp{b=%llu, p=%llu}, "
81 "udp{b=%llu, p=%llu},"
82 "other{b=%llu, p=%llu}}}}",
84 dc->bpc[0][IFS_RX][IFS_TCP].bytes,
85 dc->bpc[0][IFS_RX][IFS_TCP].packets,
86 dc->bpc[0][IFS_RX][IFS_UDP].bytes,
87 dc->bpc[0][IFS_RX][IFS_UDP].packets,
88 dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes,
89 dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets,
90 dc->bpc[0][IFS_TX][IFS_TCP].bytes,
91 dc->bpc[0][IFS_TX][IFS_TCP].packets,
92 dc->bpc[0][IFS_TX][IFS_UDP].bytes,
93 dc->bpc[0][IFS_TX][IFS_UDP].packets,
94 dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes,
95 dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets,
96 dc->bpc[1][IFS_RX][IFS_TCP].bytes,
97 dc->bpc[1][IFS_RX][IFS_TCP].packets,
98 dc->bpc[1][IFS_RX][IFS_UDP].bytes,
99 dc->bpc[1][IFS_RX][IFS_UDP].packets,
100 dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes,
101 dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets,
102 dc->bpc[1][IFS_TX][IFS_TCP].bytes,
103 dc->bpc[1][IFS_TX][IFS_TCP].packets,
104 dc->bpc[1][IFS_TX][IFS_UDP].bytes,
105 dc->bpc[1][IFS_TX][IFS_UDP].packets,
106 dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes,
107 dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets);
109 res = kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc);
110 _bug_on_err_or_null(res);
114 char *pp_tag_node(struct tag_node *tn)
120 res = kasprintf(GFP_ATOMIC, "tag_node@null{}");
121 _bug_on_err_or_null(res);
124 tag_str = pp_tag_t(&tn->tag);
125 res = kasprintf(GFP_ATOMIC,
126 "tag_node@%p{tag=%s}",
128 _bug_on_err_or_null(res);
133 char *pp_tag_ref(struct tag_ref *tr)
139 res = kasprintf(GFP_ATOMIC, "tag_ref@null{}");
140 _bug_on_err_or_null(res);
143 tn_str = pp_tag_node(&tr->tn);
144 res = kasprintf(GFP_ATOMIC,
145 "tag_ref@%p{%s, num_sock_tags=%d}",
146 tr, tn_str, tr->num_sock_tags);
147 _bug_on_err_or_null(res);
152 char *pp_tag_stat(struct tag_stat *ts)
156 char *parent_counters_str;
160 res = kasprintf(GFP_ATOMIC, "tag_stat@null{}");
161 _bug_on_err_or_null(res);
164 tn_str = pp_tag_node(&ts->tn);
165 counters_str = pp_data_counters(&ts->counters, true);
166 parent_counters_str = pp_data_counters(ts->parent_counters, false);
167 res = kasprintf(GFP_ATOMIC,
168 "tag_stat@%p{%s, counters=%s, parent_counters=%s}",
169 ts, tn_str, counters_str, parent_counters_str);
170 _bug_on_err_or_null(res);
173 kfree(parent_counters_str);
177 char *pp_iface_stat(struct iface_stat *is)
181 res = kasprintf(GFP_ATOMIC, "iface_stat@null{}");
183 res = kasprintf(GFP_ATOMIC, "iface_stat@%p{"
184 "list=list_head{...}, "
186 "total={rx={bytes=%llu, "
190 "last_known_valid=%d, "
191 "last_known={rx={bytes=%llu, "
198 "tag_stat_tree=rb_root{...}}",
201 is->totals[IFS_RX].bytes,
202 is->totals[IFS_RX].packets,
203 is->totals[IFS_TX].bytes,
204 is->totals[IFS_TX].packets,
205 is->last_known_valid,
206 is->last_known[IFS_RX].bytes,
207 is->last_known[IFS_RX].packets,
208 is->last_known[IFS_TX].bytes,
209 is->last_known[IFS_TX].packets,
213 _bug_on_err_or_null(res);
217 char *pp_sock_tag(struct sock_tag *st)
223 res = kasprintf(GFP_ATOMIC, "sock_tag@null{}");
224 _bug_on_err_or_null(res);
227 tag_str = pp_tag_t(&st->tag);
228 res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
229 "sock_node=rb_node{...}, "
230 "sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
232 st, st->sk, st->socket, atomic_long_read(
233 &st->socket->file->f_count),
235 _bug_on_err_or_null(res);
240 char *pp_uid_tag_data(struct uid_tag_data *utd)
245 res = kasprintf(GFP_ATOMIC, "uid_tag_data@null{}");
247 res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{"
248 "uid=%u, num_active_acct_tags=%d, "
250 "tag_node_tree=rb_root{...}, "
251 "proc_qtu_data_tree=rb_root{...}}",
253 utd->num_active_tags, utd->num_pqd);
254 _bug_on_err_or_null(res);
258 char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
260 char *parent_tag_data_str;
264 res = kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}");
265 _bug_on_err_or_null(res);
268 parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data);
269 res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{"
270 "node=rb_node{...}, pid=%u, "
271 "parent_tag_data=%s, "
272 "sock_tag_list=list_head{...}}",
273 pqd, pqd->pid, parent_tag_data_str
275 _bug_on_err_or_null(res);
276 kfree(parent_tag_data_str);
280 /*------------------------------------------*/
281 void prdebug_sock_tag_tree(int indent_level,
282 struct rb_root *sock_tag_tree)
284 struct rb_node *node;
285 struct sock_tag *sock_tag_entry;
288 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
291 if (RB_EMPTY_ROOT(sock_tag_tree)) {
292 str = "sock_tag_tree=rb_root{}";
293 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
297 str = "sock_tag_tree=rb_root{";
298 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
300 for (node = rb_first(sock_tag_tree);
302 node = rb_next(node)) {
303 sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
304 str = pp_sock_tag(sock_tag_entry);
305 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
310 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
313 void prdebug_sock_tag_list(int indent_level,
314 struct list_head *sock_tag_list)
316 struct sock_tag *sock_tag_entry;
319 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
322 if (list_empty(sock_tag_list)) {
323 str = "sock_tag_list=list_head{}";
324 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
328 str = "sock_tag_list=list_head{";
329 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
331 list_for_each_entry(sock_tag_entry, sock_tag_list, list) {
332 str = pp_sock_tag(sock_tag_entry);
333 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
338 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
341 void prdebug_proc_qtu_data_tree(int indent_level,
342 struct rb_root *proc_qtu_data_tree)
345 struct rb_node *node;
346 struct proc_qtu_data *proc_qtu_data_entry;
348 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
351 if (RB_EMPTY_ROOT(proc_qtu_data_tree)) {
352 str = "proc_qtu_data_tree=rb_root{}";
353 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
357 str = "proc_qtu_data_tree=rb_root{";
358 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
360 for (node = rb_first(proc_qtu_data_tree);
362 node = rb_next(node)) {
363 proc_qtu_data_entry = rb_entry(node,
364 struct proc_qtu_data,
366 str = pp_proc_qtu_data(proc_qtu_data_entry);
367 pr_debug("%*d: %s,\n", indent_level*2, indent_level,
371 prdebug_sock_tag_list(indent_level,
372 &proc_qtu_data_entry->sock_tag_list);
378 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
381 void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
384 struct rb_node *node;
385 struct tag_ref *tag_ref_entry;
387 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
390 if (RB_EMPTY_ROOT(tag_ref_tree)) {
391 str = "tag_ref_tree{}";
392 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
396 str = "tag_ref_tree{";
397 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
399 for (node = rb_first(tag_ref_tree);
401 node = rb_next(node)) {
402 tag_ref_entry = rb_entry(node,
405 str = pp_tag_ref(tag_ref_entry);
406 pr_debug("%*d: %s,\n", indent_level*2, indent_level,
412 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
415 void prdebug_uid_tag_data_tree(int indent_level,
416 struct rb_root *uid_tag_data_tree)
419 struct rb_node *node;
420 struct uid_tag_data *uid_tag_data_entry;
422 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
425 if (RB_EMPTY_ROOT(uid_tag_data_tree)) {
426 str = "uid_tag_data_tree=rb_root{}";
427 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
431 str = "uid_tag_data_tree=rb_root{";
432 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
434 for (node = rb_first(uid_tag_data_tree);
436 node = rb_next(node)) {
437 uid_tag_data_entry = rb_entry(node, struct uid_tag_data,
439 str = pp_uid_tag_data(uid_tag_data_entry);
440 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
442 if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) {
444 prdebug_tag_ref_tree(indent_level,
445 &uid_tag_data_entry->tag_ref_tree);
451 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
454 void prdebug_tag_stat_tree(int indent_level,
455 struct rb_root *tag_stat_tree)
458 struct rb_node *node;
459 struct tag_stat *ts_entry;
461 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
464 if (RB_EMPTY_ROOT(tag_stat_tree)) {
465 str = "tag_stat_tree{}";
466 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
470 str = "tag_stat_tree{";
471 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
473 for (node = rb_first(tag_stat_tree);
475 node = rb_next(node)) {
476 ts_entry = rb_entry(node, struct tag_stat, tn.node);
477 str = pp_tag_stat(ts_entry);
478 pr_debug("%*d: %s\n", indent_level*2, indent_level,
484 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
487 void prdebug_iface_stat_list(int indent_level,
488 struct list_head *iface_stat_list)
491 struct iface_stat *iface_entry;
493 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
496 if (list_empty(iface_stat_list)) {
497 str = "iface_stat_list=list_head{}";
498 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
502 str = "iface_stat_list=list_head{";
503 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
505 list_for_each_entry(iface_entry, iface_stat_list, list) {
506 str = pp_iface_stat(iface_entry);
507 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
510 spin_lock_bh(&iface_entry->tag_stat_list_lock);
511 if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) {
513 prdebug_tag_stat_tree(indent_level,
514 &iface_entry->tag_stat_tree);
517 spin_unlock_bh(&iface_entry->tag_stat_list_lock);
521 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
524 #endif /* ifdef DDEBUG */
525 /*------------------------------------------*/
/*
 * Names for the NETDEV_* notifier events, indexed by event number
 * (index 0 is unused by the notifier, hence "netdev_unknown").
 * Must stay in sync with the NETDEV_* defines in <linux/netdevice.h>.
 */
static const char * const netdev_event_strings[] = {
	"netdev_unknown",
	"NETDEV_UP",
	"NETDEV_DOWN",
	"NETDEV_REBOOT",
	"NETDEV_CHANGE",
	"NETDEV_REGISTER",
	"NETDEV_UNREGISTER",
	"NETDEV_CHANGEMTU",
	"NETDEV_CHANGEADDR",
	"NETDEV_GOING_DOWN",
	"NETDEV_CHANGENAME",
	"NETDEV_FEAT_CHANGE",
	"NETDEV_BONDING_FAILOVER",
	"NETDEV_PRE_UP",
	"NETDEV_PRE_TYPE_CHANGE",
	"NETDEV_POST_TYPE_CHANGE",
	"NETDEV_POST_INIT",
	"NETDEV_UNREGISTER_BATCH",
	"NETDEV_RELEASE",
	"NETDEV_NOTIFY_PEERS",
	"NETDEV_JOIN",
};
550 const char *netdev_evt_str(int netdev_event)
553 || netdev_event >= ARRAY_SIZE(netdev_event_strings))
554 return "bad event num";
555 return netdev_event_strings[netdev_event];