 * Pretty printing support for iptables xt_qtaguid module.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Most of the functions in this file just waste time if DEBUG is not defined.
 * The matching xt_qtaguid_print.h will static inline empty funcs if the needed
 * debug flags are not defined.
 * Those funcs that fail to allocate memory will panic as there is no need to
 * hobble along just pretending to do the requested work.
22 #include <linux/gfp.h>
23 #include <linux/net.h>
24 #include <linux/rbtree.h>
25 #include <linux/slab.h>
26 #include <linux/spinlock_types.h>
29 #include "xt_qtaguid_internal.h"
30 #include "xt_qtaguid_print.h"
/*
 * Panic on a failed debug-string allocation: per the file header, there is
 * no point limping along if we cannot even build the debug output.
 */
static void _bug_on_err_or_null(void *ptr)
{
	if (IS_ERR_OR_NULL(ptr)) {
		pr_err("qtaguid: kmalloc failed\n");
		BUG();
	}
}
42 char *pp_tag_t(tag_t *tag)
47 res = kasprintf(GFP_ATOMIC, "tag_t@null{}");
49 res = kasprintf(GFP_ATOMIC,
50 "tag_t@%p{tag=0x%llx, uid=%u}",
51 tag, *tag, get_uid_from_tag(*tag));
52 _bug_on_err_or_null(res);
56 char *pp_data_counters(struct data_counters *dc, bool showValues)
61 res = kasprintf(GFP_ATOMIC, "data_counters@null{}");
64 GFP_ATOMIC, "data_counters@%p{"
67 "tcp{b=%llu, p=%llu}, "
68 "udp{b=%llu, p=%llu},"
69 "other{b=%llu, p=%llu}}, "
71 "tcp{b=%llu, p=%llu}, "
72 "udp{b=%llu, p=%llu},"
73 "other{b=%llu, p=%llu}}}, "
76 "tcp{b=%llu, p=%llu}, "
77 "udp{b=%llu, p=%llu},"
78 "other{b=%llu, p=%llu}}, "
80 "tcp{b=%llu, p=%llu}, "
81 "udp{b=%llu, p=%llu},"
82 "other{b=%llu, p=%llu}}}}",
84 dc->bpc[0][IFS_RX][IFS_TCP].bytes,
85 dc->bpc[0][IFS_RX][IFS_TCP].packets,
86 dc->bpc[0][IFS_RX][IFS_UDP].bytes,
87 dc->bpc[0][IFS_RX][IFS_UDP].packets,
88 dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes,
89 dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets,
90 dc->bpc[0][IFS_TX][IFS_TCP].bytes,
91 dc->bpc[0][IFS_TX][IFS_TCP].packets,
92 dc->bpc[0][IFS_TX][IFS_UDP].bytes,
93 dc->bpc[0][IFS_TX][IFS_UDP].packets,
94 dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes,
95 dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets,
96 dc->bpc[1][IFS_RX][IFS_TCP].bytes,
97 dc->bpc[1][IFS_RX][IFS_TCP].packets,
98 dc->bpc[1][IFS_RX][IFS_UDP].bytes,
99 dc->bpc[1][IFS_RX][IFS_UDP].packets,
100 dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes,
101 dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets,
102 dc->bpc[1][IFS_TX][IFS_TCP].bytes,
103 dc->bpc[1][IFS_TX][IFS_TCP].packets,
104 dc->bpc[1][IFS_TX][IFS_UDP].bytes,
105 dc->bpc[1][IFS_TX][IFS_UDP].packets,
106 dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes,
107 dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets);
109 res = kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc);
110 _bug_on_err_or_null(res);
114 char *pp_tag_node(struct tag_node *tn)
120 res = kasprintf(GFP_ATOMIC, "tag_node@null{}");
121 _bug_on_err_or_null(res);
124 tag_str = pp_tag_t(&tn->tag);
125 res = kasprintf(GFP_ATOMIC,
126 "tag_node@%p{tag=%s}",
128 _bug_on_err_or_null(res);
133 char *pp_tag_ref(struct tag_ref *tr)
139 res = kasprintf(GFP_ATOMIC, "tag_ref@null{}");
140 _bug_on_err_or_null(res);
143 tn_str = pp_tag_node(&tr->tn);
144 res = kasprintf(GFP_ATOMIC,
145 "tag_ref@%p{%s, num_sock_tags=%d}",
146 tr, tn_str, tr->num_sock_tags);
147 _bug_on_err_or_null(res);
152 char *pp_tag_stat(struct tag_stat *ts)
156 char *parent_counters_str;
160 res = kasprintf(GFP_ATOMIC, "tag_stat@null{}");
161 _bug_on_err_or_null(res);
164 tn_str = pp_tag_node(&ts->tn);
165 counters_str = pp_data_counters(&ts->counters, true);
166 parent_counters_str = pp_data_counters(ts->parent_counters, false);
167 res = kasprintf(GFP_ATOMIC,
168 "tag_stat@%p{%s, counters=%s, parent_counters=%s}",
169 ts, tn_str, counters_str, parent_counters_str);
170 _bug_on_err_or_null(res);
173 kfree(parent_counters_str);
177 char *pp_iface_stat(struct iface_stat *is)
181 res = kasprintf(GFP_ATOMIC, "iface_stat@null{}");
183 res = kasprintf(GFP_ATOMIC, "iface_stat@%p{"
184 "list=list_head{...}, "
186 "total_dev={rx={bytes=%llu, "
190 "total_skb={rx={bytes=%llu, "
194 "last_known_valid=%d, "
195 "last_known={rx={bytes=%llu, "
202 "tag_stat_tree=rb_root{...}}",
205 is->totals_via_dev[IFS_RX].bytes,
206 is->totals_via_dev[IFS_RX].packets,
207 is->totals_via_dev[IFS_TX].bytes,
208 is->totals_via_dev[IFS_TX].packets,
209 is->totals_via_skb[IFS_RX].bytes,
210 is->totals_via_skb[IFS_RX].packets,
211 is->totals_via_skb[IFS_TX].bytes,
212 is->totals_via_skb[IFS_TX].packets,
213 is->last_known_valid,
214 is->last_known[IFS_RX].bytes,
215 is->last_known[IFS_RX].packets,
216 is->last_known[IFS_TX].bytes,
217 is->last_known[IFS_TX].packets,
221 _bug_on_err_or_null(res);
225 char *pp_sock_tag(struct sock_tag *st)
231 res = kasprintf(GFP_ATOMIC, "sock_tag@null{}");
232 _bug_on_err_or_null(res);
235 tag_str = pp_tag_t(&st->tag);
236 res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
237 "sock_node=rb_node{...}, "
238 "sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
240 st, st->sk, st->socket, atomic_long_read(
241 &st->socket->file->f_count),
243 _bug_on_err_or_null(res);
248 char *pp_uid_tag_data(struct uid_tag_data *utd)
253 res = kasprintf(GFP_ATOMIC, "uid_tag_data@null{}");
255 res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{"
256 "uid=%u, num_active_acct_tags=%d, "
258 "tag_node_tree=rb_root{...}, "
259 "proc_qtu_data_tree=rb_root{...}}",
261 utd->num_active_tags, utd->num_pqd);
262 _bug_on_err_or_null(res);
266 char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
268 char *parent_tag_data_str;
272 res = kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}");
273 _bug_on_err_or_null(res);
276 parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data);
277 res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{"
278 "node=rb_node{...}, pid=%u, "
279 "parent_tag_data=%s, "
280 "sock_tag_list=list_head{...}}",
281 pqd, pqd->pid, parent_tag_data_str
283 _bug_on_err_or_null(res);
284 kfree(parent_tag_data_str);
288 /*------------------------------------------*/
289 void prdebug_sock_tag_tree(int indent_level,
290 struct rb_root *sock_tag_tree)
292 struct rb_node *node;
293 struct sock_tag *sock_tag_entry;
296 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
299 if (RB_EMPTY_ROOT(sock_tag_tree)) {
300 str = "sock_tag_tree=rb_root{}";
301 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
305 str = "sock_tag_tree=rb_root{";
306 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
308 for (node = rb_first(sock_tag_tree);
310 node = rb_next(node)) {
311 sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
312 str = pp_sock_tag(sock_tag_entry);
313 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
318 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
321 void prdebug_sock_tag_list(int indent_level,
322 struct list_head *sock_tag_list)
324 struct sock_tag *sock_tag_entry;
327 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
330 if (list_empty(sock_tag_list)) {
331 str = "sock_tag_list=list_head{}";
332 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
336 str = "sock_tag_list=list_head{";
337 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
339 list_for_each_entry(sock_tag_entry, sock_tag_list, list) {
340 str = pp_sock_tag(sock_tag_entry);
341 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
346 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
349 void prdebug_proc_qtu_data_tree(int indent_level,
350 struct rb_root *proc_qtu_data_tree)
353 struct rb_node *node;
354 struct proc_qtu_data *proc_qtu_data_entry;
356 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
359 if (RB_EMPTY_ROOT(proc_qtu_data_tree)) {
360 str = "proc_qtu_data_tree=rb_root{}";
361 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
365 str = "proc_qtu_data_tree=rb_root{";
366 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
368 for (node = rb_first(proc_qtu_data_tree);
370 node = rb_next(node)) {
371 proc_qtu_data_entry = rb_entry(node,
372 struct proc_qtu_data,
374 str = pp_proc_qtu_data(proc_qtu_data_entry);
375 pr_debug("%*d: %s,\n", indent_level*2, indent_level,
379 prdebug_sock_tag_list(indent_level,
380 &proc_qtu_data_entry->sock_tag_list);
386 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
389 void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
392 struct rb_node *node;
393 struct tag_ref *tag_ref_entry;
395 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
398 if (RB_EMPTY_ROOT(tag_ref_tree)) {
399 str = "tag_ref_tree{}";
400 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
404 str = "tag_ref_tree{";
405 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
407 for (node = rb_first(tag_ref_tree);
409 node = rb_next(node)) {
410 tag_ref_entry = rb_entry(node,
413 str = pp_tag_ref(tag_ref_entry);
414 pr_debug("%*d: %s,\n", indent_level*2, indent_level,
420 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
423 void prdebug_uid_tag_data_tree(int indent_level,
424 struct rb_root *uid_tag_data_tree)
427 struct rb_node *node;
428 struct uid_tag_data *uid_tag_data_entry;
430 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
433 if (RB_EMPTY_ROOT(uid_tag_data_tree)) {
434 str = "uid_tag_data_tree=rb_root{}";
435 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
439 str = "uid_tag_data_tree=rb_root{";
440 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
442 for (node = rb_first(uid_tag_data_tree);
444 node = rb_next(node)) {
445 uid_tag_data_entry = rb_entry(node, struct uid_tag_data,
447 str = pp_uid_tag_data(uid_tag_data_entry);
448 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
450 if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) {
452 prdebug_tag_ref_tree(indent_level,
453 &uid_tag_data_entry->tag_ref_tree);
459 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
462 void prdebug_tag_stat_tree(int indent_level,
463 struct rb_root *tag_stat_tree)
466 struct rb_node *node;
467 struct tag_stat *ts_entry;
469 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
472 if (RB_EMPTY_ROOT(tag_stat_tree)) {
473 str = "tag_stat_tree{}";
474 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
478 str = "tag_stat_tree{";
479 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
481 for (node = rb_first(tag_stat_tree);
483 node = rb_next(node)) {
484 ts_entry = rb_entry(node, struct tag_stat, tn.node);
485 str = pp_tag_stat(ts_entry);
486 pr_debug("%*d: %s\n", indent_level*2, indent_level,
492 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
495 void prdebug_iface_stat_list(int indent_level,
496 struct list_head *iface_stat_list)
499 struct iface_stat *iface_entry;
501 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
504 if (list_empty(iface_stat_list)) {
505 str = "iface_stat_list=list_head{}";
506 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
510 str = "iface_stat_list=list_head{";
511 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
513 list_for_each_entry(iface_entry, iface_stat_list, list) {
514 str = pp_iface_stat(iface_entry);
515 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
518 spin_lock_bh(&iface_entry->tag_stat_list_lock);
519 if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) {
521 prdebug_tag_stat_tree(indent_level,
522 &iface_entry->tag_stat_tree);
525 spin_unlock_bh(&iface_entry->tag_stat_list_lock);
529 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
532 #endif /* ifdef DDEBUG */
533 /*------------------------------------------*/
/*
 * Names for the NETDEV_* notifier events, indexed by event number.
 * Entry 0 is a placeholder since event numbering starts at 1.
 * NOTE(review): ordering must match the NETDEV_* constants in
 * <linux/netdevice.h> for the kernel version this module targets.
 */
static const char * const netdev_event_strings[] = {
	"netdev_unknown",
	"NETDEV_UP",
	"NETDEV_DOWN",
	"NETDEV_REBOOT",
	"NETDEV_CHANGE",
	"NETDEV_REGISTER",
	"NETDEV_UNREGISTER",
	"NETDEV_CHANGEMTU",
	"NETDEV_CHANGEADDR",
	"NETDEV_GOING_DOWN",
	"NETDEV_CHANGENAME",
	"NETDEV_FEAT_CHANGE",
	"NETDEV_BONDING_FAILOVER",
	"NETDEV_PRE_UP",
	"NETDEV_PRE_TYPE_CHANGE",
	"NETDEV_POST_TYPE_CHANGE",
	"NETDEV_POST_INIT",
	"NETDEV_UNREGISTER_BATCH",
	"NETDEV_RELEASE",
	"NETDEV_NOTIFY_PEERS",
	"NETDEV_JOIN",
};
558 const char *netdev_evt_str(int netdev_event)
561 || netdev_event >= ARRAY_SIZE(netdev_event_strings))
562 return "bad event num";
563 return netdev_event_strings[netdev_event];