/*
 *	xt_hashlimit - Netfilter module to limit the number of packets per time
 *	separately for each hashbucket (sourceip/sourceport/dstip/dstport)
 *
 *	(C) 2003-2004 by Harald Welte <laforge@netfilter.org>
 *	Copyright © CC Computer Consultants GmbH, 2007 - 2008
 *
 * Development of this code was funded by Astaro AG, http://www.astaro.com/
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/ip.h>
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#include <linux/ipv6.h>
#include <net/ipv6.h>
#endif

#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/xt_hashlimit.h>
#include <linux/mutex.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
MODULE_ALIAS("ipt_hashlimit");
MODULE_ALIAS("ip6t_hashlimit");
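
/*
 * Illustrative userspace usage (see iptables-extensions(8) for the
 * authoritative syntax); each distinct source IP then gets its own
 * token bucket in the hash table implemented below:
 *
 *   iptables -A INPUT -p tcp --dport 22 \
 *            -m hashlimit --hashlimit-name ssh --hashlimit-mode srcip \
 *            --hashlimit-upto 3/min --hashlimit-burst 5 -j ACCEPT
 */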

struct hashlimit_net {
	struct hlist_head	htables;
	struct proc_dir_entry	*ipt_hashlimit;
	struct proc_dir_entry	*ip6t_hashlimit;
};

static int hashlimit_net_id;

static inline struct hashlimit_net *hashlimit_pernet(struct net *net)
{
	return net_generic(net, hashlimit_net_id);
}

/* need to declare this at the top */
static const struct file_operations dl_file_ops;

/* hash-table key: the (masked) tuple an entry is rate-limited on */
struct dsthash_dst {
	union {
		struct {
			__be32 src;
			__be32 dst;
		} ip;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
		struct {
			__be32 src[4];
			__be32 dst[4];
		} ip6;
#endif
	};
	__be16 src_port;
	__be16 dst_port;
};

struct dsthash_ent {
	/* static / read-only parts in the beginning */
	struct hlist_node node;
	struct dsthash_dst dst;

	/* modified structure members in the end */
	spinlock_t lock;
	unsigned long expires;		/* precalculated expiry time */
	struct {
		unsigned long prev;	/* last modification */
		u_int32_t credit;
		u_int32_t credit_cap, cost;
	} rateinfo;

	struct rcu_head rcu;
};

struct xt_hashlimit_htable {
	struct hlist_node node;		/* global list of all htables */
	int use;
	u_int8_t family;
	bool rnd_initialized;

	struct hashlimit_cfg1 cfg;	/* config */

	/* used internally */
	spinlock_t lock;		/* lock for list_head */
	u_int32_t rnd;			/* random seed for hash */
	unsigned int count;		/* number entries in table */
	struct timer_list timer;	/* timer for gc */

	/* seq_file stuff */
	struct proc_dir_entry *pde;
	struct net *net;

	struct hlist_head hash[0];	/* hashtable itself */
};

static DEFINE_MUTEX(hashlimit_mutex);	/* protects htables list */
static struct kmem_cache *hashlimit_cachep __read_mostly;

static inline bool dst_cmp(const struct dsthash_ent *ent,
			   const struct dsthash_dst *b)
{
	return !memcmp(&ent->dst, b, sizeof(ent->dst));
}

static u_int32_t
hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
{
	u_int32_t hash = jhash2((const u32 *)dst,
				sizeof(*dst)/sizeof(u32),
				ht->rnd);
	/*
	 * Instead of returning hash % ht->cfg.size (implying a divide)
	 * we return the high 32 bits of (hash * ht->cfg.size), which
	 * yields results in [0, cfg.size-1] with the same hash
	 * distribution, but uses a multiply, less expensive than a divide.
	 */
	return ((u64)hash * ht->cfg.size) >> 32;
}
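
/*
 * Worked example of the multiply-shift above (values illustrative):
 * with cfg.size == 1024 and hash == 0x80000000 (halfway through the
 * 32-bit range), (u64)hash * 1024 == 2^41 and >> 32 yields bucket 512,
 * halfway through the table. The mapping keys off the high bits rather
 * than the low bits that hash % size would use, but it is equally
 * uniform over jhash output, and needs no divide.
 */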

static struct dsthash_ent *
dsthash_find(const struct xt_hashlimit_htable *ht,
	     const struct dsthash_dst *dst)
{
	struct dsthash_ent *ent;
	u_int32_t hash = hash_dst(ht, dst);

	if (!hlist_empty(&ht->hash[hash])) {
		hlist_for_each_entry_rcu(ent, &ht->hash[hash], node)
			if (dst_cmp(ent, dst)) {
				/* return with ent->lock held */
				spin_lock(&ent->lock);
				return ent;
			}
	}
	return NULL;
}

/* allocate dsthash_ent, initialize dst, put in htable and lock it */
static struct dsthash_ent *
dsthash_alloc_init(struct xt_hashlimit_htable *ht,
		   const struct dsthash_dst *dst, bool *race)
{
	struct dsthash_ent *ent;

	spin_lock(&ht->lock);

	/* Two or more packets may race to create the same entry in the
	 * hashtable, double check if this packet lost race.
	 */
	ent = dsthash_find(ht, dst);
	if (ent != NULL) {
		spin_unlock(&ht->lock);
		*race = true;
		return ent;
	}

	/* initialize hash with random val at the time we allocate
	 * the first hashtable entry */
	if (unlikely(!ht->rnd_initialized)) {
		get_random_bytes(&ht->rnd, sizeof(ht->rnd));
		ht->rnd_initialized = true;
	}

	if (ht->cfg.max && ht->count >= ht->cfg.max) {
		/* FIXME: do something. question is what.. */
		net_err_ratelimited("max count of %u reached\n", ht->cfg.max);
		ent = NULL;
	} else
		ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
	if (ent) {
		memcpy(&ent->dst, dst, sizeof(ent->dst));
		spin_lock_init(&ent->lock);

		/* return with ent->lock held, as dsthash_find() does */
		spin_lock(&ent->lock);
		hlist_add_head_rcu(&ent->node, &ht->hash[hash_dst(ht, dst)]);
		ht->count++;
	}
	spin_unlock(&ht->lock);
	return ent;
}

static void dsthash_free_rcu(struct rcu_head *head)
{
	struct dsthash_ent *ent = container_of(head, struct dsthash_ent, rcu);

	kmem_cache_free(hashlimit_cachep, ent);
}

static inline void
dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
{
	hlist_del_rcu(&ent->node);
	call_rcu_bh(&ent->rcu, dsthash_free_rcu);
	ht->count--;
}

static void htable_gc(unsigned long htlong);

static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
			 u_int8_t family)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
	struct xt_hashlimit_htable *hinfo;
	unsigned int size;
	unsigned int i;

	if (minfo->cfg.size) {
		size = minfo->cfg.size;
	} else {
		size = (totalram_pages << PAGE_SHIFT) / 16384 /
		       sizeof(struct list_head);
		if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
			size = 8192;
		if (size < 16)
			size = 16;
	}
	/* FIXME: don't use vmalloc() here or anywhere else -HW */
	hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
			sizeof(struct list_head) * size);
	if (hinfo == NULL)
		return -ENOMEM;
	minfo->hinfo = hinfo;

	/* copy match config into hashtable config */
	memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg));
	hinfo->cfg.size = size;
	if (hinfo->cfg.max == 0)
		hinfo->cfg.max = 8 * hinfo->cfg.size;
	else if (hinfo->cfg.max < hinfo->cfg.size)
		hinfo->cfg.max = hinfo->cfg.size;

	for (i = 0; i < hinfo->cfg.size; i++)
		INIT_HLIST_HEAD(&hinfo->hash[i]);

	hinfo->use = 1;
	hinfo->count = 0;
	hinfo->family = family;
	hinfo->rnd_initialized = false;
	spin_lock_init(&hinfo->lock);

	hinfo->pde = proc_create_data(minfo->name, 0,
		(family == NFPROTO_IPV4) ?
		hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
		&dl_file_ops, hinfo);
	if (hinfo->pde == NULL) {
		vfree(hinfo);
		return -ENOMEM;
	}
	hinfo->net = net;

	setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
	hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
	add_timer(&hinfo->timer);

	hlist_add_head(&hinfo->node, &hashlimit_net->htables);

	return 0;
}

static bool select_all(const struct xt_hashlimit_htable *ht,
		       const struct dsthash_ent *he)
{
	return 1;
}

static bool select_gc(const struct xt_hashlimit_htable *ht,
		      const struct dsthash_ent *he)
{
	return time_after_eq(jiffies, he->expires);
}

static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
			bool (*select)(const struct xt_hashlimit_htable *ht,
				       const struct dsthash_ent *he))
{
	unsigned int i;

	/* lock hash table and iterate over it */
	spin_lock_bh(&ht->lock);
	for (i = 0; i < ht->cfg.size; i++) {
		struct dsthash_ent *dh;
		struct hlist_node *n;

		hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
			if ((*select)(ht, dh))
				dsthash_free(ht, dh);
		}
	}
	spin_unlock_bh(&ht->lock);
}

/* hash table garbage collector, run by timer */
static void htable_gc(unsigned long htlong)
{
	struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong;

	htable_selective_cleanup(ht, select_gc);

	/* re-add the timer accordingly */
	ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
	add_timer(&ht->timer);
}

static void htable_destroy(struct xt_hashlimit_htable *hinfo)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
	struct proc_dir_entry *parent;

	del_timer_sync(&hinfo->timer);

	if (hinfo->family == NFPROTO_IPV4)
		parent = hashlimit_net->ipt_hashlimit;
	else
		parent = hashlimit_net->ip6t_hashlimit;

	if (parent != NULL)
		remove_proc_entry(hinfo->pde->name, parent);

	htable_selective_cleanup(hinfo, select_all);
	vfree(hinfo);
}

static struct xt_hashlimit_htable *htable_find_get(struct net *net,
						   const char *name,
						   u_int8_t family)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
	struct xt_hashlimit_htable *hinfo;

	hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
		if (!strcmp(name, hinfo->pde->name) &&
		    hinfo->family == family) {
			hinfo->use++;
			return hinfo;
		}
	}
	return NULL;
}

static void htable_put(struct xt_hashlimit_htable *hinfo)
{
	mutex_lock(&hashlimit_mutex);
	if (--hinfo->use == 0) {
		hlist_del(&hinfo->node);
		htable_destroy(hinfo);
	}
	mutex_unlock(&hashlimit_mutex);
}

/* The algorithm used is the Simple Token Bucket Filter (TBF)
 * see net/sched/sch_tbf.c in the linux source tree
 */

/* Rusty: This is my (non-mathematically-inclined) understanding of
   this algorithm.  The `average rate' in jiffies becomes your initial
   amount of credit `credit' and the most credit you can ever have
   `credit_cap'.  The `peak rate' becomes the cost of passing the
   test, `cost'.

   `prev' tracks the last packet hit: you gain one credit per jiffy.
   If your credit balance would exceed `credit_cap', the extra credit
   is discarded.  Every time the match passes, you lose `cost' credits;
   if you don't have that many, the test fails.

   See Alexey's formal explanation in net/sched/sch_tbf.c.

   To get the maximum range, we multiply by this factor (ie. you get N
   credits per jiffy).  We want to allow a rate as low as 1 per day
   (slowest userspace tool allows), which means
   CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32, ie.
*/
#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))

/* Repeated shift and or gives us all 1s, final shift and add 1 gives
 * us the power of 2 below the theoretical max, so GCC simply does a
 * shift. */
#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)

#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
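
/*
 * Worked example (illustrative): with HZ == 1000,
 * MAX_CPJ = 0xFFFFFFFF / (1000*60*60*24) = 49, so
 * CREDITS_PER_JIFFY = POW2_BELOW32(49) = 32 -- the largest power of
 * two for which a full day of accumulated credit still fits in 32 bits.
 */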

/* in byte mode, the lowest possible rate is one packet/second.
 * credit_cap is used as a counter that tells us how many times we can
 * refill the "credits available" counter when it becomes empty.
 */
#define MAX_CPJ_BYTES (0xFFFFFFFF / HZ)
#define CREDITS_PER_JIFFY_BYTES POW2_BELOW32(MAX_CPJ_BYTES)

static u32 xt_hashlimit_len_to_chunks(u32 len)
{
	return (len >> XT_HASHLIMIT_BYTE_SHIFT) + 1;
}

/* Precision saver. */
static u32 user2credits(u32 user)
{
	/* If multiplying would overflow... */
	if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
		/* Divide first. */
		return (user / XT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;

	return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
}
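
/*
 * Worked example (illustrative; HZ == 1000, so CREDITS_PER_JIFFY == 32):
 * userspace expresses "N matches per second" as avg = XT_HASHLIMIT_SCALE/N.
 * For 5/sec, avg = 10000/5 = 2000, and
 * user2credits(2000) = 2000 * 1000 * 32 / 10000 = 6400 credits of cost
 * per packet, against the 32 * 1000 = 32000 credits earned per second:
 * exactly five packets per second can pass in steady state.
 */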

static u32 user2credits_byte(u32 user)
{
	u64 us = user;

	us *= HZ * CREDITS_PER_JIFFY_BYTES;
	return (u32) (us >> 32);
}
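
/*
 * user2credits_byte() computes user * HZ * CREDITS_PER_JIFFY_BYTES >> 32.
 * How cfg.avg is encoded in byte mode is a userspace convention (this
 * file only assumes it): the value is chosen so that the resulting
 * per-chunk cost, times the configured chunks-per-second, matches the
 * CREDITS_PER_JIFFY_BYTES * HZ credits a bucket earns per second.  A
 * result of 0 (rate too high to represent) is rejected in
 * hashlimit_mt_check() below.
 */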

static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now, u32 mode)
{
	unsigned long delta = now - dh->rateinfo.prev;
	u32 cap;

	if (delta == 0)
		return;

	dh->rateinfo.prev = now;

	if (mode & XT_HASHLIMIT_BYTES) {
		u32 tmp = dh->rateinfo.credit;
		dh->rateinfo.credit += CREDITS_PER_JIFFY_BYTES * delta;
		cap = CREDITS_PER_JIFFY_BYTES * HZ;
		if (tmp >= dh->rateinfo.credit) {/* overflow */
			dh->rateinfo.credit = cap;
			return;
		}
	} else {
		dh->rateinfo.credit += delta * CREDITS_PER_JIFFY;
		cap = dh->rateinfo.credit_cap;
	}
	if (dh->rateinfo.credit > cap)
		dh->rateinfo.credit = cap;
}

static void rateinfo_init(struct dsthash_ent *dh,
			  struct xt_hashlimit_htable *hinfo)
{
	dh->rateinfo.prev = jiffies;
	if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) {
		dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
		dh->rateinfo.cost = user2credits_byte(hinfo->cfg.avg);
		dh->rateinfo.credit_cap = hinfo->cfg.burst;
	} else {
		dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
						   hinfo->cfg.burst);
		dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
		dh->rateinfo.credit_cap = dh->rateinfo.credit;
	}
}
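
/*
 * In packet mode a fresh bucket therefore starts full: continuing the
 * illustrative 5/sec, burst 5 figures from above, credit == credit_cap
 * == user2credits(2000 * 5) == 32000, so up to five packets may pass
 * back-to-back before the per-jiffy refill becomes the limit.  In byte
 * mode credit_cap is not a credit amount at all; it counts how many
 * times an empty bucket may be refilled (see hashlimit_byte_cost()
 * below).
 */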

static inline __be32 maskl(__be32 a, unsigned int l)
{
	return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0;
}

#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
{
	switch (p) {
	case 0 ... 31:
		i[0] = maskl(i[0], p);
		i[1] = i[2] = i[3] = 0;
		break;
	case 32 ... 63:
		i[1] = maskl(i[1], p - 32);
		i[2] = i[3] = 0;
		break;
	case 64 ... 95:
		i[2] = maskl(i[2], p - 64);
		i[3] = 0;
		break;
	case 96 ... 127:
		i[3] = maskl(i[3], p - 96);
		break;
	case 128:
		break;
	}
}
#endif

static int
hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
		   struct dsthash_dst *dst,
		   const struct sk_buff *skb, unsigned int protoff)
{
	__be16 _ports[2], *ports;
	u8 nexthdr;
	int poff;

	memset(dst, 0, sizeof(*dst));

	switch (hinfo->family) {
	case NFPROTO_IPV4:
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
			dst->ip.dst = maskl(ip_hdr(skb)->daddr,
			              hinfo->cfg.dstmask);
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
			dst->ip.src = maskl(ip_hdr(skb)->saddr,
			              hinfo->cfg.srcmask);

		if (!(hinfo->cfg.mode &
		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
			return 0;
		nexthdr = ip_hdr(skb)->protocol;
		break;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	case NFPROTO_IPV6:
	{
		__be16 frag_off;

		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) {
			memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
			       sizeof(dst->ip6.dst));
			hashlimit_ipv6_mask(dst->ip6.dst, hinfo->cfg.dstmask);
		}
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) {
			memcpy(&dst->ip6.src, &ipv6_hdr(skb)->saddr,
			       sizeof(dst->ip6.src));
			hashlimit_ipv6_mask(dst->ip6.src, hinfo->cfg.srcmask);
		}

		if (!(hinfo->cfg.mode &
		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
			return 0;
		nexthdr = ipv6_hdr(skb)->nexthdr;
		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);
		if ((int)protoff < 0)
			return -1;
		break;
	}
#endif
	default:
		BUG();
		return 0;
	}

	poff = proto_ports_offset(nexthdr);
	if (poff >= 0) {
		ports = skb_header_pointer(skb, protoff + poff, sizeof(_ports),
					   &_ports);
	} else {
		_ports[0] = _ports[1] = 0;
		ports = _ports;
	}
	if (!ports)
		return -1;
	if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT)
		dst->src_port = ports[0];
	if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT)
		dst->dst_port = ports[1];
	return 0;
}

static u32 hashlimit_byte_cost(unsigned int len, struct dsthash_ent *dh)
{
	u64 tmp = xt_hashlimit_len_to_chunks(len);

	tmp = tmp * dh->rateinfo.cost;

	if (unlikely(tmp > CREDITS_PER_JIFFY_BYTES * HZ))
		tmp = CREDITS_PER_JIFFY_BYTES * HZ;

	if (dh->rateinfo.credit < tmp && dh->rateinfo.credit_cap) {
		dh->rateinfo.credit_cap--;
		dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
	}
	return (u32) tmp;
}
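
/*
 * Illustrative byte-mode charge, assuming the 16-byte accounting
 * chunks implied by XT_HASHLIMIT_BYTE_SHIFT == 4: a 1500-byte packet
 * costs xt_hashlimit_len_to_chunks(1500) == (1500 >> 4) + 1 == 94
 * chunks times rateinfo.cost credits, clamped to at most one second's
 * worth (CREDITS_PER_JIFFY_BYTES * HZ); a charge the bucket cannot
 * cover consumes one credit_cap "refill" to top the bucket back up.
 */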

static bool
hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
	struct xt_hashlimit_htable *hinfo = info->hinfo;
	unsigned long now = jiffies;
	struct dsthash_ent *dh;
	struct dsthash_dst dst;
	bool race = false;
	u32 cost;

	if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
		goto hotdrop;

	rcu_read_lock_bh();
	dh = dsthash_find(hinfo, &dst);
	if (dh == NULL) {
		dh = dsthash_alloc_init(hinfo, &dst, &race);
		if (dh == NULL) {
			rcu_read_unlock_bh();
			goto hotdrop;
		} else if (race) {
			/* Already got an entry, update expiration timeout */
			dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
			rateinfo_recalc(dh, now, hinfo->cfg.mode);
		} else {
			dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
			rateinfo_init(dh, hinfo);
		}
	} else {
		/* update expiration timeout */
		dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
		rateinfo_recalc(dh, now, hinfo->cfg.mode);
	}

	if (info->cfg.mode & XT_HASHLIMIT_BYTES)
		cost = hashlimit_byte_cost(skb->len, dh);
	else
		cost = dh->rateinfo.cost;

	if (dh->rateinfo.credit >= cost) {
		/* below the limit */
		dh->rateinfo.credit -= cost;
		spin_unlock(&dh->lock);
		rcu_read_unlock_bh();
		return !(info->cfg.mode & XT_HASHLIMIT_INVERT);
	}

	spin_unlock(&dh->lock);
	rcu_read_unlock_bh();
	/* default match is underlimit - so over the limit, we need to invert */
	return info->cfg.mode & XT_HASHLIMIT_INVERT;

 hotdrop:
	par->hotdrop = true;
	return false;
}

static int hashlimit_mt_check(const struct xt_mtchk_param *par)
{
	struct net *net = par->net;
	struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
	int ret;

	if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
		return -EINVAL;
	if (info->name[sizeof(info->name)-1] != '\0')
		return -EINVAL;
	if (par->family == NFPROTO_IPV4) {
		if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32)
			return -EINVAL;
	} else {
		if (info->cfg.srcmask > 128 || info->cfg.dstmask > 128)
			return -EINVAL;
	}

	if (info->cfg.mode & ~XT_HASHLIMIT_ALL) {
		pr_info("Unknown mode mask %X, kernel too old?\n",
			info->cfg.mode);
		return -EINVAL;
	}

	/* Check for overflow. */
	if (info->cfg.mode & XT_HASHLIMIT_BYTES) {
		if (user2credits_byte(info->cfg.avg) == 0) {
			pr_info("overflow, rate too high: %u\n", info->cfg.avg);
			return -EINVAL;
		}
	} else if (info->cfg.burst == 0 ||
		   user2credits(info->cfg.avg * info->cfg.burst) <
		   user2credits(info->cfg.avg)) {
		pr_info("overflow, try lower: %u/%u\n",
			info->cfg.avg, info->cfg.burst);
		return -ERANGE;
	}

	mutex_lock(&hashlimit_mutex);
	info->hinfo = htable_find_get(net, info->name, par->family);
	if (info->hinfo == NULL) {
		ret = htable_create(net, info, par->family);
		if (ret < 0) {
			mutex_unlock(&hashlimit_mutex);
			return ret;
		}
	}
	mutex_unlock(&hashlimit_mutex);
	return 0;
}

static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
	const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;

	htable_put(info->hinfo);
}

static struct xt_match hashlimit_mt_reg[] __read_mostly = {
	{
		.name           = "hashlimit",
		.revision       = 1,
		.family         = NFPROTO_IPV4,
		.match          = hashlimit_mt,
		.matchsize      = sizeof(struct xt_hashlimit_mtinfo1),
		.checkentry     = hashlimit_mt_check,
		.destroy        = hashlimit_mt_destroy,
		.me             = THIS_MODULE,
	},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	{
		.name           = "hashlimit",
		.revision       = 1,
		.family         = NFPROTO_IPV6,
		.match          = hashlimit_mt,
		.matchsize      = sizeof(struct xt_hashlimit_mtinfo1),
		.checkentry     = hashlimit_mt_check,
		.destroy        = hashlimit_mt_destroy,
		.me             = THIS_MODULE,
	},
#endif
};

/* PROC stuff */
static void *dl_seq_start(struct seq_file *s, loff_t *pos)
	__acquires(htable->lock)
{
	struct xt_hashlimit_htable *htable = s->private;
	unsigned int *bucket;

	spin_lock_bh(&htable->lock);
	if (*pos >= htable->cfg.size)
		return NULL;

	bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
	if (!bucket)
		return ERR_PTR(-ENOMEM);

	*bucket = *pos;
	return bucket;
}

static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct xt_hashlimit_htable *htable = s->private;
	unsigned int *bucket = (unsigned int *)v;

	*pos = ++(*bucket);
	if (*pos >= htable->cfg.size) {
		kfree(v);
		return NULL;
	}
	return bucket;
}

static void dl_seq_stop(struct seq_file *s, void *v)
	__releases(htable->lock)
{
	struct xt_hashlimit_htable *htable = s->private;
	unsigned int *bucket = (unsigned int *)v;

	if (!IS_ERR(bucket))
		kfree(bucket);
	spin_unlock_bh(&htable->lock);
}

static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
			    struct seq_file *s)
{
	int res;
	const struct xt_hashlimit_htable *ht = s->private;

	spin_lock(&ent->lock);
	/* recalculate to show accurate numbers */
	rateinfo_recalc(ent, jiffies, ht->cfg.mode);

	switch (family) {
	case NFPROTO_IPV4:
		res = seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n",
				 (long)(ent->expires - jiffies)/HZ,
				 &ent->dst.ip.src,
				 ntohs(ent->dst.src_port),
				 &ent->dst.ip.dst,
				 ntohs(ent->dst.dst_port),
				 ent->rateinfo.credit, ent->rateinfo.credit_cap,
				 ent->rateinfo.cost);
		break;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	case NFPROTO_IPV6:
		res = seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
				 (long)(ent->expires - jiffies)/HZ,
				 &ent->dst.ip6.src,
				 ntohs(ent->dst.src_port),
				 &ent->dst.ip6.dst,
				 ntohs(ent->dst.dst_port),
				 ent->rateinfo.credit, ent->rateinfo.credit_cap,
				 ent->rateinfo.cost);
		break;
#endif
	default:
		res = 0;
		BUG();
	}
	spin_unlock(&ent->lock);
	return res;
}

static int dl_seq_show(struct seq_file *s, void *v)
{
	struct xt_hashlimit_htable *htable = s->private;
	unsigned int *bucket = (unsigned int *)v;
	struct dsthash_ent *ent;

	if (!hlist_empty(&htable->hash[*bucket])) {
		hlist_for_each_entry(ent, &htable->hash[*bucket], node)
			if (dl_seq_real_show(ent, htable->family, s))
				return -1;
	}
	return 0;
}

static const struct seq_operations dl_seq_ops = {
	.start = dl_seq_start,
	.next  = dl_seq_next,
	.stop  = dl_seq_stop,
	.show  = dl_seq_show
};

static int dl_proc_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &dl_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;

		sf->private = PDE(inode)->data;
	}
	return ret;
}

static const struct file_operations dl_file_ops = {
	.owner   = THIS_MODULE,
	.open    = dl_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};

static int __net_init hashlimit_proc_net_init(struct net *net)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

	hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
	if (!hashlimit_net->ipt_hashlimit)
		return -ENOMEM;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
	if (!hashlimit_net->ip6t_hashlimit) {
		remove_proc_entry("ipt_hashlimit", net->proc_net);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void __net_exit hashlimit_proc_net_exit(struct net *net)
{
	struct xt_hashlimit_htable *hinfo;
	struct proc_dir_entry *pde;
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

	/* hashlimit_net_exit() is called before hashlimit_mt_destroy().
	 * Make sure that the parent ipt_hashlimit/ip6t_hashlimit proc
	 * entry is empty before trying to remove it.
	 */
	mutex_lock(&hashlimit_mutex);
	pde = hashlimit_net->ipt_hashlimit;
	if (pde == NULL)
		pde = hashlimit_net->ip6t_hashlimit;

	hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
		remove_proc_entry(hinfo->pde->name, pde);

	hashlimit_net->ipt_hashlimit = NULL;
	hashlimit_net->ip6t_hashlimit = NULL;
	mutex_unlock(&hashlimit_mutex);

	remove_proc_entry("ipt_hashlimit", net->proc_net);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	remove_proc_entry("ip6t_hashlimit", net->proc_net);
#endif
}

static int __net_init hashlimit_net_init(struct net *net)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

	INIT_HLIST_HEAD(&hashlimit_net->htables);
	return hashlimit_proc_net_init(net);
}

static void __net_exit hashlimit_net_exit(struct net *net)
{
	hashlimit_proc_net_exit(net);
}

static struct pernet_operations hashlimit_net_ops = {
	.init	= hashlimit_net_init,
	.exit	= hashlimit_net_exit,
	.id	= &hashlimit_net_id,
	.size	= sizeof(struct hashlimit_net),
};

static int __init hashlimit_mt_init(void)
{
	int err;

	err = register_pernet_subsys(&hashlimit_net_ops);
	if (err < 0)
		return err;
	err = xt_register_matches(hashlimit_mt_reg,
	      ARRAY_SIZE(hashlimit_mt_reg));
	if (err < 0)
		goto err1;

	err = -ENOMEM;
	hashlimit_cachep = kmem_cache_create("xt_hashlimit",
					     sizeof(struct dsthash_ent), 0, 0,
					     NULL);
	if (!hashlimit_cachep) {
		pr_warning("unable to create slab cache\n");
		goto err2;
	}
	return 0;

err2:
	xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
err1:
	unregister_pernet_subsys(&hashlimit_net_ops);
	return err;
}

static void __exit hashlimit_mt_exit(void)
{
	xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
	unregister_pernet_subsys(&hashlimit_net_ops);

	rcu_barrier_bh();	/* wait for pending call_rcu_bh() frees */
	kmem_cache_destroy(hashlimit_cachep);
}

module_init(hashlimit_mt_init);
module_exit(hashlimit_mt_exit);