/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>
/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)
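/*
 * Rough sizing rationale (an estimate, not a hard guarantee): in cpulist
 * syntax each CPU contributes at most a few characters (a number plus a
 * comma or range dash), so 6*NR_CPUS with a fixed 100-byte slack bounds
 * any well-formed mask string.
 */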
static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;
static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};
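/*
 * GENL_ID_GENERATE asks the generic netlink core to pick a free family
 * id at registration time; userspace resolves it by name through the
 * nlctrl family's CTRL_CMD_GETFAMILY lookup.
 */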
static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};
static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};
struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};
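/*
 * Listeners are kept per-CPU: a task's exit record is sent only to the
 * listeners registered for the CPU it exits on, so the fan-out work in
 * send_cpu_listeners() stays proportional to the interested parties.
 */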
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
			 size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		/* No request info: autonomous message on the exit path */
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}
/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);

	genlmsg_end(skb, reply);

	return genlmsg_reply(skb, info);
}
/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
			       struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	genlmsg_end(skb, reply);

	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		/* Clone for every listener but the last, which gets @skb */
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			/* Listener socket is gone; mark for removal below */
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}
static void fill_stats(struct user_namespace *user_ns,
		       struct pid_namespace *pid_ns,
		       struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));

	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */
	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(user_ns, pid_ns, stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
	put_task_struct(tsk);
	return 0;
}
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp, *s2;
	unsigned int cpu;
	int ret = 0;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (current_user_ns() != &init_user_ns)
		return -EINVAL;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener),
					 GFP_KERNEL, cpu_to_node(cpu));
			if (!s) {
				ret = -ENOMEM;
				goto cleanup;
			}
			s->pid = pid;
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			/* Don't allow duplicate registrations for a pid */
			list_for_each_entry(s2, &listeners->list, list) {
				if (s2->pid == pid && s2->valid)
					goto exists;
			}
			list_add(&s->list, &listeners->list);
			s = NULL;
exists:
			up_write(&listeners->sem);
			kfree(s); /* nop if NULL */
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return ret;
}
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}
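/*
 * The attribute payload uses the usual kernel cpulist syntax, so e.g.
 * "0-3,8" selects CPUs 0,1,2,3 and 8. A minimal sketch of the same
 * parse step in isolation:
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		if (!cpulist_parse("0-3,8", mask))
 *			pr_debug("parsed %*pbl\n", cpumask_pr_args(mask));
 *		free_cpumask_var(mask);
 *	}
 */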
#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	/*
	 * The taskstats structure is internally aligned on 8 byte
	 * boundaries but the layout of the aggregate reply, with
	 * two NLA headers and the pid (each 4 bytes), actually
	 * forces the entire structure to be unaligned. This causes
	 * the kernel to issue unaligned access warnings on some
	 * architectures like ia64. Unfortunately, some software out there
	 * doesn't properly unroll the NLA packet and assumes that the start
	 * of the taskstats structure will always be 20 bytes from the start
	 * of the netlink payload. Aligning the start of the taskstats
	 * structure breaks this software, which we don't want. So, for now
	 * the alignment only happens on architectures that require it
	 * and those users will have to update to fixed versions of those
	 * packages. Space is reserved in the packet only when needed.
	 * This ifdef should be removed in several years e.g. 2012 once
	 * we can be confident that fixed versions are installed on most
	 * systems. We add the padding before the aggregate since the
	 * aggregate is already a defined type.
	 */
#ifdef TASKSTATS_NEEDS_PADDING
	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
		goto err;
#endif
	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}
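/*
 * Layout of the reply built above (with the optional TASKSTATS_TYPE_NULL
 * padding attribute omitted): a nested aggregate carrying the id and the
 * stats themselves.
 *
 *	TASKSTATS_TYPE_AGGR_PID/TGID	(nest)
 *	    TASKSTATS_TYPE_PID/TGID	(u32)
 *	    TASKSTATS_TYPE_STATS	(struct taskstats)
 */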
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct fd f;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	f = fdget(fd);
	if (!f.file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
			 sizeof(struct cgroupstats));
	if (na == NULL) {
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}

	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, f.file->f_path.dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fdput(f);
	return rc;
}
static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}
static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}
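/*
 * A listener is identified by its netlink port id (info->snd_portid
 * above), so a registration is undone by sending DEREGISTER_CPUMASK
 * from the same socket with the same mask; see add_del_listener().
 * The getdelays tool under Documentation/accounting/ exercises this path.
 */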
static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
	       nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
	size += nla_total_size(0); /* Padding for alignment */
#endif
	return size;
}
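/*
 * The three terms above cover the u32 pid/tgid attribute, the taskstats
 * payload and the zero-length aggregate nest header; nla_total_size(n)
 * is n plus the 4-byte attribute header, rounded up for alignment.
 */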
static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}
static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}
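/*
 * A minimal userspace sketch of driving TASKSTATS_CMD_GET, assuming
 * libnl-3 (socket and variable names are illustrative, error handling
 * omitted; needs CAP_NET_ADMIN because of GENL_ADMIN_PERM below):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int fam = genl_ctrl_resolve(sk, TASKSTATS_GENL_NAME);
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
 *		    TASKSTATS_CMD_GET, TASKSTATS_GENL_VERSION);
 *	nla_put_u32(msg, TASKSTATS_CMD_ATTR_PID, getpid());
 *	nl_send_auto(sk, msg);
 *	// the TASKSTATS_CMD_NEW reply nests the stats as mk_reply() shows
 */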
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}
/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = raw_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
			 task_pid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
			 task_tgid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}
static const struct genl_ops taskstats_ops[] = {
	{
		.cmd		= TASKSTATS_CMD_GET,
		.doit		= taskstats_user_cmd,
		.policy		= taskstats_cmd_get_policy,
		.flags		= GENL_ADMIN_PERM,
	},
	{
		.cmd		= CGROUPSTATS_CMD_GET,
		.doit		= cgroupstats_user_cmd,
		.policy		= cgroupstats_cmd_get_policy,
	},
};
/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}
static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family_with_ops(&family, taskstats_ops);
	if (rc)
		return rc;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
}
/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);