/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>

#include <net/sock.h>
#include "util.h"
#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2
struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};
struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct msg_msg **messages;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static spinlock_t mq_lock;
static struct kmem_cache *mqueue_inode_cachep;
static struct ctl_table_header *mq_sysctl_table;
static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}
/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}
static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);

	return ns;
}
static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME;

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_msg_tblsz;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max, DFLT_MSG);
		info->attr.mq_msgsize =
			min(ipc_ns->mq_msgsize_max, DFLT_MSGSIZE);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
		info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
		if (!info->messages)
			goto out_inode;

		mq_bytes = (mq_msg_tblsz +
			(info->attr.mq_maxmsg * info->attr.mq_msgsize));

		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}
static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = data;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}
static struct dentry *mqueue_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data)
{
	if (!(flags & MS_KERNMOUNT))
		data = current->nsproxy->ipc_ns;
	return mount_ns(fs_type, flags, data, mqueue_fill_super);
}
static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}
static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}
static void mqueue_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}
static void mqueue_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, mqueue_i_callback);
}
static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes;
	int i;
	struct ipc_namespace *ipc_ns;

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	/* Total amount of bytes accounted for the mqueue */
	mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *)
	    + info->attr.mq_msgsize);
	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}
static int mqueue_create(struct inode *dir, struct dentry *dentry,
				umode_t mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}
	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}
static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}
/*
 * This routine services read() on a queue file.
 * To avoid doing some variant of mq_receive here, we allow reading only
 * the queue size and notification info (the only values that are
 * interesting from the user's point of view and aren't accessible
 * through the standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return ret;
}
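
/*
 * Illustration (not part of this file): with the mqueue filesystem
 * mounted at /dev/mqueue, the format produced above can be observed
 * from userspace by reading the queue file directly, e.g.
 *
 *	$ cat /dev/mqueue/myqueue
 *	QSIZE:129     NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 *
 * QSIZE is info->qsize, the byte total of all currently queued
 * messages; the NOTIFY/SIGNO/NOTIFY_PID fields are zero unless an
 * mq_notify() registration is in effect.
 */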
static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}
static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}
/* Adds current to info->e_wait_q[sr] before the first element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}
/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}
/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}
/* Auxiliary functions to manipulate messages' list */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}
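
/*
 * Worked example (illustration only): messages[] is kept sorted by
 * m_type (the priority) in ascending order, so msg_get() can always
 * take the highest-priority message from the tail in O(1). After
 * inserting priorities 1, 5, 3 the array holds {1, 3, 5}; a further
 * msg_insert() of priority 4 shifts 5 up one slot, giving
 * {1, 3, 4, 5}, and the next msg_get() returns the priority-5
 * message. Because entries of equal m_type are shifted too, a new
 * message lands below existing equal-priority ones, preserving FIFO
 * order within a priority level.
 */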
static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN - 1] = code;
}
/*
 * This function exists only to keep sys_mq_timedsend from getting too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is registered process and there isn't process
	 * waiting synchronously for message AND state of queue changed from
	 * empty to not empty. Here we are sure that no one is waiting
	 * synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}
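
/*
 * Illustrative userspace counterpart (a sketch, not part of this
 * file): a SIGEV_SIGNAL registration that the code above services
 * when the queue goes from empty to non-empty:
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *		.sigev_value  = { .sival_int = 42 },
 *	};
 *	if (mq_notify(mqdes, &sev) == -1)
 *		perror("mq_notify");
 *
 * The queued siginfo carries SI_MESGQ in si_code, with the sender's
 * pid/uid translated into the registering process's namespaces.
 */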
static int prepare_timeout(const struct timespec __user *u_abs_timeout,
			   ktime_t *expires, struct timespec *ts)
{
	if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
		return -EFAULT;
	if (!timespec_valid(ts))
		return -EINVAL;

	*expires = timespec_to_ktime(*ts);
	return 0;
}
static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}
static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return 0;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX)
			return 0;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
				attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return 0;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return 0;
	if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize
	    + sizeof (struct msg_msg *))) <
	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
		return 0;
	return 1;
}
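
/*
 * Worked example (illustration only): on a 32-bit box, mq_maxmsg =
 * 70000 and mq_msgsize = 70000 would give a product of ~4.9e9, which
 * wraps an unsigned long; the first test catches it because
 * 70000 > ULONG_MAX / 70000 (~61356). The second test rejects the
 * subtler case where the product alone fits but adding the
 * per-message pointer overhead (sizeof(struct msg_msg *)) makes the
 * total wrap back below maxmsg * msgsize.
 */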
/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
			struct dentry *dentry, int oflag, umode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	struct file *result;
	int ret;

	if (attr) {
		if (!mq_attr_ok(ipc_ns, attr)) {
			ret = -EINVAL;
			goto out;
		}
		/* store for use during create */
		dentry->d_fsdata = attr;
	}

	mode &= ~current_umask();
	ret = mnt_want_write(ipc_ns->mq_mnt);
	if (ret)
		goto out;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out_drop_write;

	result = dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
	/*
	 * dentry_open() took a persistent mnt_want_write(),
	 * so we can now drop this one.
	 */
	mnt_drop_write(ipc_ns->mq_mnt);
	return result;

out_drop_write:
	mnt_drop_write(ipc_ns->mq_mnt);
out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
	return ERR_PTR(ret);
}
/* Opens existing queue */
static struct file *do_open(struct ipc_namespace *ipc_ns,
				struct dentry *dentry, int oflag)
{
	int ret;
	const struct cred *cred = current_cred();

	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		ret = -EINVAL;
		goto err;
	}

	if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
		ret = -EACCES;
		goto err;
	}

	return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);

err:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
	return ERR_PTR(ret);
}
SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	mutex_lock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_putfd;
	}
	mntget(ipc_ns->mq_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			audit_inode(name, dentry);
			if (oflag & O_EXCL) {
				error = -EEXIST;
				goto out;
			}
			filp = do_open(ipc_ns, dentry, oflag);
		} else {
			filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
						dentry, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		if (!dentry->d_inode) {
			error = -ENOENT;
			goto out;
		}
		audit_inode(name, dentry);
		filp = do_open(ipc_ns, dentry, oflag);
	}

	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
out_putfd:
	put_unused_fd(fd);
	fd = error;
out_upsem:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}
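
/*
 * Userspace sketch (illustration, not part of this file): creating and
 * opening a queue through this syscall via the POSIX wrapper:
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *	mqd_t q = mq_open("/myqueue", O_CREAT | O_EXCL | O_RDWR,
 *			  0600, &attr);
 *	if (q == (mqd_t)-1)
 *		perror("mq_open");
 *
 * The library API requires a name of the form "/somename"; the kernel
 * side sees a single path component looked up under the mqueue mount
 * root, as in lookup_one_len() above.
 */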
SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex,
			I_MUTEX_PARENT);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		ihold(inode);
	err = mnt_want_write(ipc_ns->mq_mnt);
	if (err)
		goto out_err;
	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
	mnt_drop_write(ipc_ns->mq_mnt);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}
/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */
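
/*
 * Sketch of the handshake (illustration only):
 *
 *	waiter (wq_sleep)		waker (pipelined_send/receive)
 *	-----------------		------------------------------
 *	state = STATE_NONE
 *	sleep under the queue lock	->msg = message (or msg_insert)
 *					state = STATE_PENDING
 *					wake_up_process()
 *	spin while STATE_PENDING	smp_wmb()
 *					state = STATE_READY
 *	sees STATE_READY, returns
 *	without retaking info->lock
 *
 * The STATE_PENDING window is what makes the lockless return safe: the
 * waiter must not consume ->msg until the waker has published it and
 * the write barrier has ordered the stores.
 */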
/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting the message into the queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}
/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and put it into the queue (we have one free place for
 * sure). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}
SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	int ret;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);

	filp = fget(mqdes);
	if (unlikely(!filp)) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	spin_lock(&info->lock);

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
		}
		if (ret < 0)
			free_msg(msg_ptr);
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			msg_insert(msg_ptr, info);
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
		spin_unlock(&info->lock);
		ret = 0;
	}
out_fput:
	fput(filp);
out:
	return ret;
}
SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);

	filp = fget(mqdes);
	if (unlikely(!filp)) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}
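
/*
 * Userspace sketch (illustration, not part of this file): the send and
 * receive paths above through the POSIX wrappers. Note the receive
 * buffer must be at least mq_msgsize bytes, or -EMSGSIZE comes back:
 *
 *	char buf[128];		// >= attr.mq_msgsize for this queue
 *	unsigned int prio;
 *	mq_send(q, "hello", 5, 1);
 *	ssize_t n = mq_receive(q, buf, sizeof(buf), &prio);
 *
 * mq_timedsend()/mq_timedreceive() additionally take an absolute
 * CLOCK_REALTIME timespec, matching prepare_timeout() above.
 */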
/*
 * Notes: the case when the user wants us to deregister (with NULL as a
 * pointer) but isn't currently the owner of the notification is silently
 * ignored. POSIX does not explicitly define this behaviour.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			if (!filp) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	filp = fget(mqdes);
	if (!filp) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}
SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	filp = fget(mqdes);
	if (!filp) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&filp->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;
		spin_unlock(&filp->f_lock);

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
						sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}
static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};
static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};
static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};
static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.mount = mqueue_mount,
	.kill_sb = kill_litter_super,
};
int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}
void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}
void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}
static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

__initcall(init_mqueue_fs);