Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6
author	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 4 Mar 2010 16:15:33 +0000 (08:15 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 4 Mar 2010 16:15:33 +0000 (08:15 -0800)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6: (52 commits)
  init: Open /dev/console from rootfs
  mqueue: fix typo "failues" -> "failures"
  mqueue: only set error codes if they are really necessary
  mqueue: simplify do_open() error handling
  mqueue: apply mathematics distributivity on mq_bytes calculation
  mqueue: remove unneeded info->messages initialization
  mqueue: fix mq_open() file descriptor leak on user-space processes
  fix race in d_splice_alias()
  set S_DEAD on unlink() and non-directory rename() victims
  vfs: add NOFOLLOW flag to umount(2)
  get rid of ->mnt_parent in tomoyo/realpath
  hppfs can use existing proc_mnt, no need for do_kern_mount() in there
  Mirror MS_KERNMOUNT in ->mnt_flags
  get rid of useless vfsmount_lock use in put_mnt_ns()
  Take vfsmount_lock to fs/internal.h
  get rid of insanity with namespace roots in tomoyo
  take check for new events in namespace (guts of mounts_poll()) to namespace.c
  Don't mess with generic_permission() under ->d_lock in hpfs
  sanitize const/signedness for udf
  nilfs: sanitize const/signedness in dealing with ->d_name.name
  ...

Fix up fairly trivial (famous last words...) conflicts in
drivers/infiniband/core/uverbs_main.c and security/tomoyo/realpath.c

drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_main.c
drivers/usb/gadget/f_mass_storage.c
drivers/usb/gadget/file_storage.c
fs/proc/base.c
include/linux/mount.h
init/main.c
net/sunrpc/rpc_pipe.c
security/smack/smack_lsm.c
security/tomoyo/realpath.c

diff --combined drivers/infiniband/core/uverbs.h
index e54d9ac6d1cabee6a14e25dc5ce94939d5d2910e,0b3862080c0f547259402dda2b89fd5720543931..a078e5624d22f700c0a526538e6e2926f5d2775f
@@@ -41,7 -41,6 +41,7 @@@
  #include <linux/idr.h>
  #include <linux/mutex.h>
  #include <linux/completion.h>
 +#include <linux/cdev.h>
  
  #include <rdma/ib_verbs.h>
  #include <rdma/ib_umem.h>
  
  struct ib_uverbs_device {
        struct kref                             ref;
 +      int                                     num_comp_vectors;
        struct completion                       comp;
 -      int                                     devnum;
 -      struct cdev                            *cdev;
        struct device                          *dev;
        struct ib_device                       *ib_dev;
 -      int                                     num_comp_vectors;
 +      int                                     devnum;
 +      struct cdev                             cdev;
  };
  
  struct ib_uverbs_event_file {
        struct kref                             ref;
 +      int                                     is_async;
        struct ib_uverbs_file                  *uverbs_file;
        spinlock_t                              lock;
 +      int                                     is_closed;
        wait_queue_head_t                       poll_wait;
        struct fasync_struct                   *async_queue;
        struct list_head                        event_list;
 -      int                                     is_async;
 -      int                                     is_closed;
  };
  
  struct ib_uverbs_file {
@@@ -146,7 -145,7 +146,7 @@@ extern struct idr ib_uverbs_srq_idr
  void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
  
  struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
-                                       int is_async, int *fd);
+                                       int is_async);
  struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
  
  void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
diff --combined drivers/infiniband/core/uverbs_main.c
index ff59a795e840f7665f8e0b8fca5fdd5b06a31e57,810f277739e2361fd8b840ee512f03c7c8f65e5f..4fa2e65164418e033e36065baeec38d74db32032
@@@ -41,9 -41,9 +41,9 @@@
  #include <linux/fs.h>
  #include <linux/poll.h>
  #include <linux/sched.h>
 -#include <linux/anon_inodes.h>
  #include <linux/file.h>
  #include <linux/cdev.h>
 +#include <linux/anon_inodes.h>
  
  #include <asm/uaccess.h>
  
@@@ -73,39 -73,40 +73,39 @@@ DEFINE_IDR(ib_uverbs_qp_idr)
  DEFINE_IDR(ib_uverbs_srq_idr);
  
  static DEFINE_SPINLOCK(map_lock);
 -static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
  static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
  
  static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
                                     const char __user *buf, int in_len,
                                     int out_len) = {
 -      [IB_USER_VERBS_CMD_GET_CONTEXT]         = ib_uverbs_get_context,
 -      [IB_USER_VERBS_CMD_QUERY_DEVICE]        = ib_uverbs_query_device,
 -      [IB_USER_VERBS_CMD_QUERY_PORT]          = ib_uverbs_query_port,
 -      [IB_USER_VERBS_CMD_ALLOC_PD]            = ib_uverbs_alloc_pd,
 -      [IB_USER_VERBS_CMD_DEALLOC_PD]          = ib_uverbs_dealloc_pd,
 -      [IB_USER_VERBS_CMD_REG_MR]              = ib_uverbs_reg_mr,
 -      [IB_USER_VERBS_CMD_DEREG_MR]            = ib_uverbs_dereg_mr,
 +      [IB_USER_VERBS_CMD_GET_CONTEXT]         = ib_uverbs_get_context,
 +      [IB_USER_VERBS_CMD_QUERY_DEVICE]        = ib_uverbs_query_device,
 +      [IB_USER_VERBS_CMD_QUERY_PORT]          = ib_uverbs_query_port,
 +      [IB_USER_VERBS_CMD_ALLOC_PD]            = ib_uverbs_alloc_pd,
 +      [IB_USER_VERBS_CMD_DEALLOC_PD]          = ib_uverbs_dealloc_pd,
 +      [IB_USER_VERBS_CMD_REG_MR]              = ib_uverbs_reg_mr,
 +      [IB_USER_VERBS_CMD_DEREG_MR]            = ib_uverbs_dereg_mr,
        [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
 -      [IB_USER_VERBS_CMD_CREATE_CQ]           = ib_uverbs_create_cq,
 -      [IB_USER_VERBS_CMD_RESIZE_CQ]           = ib_uverbs_resize_cq,
 -      [IB_USER_VERBS_CMD_POLL_CQ]             = ib_uverbs_poll_cq,
 -      [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ]       = ib_uverbs_req_notify_cq,
 -      [IB_USER_VERBS_CMD_DESTROY_CQ]          = ib_uverbs_destroy_cq,
 -      [IB_USER_VERBS_CMD_CREATE_QP]           = ib_uverbs_create_qp,
 -      [IB_USER_VERBS_CMD_QUERY_QP]            = ib_uverbs_query_qp,
 -      [IB_USER_VERBS_CMD_MODIFY_QP]           = ib_uverbs_modify_qp,
 -      [IB_USER_VERBS_CMD_DESTROY_QP]          = ib_uverbs_destroy_qp,
 -      [IB_USER_VERBS_CMD_POST_SEND]           = ib_uverbs_post_send,
 -      [IB_USER_VERBS_CMD_POST_RECV]           = ib_uverbs_post_recv,
 -      [IB_USER_VERBS_CMD_POST_SRQ_RECV]       = ib_uverbs_post_srq_recv,
 -      [IB_USER_VERBS_CMD_CREATE_AH]           = ib_uverbs_create_ah,
 -      [IB_USER_VERBS_CMD_DESTROY_AH]          = ib_uverbs_destroy_ah,
 -      [IB_USER_VERBS_CMD_ATTACH_MCAST]        = ib_uverbs_attach_mcast,
 -      [IB_USER_VERBS_CMD_DETACH_MCAST]        = ib_uverbs_detach_mcast,
 -      [IB_USER_VERBS_CMD_CREATE_SRQ]          = ib_uverbs_create_srq,
 -      [IB_USER_VERBS_CMD_MODIFY_SRQ]          = ib_uverbs_modify_srq,
 -      [IB_USER_VERBS_CMD_QUERY_SRQ]           = ib_uverbs_query_srq,
 -      [IB_USER_VERBS_CMD_DESTROY_SRQ]         = ib_uverbs_destroy_srq,
 +      [IB_USER_VERBS_CMD_CREATE_CQ]           = ib_uverbs_create_cq,
 +      [IB_USER_VERBS_CMD_RESIZE_CQ]           = ib_uverbs_resize_cq,
 +      [IB_USER_VERBS_CMD_POLL_CQ]             = ib_uverbs_poll_cq,
 +      [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ]       = ib_uverbs_req_notify_cq,
 +      [IB_USER_VERBS_CMD_DESTROY_CQ]          = ib_uverbs_destroy_cq,
 +      [IB_USER_VERBS_CMD_CREATE_QP]           = ib_uverbs_create_qp,
 +      [IB_USER_VERBS_CMD_QUERY_QP]            = ib_uverbs_query_qp,
 +      [IB_USER_VERBS_CMD_MODIFY_QP]           = ib_uverbs_modify_qp,
 +      [IB_USER_VERBS_CMD_DESTROY_QP]          = ib_uverbs_destroy_qp,
 +      [IB_USER_VERBS_CMD_POST_SEND]           = ib_uverbs_post_send,
 +      [IB_USER_VERBS_CMD_POST_RECV]           = ib_uverbs_post_recv,
 +      [IB_USER_VERBS_CMD_POST_SRQ_RECV]       = ib_uverbs_post_srq_recv,
 +      [IB_USER_VERBS_CMD_CREATE_AH]           = ib_uverbs_create_ah,
 +      [IB_USER_VERBS_CMD_DESTROY_AH]          = ib_uverbs_destroy_ah,
 +      [IB_USER_VERBS_CMD_ATTACH_MCAST]        = ib_uverbs_attach_mcast,
 +      [IB_USER_VERBS_CMD_DETACH_MCAST]        = ib_uverbs_detach_mcast,
 +      [IB_USER_VERBS_CMD_CREATE_SRQ]          = ib_uverbs_create_srq,
 +      [IB_USER_VERBS_CMD_MODIFY_SRQ]          = ib_uverbs_modify_srq,
 +      [IB_USER_VERBS_CMD_QUERY_SRQ]           = ib_uverbs_query_srq,
 +      [IB_USER_VERBS_CMD_DESTROY_SRQ]         = ib_uverbs_destroy_srq,
  };
  
  static void ib_uverbs_add_one(struct ib_device *device);
@@@ -365,7 -366,7 +365,7 @@@ static int ib_uverbs_event_close(struc
  
  static const struct file_operations uverbs_event_fops = {
        .owner   = THIS_MODULE,
 -      .read    = ib_uverbs_event_read,
 +      .read    = ib_uverbs_event_read,
        .poll    = ib_uverbs_event_poll,
        .release = ib_uverbs_event_close,
        .fasync  = ib_uverbs_event_fasync
@@@ -484,11 -485,10 +484,10 @@@ void ib_uverbs_event_handler(struct ib_
  }
  
  struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
-                                       int is_async, int *fd)
+                                       int is_async)
  {
        struct ib_uverbs_event_file *ev_file;
        struct file *filp;
-       int ret;
  
        ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL);
        if (!ev_file)
        ev_file->is_async    = is_async;
        ev_file->is_closed   = 0;
  
-       *fd = get_unused_fd();
-       if (*fd < 0) {
-               ret = *fd;
-               goto err;
-       }
-       filp = anon_inode_getfile("[uverbs-event]", &uverbs_event_fops,
+       filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops,
                                  ev_file, O_RDONLY);
-       if (!filp) {
-               ret = -ENFILE;
-               goto err_fd;
-       }
+       if (IS_ERR(filp))
+               kfree(ev_file);
  
        return filp;
- err_fd:
-       put_unused_fd(*fd);
- err:
-       kfree(ev_file);
-       return ERR_PTR(ret);
  }
  
  /*
@@@ -600,12 -585,14 +584,12 @@@ static int ib_uverbs_mmap(struct file *
  /*
   * ib_uverbs_open() does not need the BKL:
   *
 - *  - dev_table[] accesses are protected by map_lock, the
 - *    ib_uverbs_device structures are properly reference counted, and
 + *  - the ib_uverbs_device structures are properly reference counted and
   *    everything else is purely local to the file being created, so
   *    races against other open calls are not a problem;
   *  - there is no ioctl method to race against;
 - *  - the device is added to dev_table[] as the last part of module
 - *    initialization, the open method will either immediately run
 - *    -ENXIO, or all required initialization will be done.
 + *  - the open method will either immediately run -ENXIO, or all
 + *    required initialization will be done.
   */
  static int ib_uverbs_open(struct inode *inode, struct file *filp)
  {
        struct ib_uverbs_file *file;
        int ret;
  
 -      spin_lock(&map_lock);
 -      dev = dev_table[iminor(inode) - IB_UVERBS_BASE_MINOR];
 +      dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
        if (dev)
                kref_get(&dev->ref);
 -      spin_unlock(&map_lock);
 -
 -      if (!dev)
 +      else
                return -ENXIO;
  
        if (!try_module_get(dev->ib_dev->owner)) {
@@@ -663,17 -653,17 +647,17 @@@ static int ib_uverbs_close(struct inod
  }
  
  static const struct file_operations uverbs_fops = {
 -      .owner   = THIS_MODULE,
 -      .write   = ib_uverbs_write,
 -      .open    = ib_uverbs_open,
 +      .owner   = THIS_MODULE,
 +      .write   = ib_uverbs_write,
 +      .open    = ib_uverbs_open,
        .release = ib_uverbs_close
  };
  
  static const struct file_operations uverbs_mmap_fops = {
 -      .owner   = THIS_MODULE,
 -      .write   = ib_uverbs_write,
 +      .owner   = THIS_MODULE,
 +      .write   = ib_uverbs_write,
        .mmap    = ib_uverbs_mmap,
 -      .open    = ib_uverbs_open,
 +      .open    = ib_uverbs_open,
        .release = ib_uverbs_close
  };
  
@@@ -713,38 -703,8 +697,38 @@@ static ssize_t show_abi_version(struct 
  }
  static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
  
 +static dev_t overflow_maj;
 +static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);
 +
 +/*
 + * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
 + * requesting a new major number and doubling the number of max devices we
 + * support. It's stupid, but simple.
 + */
 +static int find_overflow_devnum(void)
 +{
 +      int ret;
 +
 +      if (!overflow_maj) {
 +              ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
 +                                        "infiniband_verbs");
 +              if (ret) {
 +                      printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n");
 +                      return ret;
 +              }
 +      }
 +
 +      ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
 +      if (ret >= IB_UVERBS_MAX_DEVICES)
 +              return -1;
 +
 +      return ret;
 +}
 +
  static void ib_uverbs_add_one(struct ib_device *device)
  {
 +      int devnum;
 +      dev_t base;
        struct ib_uverbs_device *uverbs_dev;
  
        if (!device->alloc_ucontext)
        init_completion(&uverbs_dev->comp);
  
        spin_lock(&map_lock);
 -      uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
 -      if (uverbs_dev->devnum >= IB_UVERBS_MAX_DEVICES) {
 +      devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
 +      if (devnum >= IB_UVERBS_MAX_DEVICES) {
                spin_unlock(&map_lock);
 -              goto err;
 +              devnum = find_overflow_devnum();
 +              if (devnum < 0)
 +                      goto err;
 +
 +              spin_lock(&map_lock);
 +              uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
 +              base = devnum + overflow_maj;
 +              set_bit(devnum, overflow_map);
 +      } else {
 +              uverbs_dev->devnum = devnum;
 +              base = devnum + IB_UVERBS_BASE_DEV;
 +              set_bit(devnum, dev_map);
        }
 -      set_bit(uverbs_dev->devnum, dev_map);
        spin_unlock(&map_lock);
  
        uverbs_dev->ib_dev           = device;
        uverbs_dev->num_comp_vectors = device->num_comp_vectors;
  
 -      uverbs_dev->cdev = cdev_alloc();
 -      if (!uverbs_dev->cdev)
 -              goto err;
 -      uverbs_dev->cdev->owner = THIS_MODULE;
 -      uverbs_dev->cdev->ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
 -      kobject_set_name(&uverbs_dev->cdev->kobj, "uverbs%d", uverbs_dev->devnum);
 -      if (cdev_add(uverbs_dev->cdev, IB_UVERBS_BASE_DEV + uverbs_dev->devnum, 1))
 +      cdev_init(&uverbs_dev->cdev, NULL);
 +      uverbs_dev->cdev.owner = THIS_MODULE;
 +      uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
 +      kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
 +      if (cdev_add(&uverbs_dev->cdev, base, 1))
                goto err_cdev;
  
        uverbs_dev->dev = device_create(uverbs_class, device->dma_device,
 -                                      uverbs_dev->cdev->dev, uverbs_dev,
 +                                      uverbs_dev->cdev.dev, uverbs_dev,
                                        "uverbs%d", uverbs_dev->devnum);
        if (IS_ERR(uverbs_dev->dev))
                goto err_cdev;
        if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
                goto err_class;
  
 -      spin_lock(&map_lock);
 -      dev_table[uverbs_dev->devnum] = uverbs_dev;
 -      spin_unlock(&map_lock);
 -
        ib_set_client_data(device, &uverbs_client, uverbs_dev);
  
        return;
  
  err_class:
 -      device_destroy(uverbs_class, uverbs_dev->cdev->dev);
 +      device_destroy(uverbs_class, uverbs_dev->cdev.dev);
  
  err_cdev:
 -      cdev_del(uverbs_dev->cdev);
 -      clear_bit(uverbs_dev->devnum, dev_map);
 +      cdev_del(&uverbs_dev->cdev);
 +      if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
 +              clear_bit(devnum, dev_map);
 +      else
 +              clear_bit(devnum, overflow_map);
  
  err:
        kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
@@@ -826,13 -779,14 +810,13 @@@ static void ib_uverbs_remove_one(struc
                return;
  
        dev_set_drvdata(uverbs_dev->dev, NULL);
 -      device_destroy(uverbs_class, uverbs_dev->cdev->dev);
 -      cdev_del(uverbs_dev->cdev);
 +      device_destroy(uverbs_class, uverbs_dev->cdev.dev);
 +      cdev_del(&uverbs_dev->cdev);
  
 -      spin_lock(&map_lock);
 -      dev_table[uverbs_dev->devnum] = NULL;
 -      spin_unlock(&map_lock);
 -
 -      clear_bit(uverbs_dev->devnum, dev_map);
 +      if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
 +              clear_bit(uverbs_dev->devnum, dev_map);
 +      else
 +              clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
  
        kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
        wait_for_completion(&uverbs_dev->comp);
@@@ -886,8 -840,6 +870,8 @@@ static void __exit ib_uverbs_cleanup(vo
        ib_unregister_client(&uverbs_client);
        class_destroy(uverbs_class);
        unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
 +      if (overflow_maj)
 +              unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
        idr_destroy(&ib_uverbs_pd_idr);
        idr_destroy(&ib_uverbs_mr_idr);
        idr_destroy(&ib_uverbs_mw_idr);
diff --combined drivers/usb/gadget/f_mass_storage.c
index b1935fe156a0320769481cf6419c4d58286c52bc,77fcd1b697e8d52d584de7872ac1423d03d7409c..5a3cdd08f1d05c7d589399670fa2ac4eec7eb4a7
@@@ -368,7 -368,7 +368,7 @@@ struct fsg_common 
        struct task_struct      *thread_task;
  
        /* Callback function to call when thread exits. */
 -      void                    (*thread_exits)(struct fsg_common *common);
 +      int                     (*thread_exits)(struct fsg_common *common);
        /* Gadget's private data. */
        void                    *private_data;
  
@@@ -392,12 -392,8 +392,12 @@@ struct fsg_config 
        const char              *lun_name_format;
        const char              *thread_name;
  
 -      /* Callback function to call when thread exits. */
 -      void                    (*thread_exits)(struct fsg_common *common);
 +      /* Callback function to call when thread exits.  If no
 +       * callback is set or it returns value lower then zero MSF
 +       * will force eject all LUNs it operates on (including those
 +       * marked as non-removable or with prevent_medium_removal flag
 +       * set). */
 +      int                     (*thread_exits)(struct fsg_common *common);
        /* Gadget's private data. */
        void                    *private_data;
  
@@@ -618,12 -614,7 +618,12 @@@ static int fsg_setup(struct usb_functio
                        return -EDOM;
                VDBG(fsg, "get max LUN\n");
                *(u8 *) req->buf = fsg->common->nluns - 1;
 -              return 1;
 +
 +              /* Respond with data/status */
 +              req->length = min((u16)1, w_length);
 +              fsg->common->ep0req_name =
 +                      ctrl->bRequestType & USB_DIR_IN ? "ep0-in" : "ep0-out";
 +              return ep0_queue(fsg->common);
        }
  
        VDBG(fsg,
@@@ -1050,7 -1041,7 +1050,7 @@@ static void invalidate_sub(struct fsg_l
        unsigned long   rc;
  
        rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
-       VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
+       VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
  }
  
  static int do_verify(struct fsg_common *common)
@@@ -2533,6 -2524,14 +2533,6 @@@ static void handle_exception(struct fsg
  
        case FSG_STATE_CONFIG_CHANGE:
                rc = do_set_config(common, new_config);
 -              if (common->ep0_req_tag != exception_req_tag)
 -                      break;
 -              if (rc != 0) {                  /* STALL on errors */
 -                      DBG(common, "ep0 set halt\n");
 -                      usb_ep_set_halt(common->ep0);
 -              } else {                        /* Complete the status stage */
 -                      ep0_queue(common);
 -              }
                break;
  
        case FSG_STATE_EXIT:
@@@ -2616,20 -2615,8 +2616,20 @@@ static int fsg_main_thread(void *common
        common->thread_task = NULL;
        spin_unlock_irq(&common->lock);
  
 -      if (common->thread_exits)
 -              common->thread_exits(common);
 +      if (!common->thread_exits || common->thread_exits(common) < 0) {
 +              struct fsg_lun *curlun = common->luns;
 +              unsigned i = common->nluns;
 +
 +              down_write(&common->filesem);
 +              for (; i--; ++curlun) {
 +                      if (!fsg_lun_is_open(curlun))
 +                              continue;
 +
 +                      fsg_lun_close(curlun);
 +                      curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
 +              }
 +              up_write(&common->filesem);
 +      }
  
        /* Let the unbind and cleanup routines know the thread has exited */
        complete_and_exit(&common->thread_notifier, 0);
@@@ -2776,7 -2763,10 +2776,7 @@@ static struct fsg_common *fsg_common_in
        if (cfg->release != 0xffff) {
                i = cfg->release;
        } else {
 -              /* The sa1100 controller is not supported */
 -              i = gadget_is_sa1100(gadget)
 -                      ? -1
 -                      : usb_gadget_controller_number(gadget);
 +              i = usb_gadget_controller_number(gadget);
                if (i >= 0) {
                        i = 0x0300 + i;
                } else {
         * disable stalls.
         */
        common->can_stall = cfg->can_stall &&
 -              !(gadget_is_sh(common->gadget) ||
 -                gadget_is_at91(common->gadget));
 +              !(gadget_is_at91(common->gadget));
  
  
        spin_lock_init(&common->lock);
@@@ -2861,6 -2852,7 +2861,6 @@@ error_release
        /* Call fsg_common_release() directly, ref might be not
         * initialised */
        fsg_common_release(&common->ref);
 -      complete(&common->thread_notifier);
        return ERR_PTR(rc);
  }
  
diff --combined drivers/usb/gadget/file_storage.c
index a90dd2db04889aa5b20fa0f938d367c8cf31c9c7,7dcdbda49cacf4f510e14ed3103e504bbe0f0b19..b49d86e3e45b639d2d3f4760d027e38a0fd52d44
@@@ -1448,7 -1448,7 +1448,7 @@@ static void invalidate_sub(struct fsg_l
        unsigned long   rc;
  
        rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
-       VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
+       VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
  }
  
  static int do_verify(struct fsg_dev *fsg)
@@@ -3208,11 -3208,15 +3208,11 @@@ static int __init check_parameters(stru
         * halt bulk endpoints correctly.  If one of them is present,
         * disable stalls.
         */
 -      if (gadget_is_sh(fsg->gadget) || gadget_is_at91(fsg->gadget))
 +      if (gadget_is_at91(fsg->gadget))
                mod_data.can_stall = 0;
  
        if (mod_data.release == 0xffff) {       // Parameter wasn't set
 -              /* The sa1100 controller is not supported */
 -              if (gadget_is_sa1100(fsg->gadget))
 -                      gcnum = -1;
 -              else
 -                      gcnum = usb_gadget_controller_number(fsg->gadget);
 +              gcnum = usb_gadget_controller_number(fsg->gadget);
                if (gcnum >= 0)
                        mod_data.release = 0x0300 + gcnum;
                else {
diff --combined fs/proc/base.c
index 623e2ffb5d2bb5ae7fbb153af881a24f6f9932a0,746895ddfda16982ce7ae751aca4d2f2a964b8a5..a7310841c83149e406c1089d30a8c27b2c1e67fb
@@@ -647,17 -647,11 +647,11 @@@ static int mounts_release(struct inode 
  static unsigned mounts_poll(struct file *file, poll_table *wait)
  {
        struct proc_mounts *p = file->private_data;
-       struct mnt_namespace *ns = p->ns;
        unsigned res = POLLIN | POLLRDNORM;
  
-       poll_wait(file, &ns->poll, wait);
-       spin_lock(&vfsmount_lock);
-       if (p->event != ns->event) {
-               p->event = ns->event;
+       poll_wait(file, &p->ns->poll, wait);
+       if (mnt_had_events(p))
                res |= POLLERR | POLLPRI;
-       }
-       spin_unlock(&vfsmount_lock);
  
        return res;
  }
@@@ -1095,12 -1089,8 +1089,12 @@@ static ssize_t proc_loginuid_write(stru
        if (!capable(CAP_AUDIT_CONTROL))
                return -EPERM;
  
 -      if (current != pid_task(proc_pid(inode), PIDTYPE_PID))
 +      rcu_read_lock();
 +      if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) {
 +              rcu_read_unlock();
                return -EPERM;
 +      }
 +      rcu_read_unlock();
  
        if (count >= PAGE_SIZE)
                count = PAGE_SIZE - 1;
diff --combined include/linux/mount.h
index b5f43a34ef8841b9a2a5afe7fd961b85ad988d8b,ca726ebf50a3197e92b864254079af52cfa342ec..4bd05474d11d557cd709ab3aa045902a8014af7c
@@@ -34,7 -34,18 +34,18 @@@ struct mnt_namespace
  
  #define MNT_SHARED    0x1000  /* if the vfsmount is a shared mount */
  #define MNT_UNBINDABLE        0x2000  /* if the vfsmount is a unbindable mount */
- #define MNT_PNODE_MASK        0x3000  /* propagation flag mask */
+ /*
+  * MNT_SHARED_MASK is the set of flags that should be cleared when a
+  * mount becomes shared.  Currently, this is only the flag that says a
+  * mount cannot be bind mounted, since this is how we create a mount
+  * that shares events with another mount.  If you add a new MNT_*
+  * flag, consider how it interacts with shared mounts.
+  */
+ #define MNT_SHARED_MASK       (MNT_UNBINDABLE)
+ #define MNT_PROPAGATION_MASK  (MNT_SHARED | MNT_UNBINDABLE)
+ #define MNT_INTERNAL  0x4000
  
  struct vfsmount {
        struct list_head mnt_hash;
@@@ -66,7 -77,7 +77,7 @@@
        int mnt_pinned;
        int mnt_ghosts;
  #ifdef CONFIG_SMP
 -      int *mnt_writers;
 +      int __percpu *mnt_writers;
  #else
        int mnt_writers;
  #endif
@@@ -123,7 -134,6 +134,6 @@@ extern int do_add_mount(struct vfsmoun
  
  extern void mark_mounts_for_expiry(struct list_head *mounts);
  
- extern spinlock_t vfsmount_lock;
  extern dev_t name_to_dev_t(char *name);
  
  #endif /* _LINUX_MOUNT_H */
diff --combined init/main.c
index 18098153c33170c782309f6275cb7f936cdcf6b0,106e02d7ffa5a9709843f8aa39133dad8e828d8f..40aaa020cd6818b04986aacfe180bc5cac054353
@@@ -149,20 -149,6 +149,20 @@@ static int __init nosmp(char *str
  
  early_param("nosmp", nosmp);
  
 +/* this is hard limit */
 +static int __init nrcpus(char *str)
 +{
 +      int nr_cpus;
 +
 +      get_option(&str, &nr_cpus);
 +      if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
 +              nr_cpu_ids = nr_cpus;
 +
 +      return 0;
 +}
 +
 +early_param("nr_cpus", nrcpus);
 +
  static int __init maxcpus(char *str)
  {
        get_option(&str, &setup_max_cpus);
@@@ -430,9 -416,7 +430,9 @@@ static noinline void __init_refok rest_
        kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
        numa_default_policy();
        pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
 +      rcu_read_lock();
        kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
 +      rcu_read_unlock();
        unlock_kernel();
  
        /*
@@@ -600,7 -584,6 +600,7 @@@ asmlinkage void __init start_kernel(voi
                local_irq_disable();
        }
        rcu_init();
 +      radix_tree_init();
        /* init some links before init_ISA_irqs() */
        early_irq_init();
        init_IRQ();
        proc_caches_init();
        buffer_init();
        key_init();
 -      radix_tree_init();
        security_init();
        vfs_caches_init(totalram_pages);
        signals_init();
@@@ -822,11 -806,6 +822,6 @@@ static noinline int init_post(void
        system_state = SYSTEM_RUNNING;
        numa_default_policy();
  
-       if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
-               printk(KERN_WARNING "Warning: unable to open an initial console.\n");
-       (void) sys_dup(0);
-       (void) sys_dup(0);
  
        current->signal->flags |= SIGNAL_UNKILLABLE;
  
@@@ -889,6 -868,12 +884,12 @@@ static int __init kernel_init(void * un
  
        do_basic_setup();
  
+       /* Open the /dev/console on the rootfs, this should never fail */
+       if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+               printk(KERN_WARNING "Warning: unable to open an initial console.\n");
+       (void) sys_dup(0);
+       (void) sys_dup(0);
        /*
         * check if there is an early userspace init.  If yes, let it do all
         * the work
diff --combined net/sunrpc/rpc_pipe.c
index 9ea45383480ee4ddc65711b0c3b45016ff048f0b,9ac493fcc873f88d40ed8917073b10bbc39bf05b..8d63f8fd29b7e3ff0e940d7457d45203f99c56b1
@@@ -78,7 -78,7 +78,7 @@@ rpc_timeout_upcall_queue(struct work_st
  }
  
  /**
 - * rpc_queue_upcall
 + * rpc_queue_upcall - queue an upcall message to userspace
   * @inode: inode of upcall pipe on which to queue given message
   * @msg: message to queue
   *
@@@ -999,19 -999,14 +999,14 @@@ rpc_fill_super(struct super_block *sb, 
        inode = rpc_get_inode(sb, S_IFDIR | 0755);
        if (!inode)
                return -ENOMEM;
-       root = d_alloc_root(inode);
+       sb->s_root = root = d_alloc_root(inode);
        if (!root) {
                iput(inode);
                return -ENOMEM;
        }
        if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
-               goto out;
-       sb->s_root = root;
+               return -ENOMEM;
        return 0;
- out:
-       d_genocide(root);
-       dput(root);
-       return -ENOMEM;
  }
  
  static int
diff --combined security/smack/smack_lsm.c
index a5721b373f53935601c63687acc5b4454acc54b4,8dffcb7c9d81227d80351e47fd13029d4e3d565c..5225e668dbf046fbe17ea105b23eb18cd11590d4
@@@ -157,12 -157,12 +157,12 @@@ static int smack_ptrace_traceme(struct 
   *
   * Returns 0 on success, error code otherwise.
   */
 -static int smack_syslog(int type)
 +static int smack_syslog(int type, bool from_file)
  {
        int rc;
        char *sp = current_security();
  
 -      rc = cap_syslog(type);
 +      rc = cap_syslog(type, from_file);
        if (rc != 0)
                return rc;
  
@@@ -387,7 -387,7 +387,7 @@@ static int smack_sb_umount(struct vfsmo
        struct smk_audit_info ad;
  
        smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
-       smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_mountpoint);
+       smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_root);
        smk_ad_setfield_u_fs_path_mnt(&ad, mnt);
  
        sbp = mnt->mnt_sb->s_security;
diff --combined security/tomoyo/realpath.c
index c00df45c7ede899681344086e509c52f4b180bc0,455bc391b76ddb242dea60d354eb1d0507bfdecf..cf7d61f781b9768cc8691cd3b4fc4c234a313b55
@@@ -14,8 -14,9 +14,8 @@@
  #include <linux/mnt_namespace.h>
  #include <linux/fs_struct.h>
  #include <linux/hash.h>
 -
 +#include <linux/magic.h>
  #include "common.h"
 -#include "realpath.h"
  
  /**
   * tomoyo_encode: Convert binary string to ascii string.
@@@ -88,30 -89,15 +88,15 @@@ int tomoyo_realpath_from_path2(struct p
                sp = dentry->d_op->d_dname(dentry, newname + offset,
                                           newname_len - offset);
        } else {
-               /* Taken from d_namespace_path(). */
-               struct path root;
-               struct path ns_root = { };
-               struct path tmp;
+               struct path ns_root = {.mnt = NULL, .dentry = NULL};
  
-               read_lock(&current->fs->lock);
-               root = current->fs->root;
-               path_get(&root);
-               read_unlock(&current->fs->lock);
-               spin_lock(&vfsmount_lock);
-               if (root.mnt && root.mnt->mnt_ns)
-                       ns_root.mnt = mntget(root.mnt->mnt_ns->root);
-               if (ns_root.mnt)
-                       ns_root.dentry = dget(ns_root.mnt->mnt_root);
-               spin_unlock(&vfsmount_lock);
                spin_lock(&dcache_lock);
-               tmp = ns_root;
-               sp = __d_path(path, &tmp, newname, newname_len);
+               /* go to whatever namespace root we are under */
+               sp = __d_path(path, &ns_root, newname, newname_len);
                spin_unlock(&dcache_lock);
-               path_put(&root);
-               path_put(&ns_root);
                /* Prepend "/proc" prefix if using internal proc vfs mount. */
-               if (!IS_ERR(sp) && (path->mnt->mnt_parent == path->mnt) &&
+               if (!IS_ERR(sp) && (path->mnt->mnt_flags & MNT_INTERNAL) &&
 -                  (strcmp(path->mnt->mnt_sb->s_type->name, "proc") == 0)) {
 +                  (path->mnt->mnt_sb->s_magic == PROC_SUPER_MAGIC)) {
                        sp -= 5;
                        if (sp >= newname)
                                memcpy(sp, "/proc", 5);
   *
   * Returns the realpath of the given @path on success, NULL otherwise.
   *
 - * These functions use tomoyo_alloc(), so the caller must call tomoyo_free()
 + * These functions use kzalloc(), so the caller must call kfree()
   * if these functions didn't return NULL.
   */
  char *tomoyo_realpath_from_path(struct path *path)
  {
 -      char *buf = tomoyo_alloc(sizeof(struct tomoyo_page_buffer));
 +      char *buf = kzalloc(sizeof(struct tomoyo_page_buffer), GFP_KERNEL);
  
        BUILD_BUG_ON(sizeof(struct tomoyo_page_buffer)
                     <= TOMOYO_MAX_PATHNAME_LEN - 1);
        if (tomoyo_realpath_from_path2(path, buf,
                                       TOMOYO_MAX_PATHNAME_LEN - 1) == 0)
                return buf;
 -      tomoyo_free(buf);
 +      kfree(buf);
        return NULL;
  }
  
@@@ -205,47 -191,98 +190,47 @@@ char *tomoyo_realpath_nofollow(const ch
  }
  
  /* Memory allocated for non-string data. */
 -static unsigned int tomoyo_allocated_memory_for_elements;
 -/* Quota for holding non-string data. */
 -static unsigned int tomoyo_quota_for_elements;
 +static atomic_t tomoyo_policy_memory_size;
 +/* Quota for holding policy. */
 +static unsigned int tomoyo_quota_for_policy;
  
  /**
 - * tomoyo_alloc_element - Allocate permanent memory for structures.
 + * tomoyo_memory_ok - Check memory quota.
   *
 - * @size: Size in bytes.
 + * @ptr: Pointer to allocated memory.
   *
 - * Returns pointer to allocated memory on success, NULL otherwise.
 + * Returns true on success, false otherwise.
   *
 - * Memory has to be zeroed.
 - * The RAM is chunked, so NEVER try to kfree() the returned pointer.
 + * Caller holds tomoyo_policy_lock.
 + * Memory pointed by @ptr will be zeroed on success.
   */
 -void *tomoyo_alloc_element(const unsigned int size)
 +bool tomoyo_memory_ok(void *ptr)
  {
 -      static char *buf;
 -      static DEFINE_MUTEX(lock);
 -      static unsigned int buf_used_len = PATH_MAX;
 -      char *ptr = NULL;
 -      /*Assumes sizeof(void *) >= sizeof(long) is true. */
 -      const unsigned int word_aligned_size
 -              = roundup(size, max(sizeof(void *), sizeof(long)));
 -      if (word_aligned_size > PATH_MAX)
 -              return NULL;
 -      mutex_lock(&lock);
 -      if (buf_used_len + word_aligned_size > PATH_MAX) {
 -              if (!tomoyo_quota_for_elements ||
 -                  tomoyo_allocated_memory_for_elements
 -                  + PATH_MAX <= tomoyo_quota_for_elements)
 -                      ptr = kzalloc(PATH_MAX, GFP_KERNEL);
 -              if (!ptr) {
 -                      printk(KERN_WARNING "ERROR: Out of memory "
 -                             "for tomoyo_alloc_element().\n");
 -                      if (!tomoyo_policy_loaded)
 -                              panic("MAC Initialization failed.\n");
 -              } else {
 -                      buf = ptr;
 -                      tomoyo_allocated_memory_for_elements += PATH_MAX;
 -                      buf_used_len = word_aligned_size;
 -                      ptr = buf;
 -              }
 -      } else if (word_aligned_size) {
 -              int i;
 -              ptr = buf + buf_used_len;
 -              buf_used_len += word_aligned_size;
 -              for (i = 0; i < word_aligned_size; i++) {
 -                      if (!ptr[i])
 -                              continue;
 -                      printk(KERN_ERR "WARNING: Reserved memory was tainted! "
 -                             "The system might go wrong.\n");
 -                      ptr[i] = '\0';
 -              }
 +      int allocated_len = ptr ? ksize(ptr) : 0;
 +      atomic_add(allocated_len, &tomoyo_policy_memory_size);
 +      if (ptr && (!tomoyo_quota_for_policy ||
 +                  atomic_read(&tomoyo_policy_memory_size)
 +                  <= tomoyo_quota_for_policy)) {
 +              memset(ptr, 0, allocated_len);
 +              return true;
        }
 -      mutex_unlock(&lock);
 -      return ptr;
 +      printk(KERN_WARNING "ERROR: Out of memory "
 +             "for tomoyo_alloc_element().\n");
 +      if (!tomoyo_policy_loaded)
 +              panic("MAC Initialization failed.\n");
 +      return false;
  }
  
 -/* Memory allocated for string data in bytes. */
 -static unsigned int tomoyo_allocated_memory_for_savename;
 -/* Quota for holding string data in bytes. */
 -static unsigned int tomoyo_quota_for_savename;
 -
 -/*
 - * TOMOYO uses this hash only when appending a string into the string
 - * table. Frequency of appending strings is very low. So we don't need
 - * large (e.g. 64k) hash size. 256 will be sufficient.
 - */
 -#define TOMOYO_HASH_BITS  8
 -#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
 -
 -/*
 - * tomoyo_name_entry is a structure which is used for linking
 - * "struct tomoyo_path_info" into tomoyo_name_list .
 +/**
 + * tomoyo_memory_free - Free memory for elements.
   *
 - * Since tomoyo_name_list manages a list of strings which are shared by
 - * multiple processes (whereas "struct tomoyo_path_info" inside
 - * "struct tomoyo_path_info_with_data" is not shared), a reference counter will
 - * be added to "struct tomoyo_name_entry" rather than "struct tomoyo_path_info"
 - * when TOMOYO starts supporting garbage collector.
 + * @ptr:  Pointer to allocated memory.
   */
 -struct tomoyo_name_entry {
 -      struct list_head list;
 -      struct tomoyo_path_info entry;
 -};
 -
 -/* Structure for available memory region. */
 -struct tomoyo_free_memory_block_list {
 -      struct list_head list;
 -      char *ptr;             /* Pointer to a free area. */
 -      int len;               /* Length of the area.     */
 -};
 +void tomoyo_memory_free(void *ptr)
 +{
 +      atomic_sub(ksize(ptr), &tomoyo_policy_memory_size);
 +      kfree(ptr);
 +}
  
  /*
   * tomoyo_name_list is used for holding string data used by TOMOYO.
   * "/lib/libc-2.5.so"), TOMOYO shares string data in the form of
   * "const struct tomoyo_path_info *".
   */
 -static struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
 +struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
 +/* Lock for protecting tomoyo_name_list . */
 +DEFINE_MUTEX(tomoyo_name_list_lock);
  
  /**
 - * tomoyo_save_name - Allocate permanent memory for string data.
 + * tomoyo_get_name - Allocate permanent memory for string data.
   *
   * @name: The string to store into the permernent memory.
   *
   * Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
 - *
 - * The RAM is shared, so NEVER try to modify or kfree() the returned name.
   */
 -const struct tomoyo_path_info *tomoyo_save_name(const char *name)
 +const struct tomoyo_path_info *tomoyo_get_name(const char *name)
  {
 -      static LIST_HEAD(fmb_list);
 -      static DEFINE_MUTEX(lock);
        struct tomoyo_name_entry *ptr;
        unsigned int hash;
 -      /* fmb contains available size in bytes.
 -         fmb is removed from the fmb_list when fmb->len becomes 0. */
 -      struct tomoyo_free_memory_block_list *fmb;
        int len;
 -      char *cp;
 +      int allocated_len;
        struct list_head *head;
  
        if (!name)
                return NULL;
        len = strlen(name) + 1;
 -      if (len > TOMOYO_MAX_PATHNAME_LEN) {
 -              printk(KERN_WARNING "ERROR: Name too long "
 -                     "for tomoyo_save_name().\n");
 -              return NULL;
 -      }
        hash = full_name_hash((const unsigned char *) name, len - 1);
        head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
 -
 -      mutex_lock(&lock);
 +      mutex_lock(&tomoyo_name_list_lock);
        list_for_each_entry(ptr, head, list) {
 -              if (hash == ptr->entry.hash && !strcmp(name, ptr->entry.name))
 -                      goto out;
 -      }
 -      list_for_each_entry(fmb, &fmb_list, list) {
 -              if (len <= fmb->len)
 -                      goto ready;
 +              if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name))
 +                      continue;
 +              atomic_inc(&ptr->users);
 +              goto out;
        }
 -      if (!tomoyo_quota_for_savename ||
 -          tomoyo_allocated_memory_for_savename + PATH_MAX
 -          <= tomoyo_quota_for_savename)
 -              cp = kzalloc(PATH_MAX, GFP_KERNEL);
 -      else
 -              cp = NULL;
 -      fmb = kzalloc(sizeof(*fmb), GFP_KERNEL);
 -      if (!cp || !fmb) {
 -              kfree(cp);
 -              kfree(fmb);
 +      ptr = kzalloc(sizeof(*ptr) + len, GFP_KERNEL);
 +      allocated_len = ptr ? ksize(ptr) : 0;
 +      if (!ptr || (tomoyo_quota_for_policy &&
 +                   atomic_read(&tomoyo_policy_memory_size) + allocated_len
 +                   > tomoyo_quota_for_policy)) {
 +              kfree(ptr);
                printk(KERN_WARNING "ERROR: Out of memory "
 -                     "for tomoyo_save_name().\n");
 +                     "for tomoyo_get_name().\n");
                if (!tomoyo_policy_loaded)
                        panic("MAC Initialization failed.\n");
                ptr = NULL;
                goto out;
        }
 -      tomoyo_allocated_memory_for_savename += PATH_MAX;
 -      list_add(&fmb->list, &fmb_list);
 -      fmb->ptr = cp;
 -      fmb->len = PATH_MAX;
 - ready:
 -      ptr = tomoyo_alloc_element(sizeof(*ptr));
 -      if (!ptr)
 -              goto out;
 -      ptr->entry.name = fmb->ptr;
 -      memmove(fmb->ptr, name, len);
 +      atomic_add(allocated_len, &tomoyo_policy_memory_size);
 +      ptr->entry.name = ((char *) ptr) + sizeof(*ptr);
 +      memmove((char *) ptr->entry.name, name, len);
 +      atomic_set(&ptr->users, 1);
        tomoyo_fill_path_info(&ptr->entry);
 -      fmb->ptr += len;
 -      fmb->len -= len;
        list_add_tail(&ptr->list, head);
 -      if (fmb->len == 0) {
 -              list_del(&fmb->list);
 -              kfree(fmb);
 -      }
   out:
 -      mutex_unlock(&lock);
 +      mutex_unlock(&tomoyo_name_list_lock);
        return ptr ? &ptr->entry : NULL;
  }
  
@@@ -319,14 -385,45 +304,14 @@@ void __init tomoyo_realpath_init(void
        for (i = 0; i < TOMOYO_MAX_HASH; i++)
                INIT_LIST_HEAD(&tomoyo_name_list[i]);
        INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list);
 -      tomoyo_kernel_domain.domainname = tomoyo_save_name(TOMOYO_ROOT_NAME);
 -      list_add_tail(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
 -      down_read(&tomoyo_domain_list_lock);
 +      tomoyo_kernel_domain.domainname = tomoyo_get_name(TOMOYO_ROOT_NAME);
 +      /*
 +       * tomoyo_read_lock() is not needed because this function is
 +       * called before the first "delete" request.
 +       */
 +      list_add_tail_rcu(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
        if (tomoyo_find_domain(TOMOYO_ROOT_NAME) != &tomoyo_kernel_domain)
                panic("Can't register tomoyo_kernel_domain");
 -      up_read(&tomoyo_domain_list_lock);
 -}
 -
 -/* Memory allocated for temporary purpose. */
 -static atomic_t tomoyo_dynamic_memory_size;
 -
 -/**
 - * tomoyo_alloc - Allocate memory for temporary purpose.
 - *
 - * @size: Size in bytes.
 - *
 - * Returns pointer to allocated memory on success, NULL otherwise.
 - */
 -void *tomoyo_alloc(const size_t size)
 -{
 -      void *p = kzalloc(size, GFP_KERNEL);
 -      if (p)
 -              atomic_add(ksize(p), &tomoyo_dynamic_memory_size);
 -      return p;
 -}
 -
 -/**
 - * tomoyo_free - Release memory allocated by tomoyo_alloc().
 - *
 - * @p: Pointer returned by tomoyo_alloc(). May be NULL.
 - *
 - * Returns nothing.
 - */
 -void tomoyo_free(const void *p)
 -{
 -      if (p) {
 -              atomic_sub(ksize(p), &tomoyo_dynamic_memory_size);
 -              kfree(p);
 -      }
  }
  
  /**
  int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head)
  {
        if (!head->read_eof) {
 -              const unsigned int shared
 -                      = tomoyo_allocated_memory_for_savename;
 -              const unsigned int private
 -                      = tomoyo_allocated_memory_for_elements;
 -              const unsigned int dynamic
 -                      = atomic_read(&tomoyo_dynamic_memory_size);
 +              const unsigned int policy
 +                      = atomic_read(&tomoyo_policy_memory_size);
                char buffer[64];
  
                memset(buffer, 0, sizeof(buffer));
 -              if (tomoyo_quota_for_savename)
 -                      snprintf(buffer, sizeof(buffer) - 1,
 -                               "   (Quota: %10u)",
 -                               tomoyo_quota_for_savename);
 -              else
 -                      buffer[0] = '\0';
 -              tomoyo_io_printf(head, "Shared:  %10u%s\n", shared, buffer);
 -              if (tomoyo_quota_for_elements)
 +              if (tomoyo_quota_for_policy)
                        snprintf(buffer, sizeof(buffer) - 1,
                                 "   (Quota: %10u)",
 -                               tomoyo_quota_for_elements);
 +                               tomoyo_quota_for_policy);
                else
                        buffer[0] = '\0';
 -              tomoyo_io_printf(head, "Private: %10u%s\n", private, buffer);
 -              tomoyo_io_printf(head, "Dynamic: %10u\n", dynamic);
 -              tomoyo_io_printf(head, "Total:   %10u\n",
 -                               shared + private + dynamic);
 +              tomoyo_io_printf(head, "Policy:  %10u%s\n", policy, buffer);
 +              tomoyo_io_printf(head, "Total:   %10u\n", policy);
                head->read_eof = true;
        }
        return 0;
@@@ -369,7 -479,9 +354,7 @@@ int tomoyo_write_memory_quota(struct to
        char *data = head->write_buf;
        unsigned int size;
  
 -      if (sscanf(data, "Shared: %u", &size) == 1)
 -              tomoyo_quota_for_savename = size;
 -      else if (sscanf(data, "Private: %u", &size) == 1)
 -              tomoyo_quota_for_elements = size;
 +      if (sscanf(data, "Policy: %u", &size) == 1)
 +              tomoyo_quota_for_policy = size;
        return 0;
  }