3 * Copyright (C) 2011 Novell Inc.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
11 #include <linux/slab.h>
12 #include <linux/namei.h>
13 #include <linux/file.h>
14 #include <linux/xattr.h>
15 #include <linux/rbtree.h>
16 #include <linux/security.h>
17 #include <linux/cred.h>
18 #include "overlayfs.h"
/*
 * One entry of a merged directory listing.  Each entry sits on a
 * linked list (iteration order) and, during reading, on an rb-tree
 * (fast name lookup).
 * NOTE(review): most members (rb_node, name, len, ino, type,
 * is_whiteout, is_cursor) are on lines elided from this listing but
 * are referenced by the code below -- confirm against the full source.
 */
20 struct ovl_cache_entry {
24 struct list_head l_node; /* link in cache->entries / rdd->list */
/*
 * Per-directory cache of the merged entry list; refcounted (see
 * ovl_cache_put()) and invalidated via a version number (see
 * ovl_cache_get()/ovl_dir_reset()).
 */
31 struct ovl_dir_cache {
34 struct list_head entries; /* ovl_cache_entry items, in emit order */
/*
 * State threaded through iterate_dir() while building the merged list.
 * NOTE(review): the trailing members (cache, cursor, realfile,
 * upperfile) presumably belong to struct ovl_dir_file in the full
 * source -- the intervening declaration lines are elided here.
 */
37 struct ovl_readdir_data {
38 struct dir_context ctx; /* VFS context; ovl_fill_merge() recovers rdd from it */
41 struct list_head *list; /* destination list for new entries */
42 struct list_head middle; /* anchor where lower entries are spliced in */
50 struct ovl_dir_cache *cache; /* shared merged-entry cache */
51 struct ovl_cache_entry cursor; /* dummy entry marking this open's position */
52 struct file *realfile; /* the real (upper or lower) directory file */
53 struct file *upperfile; /* lazily opened upper dir, for fsync after copy-up */
/* Map an rb-tree node back to its containing cache entry. */
56 static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
58 return container_of(n, struct ovl_cache_entry, node);
/*
 * Look up the entry for @name (length @len) in the rb-tree.  The key
 * is strncmp() over the first @len bytes with entry length as the
 * tie-break, matching the ordering used by ovl_cache_entry_add_rb().
 * NOTE(review): the found/return paths are on elided lines.
 */
61 static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
62 const char *name, int len)
64 struct rb_node *node = root->rb_node;
68 struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);
70 cmp = strncmp(name, p->name, len);
72 node = p->node.rb_right; /* name sorts after p */
73 else if (cmp < 0 || len < p->len)
74 node = p->node.rb_left; /* name sorts before p */
/*
 * Allocate a cache entry sized for its name: offsetof() over the
 * flexible trailing array reserves len + 1 bytes (room for the NUL).
 * NOTE(review): the kmalloc() NULL check and the remaining field
 * initialisation (len, type, ino, NUL terminator) are on elided lines.
 */
82 static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
83 u64 ino, unsigned int d_type)
85 struct ovl_cache_entry *p;
86 size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);
88 p = kmalloc(size, GFP_KERNEL);
90 memcpy(p->name, name, len);
95 p->is_whiteout = false; /* may be flipped later by ovl_dir_mark_whiteouts() */
/*
 * Insert a new entry into both the rb-tree (same key ordering as
 * ovl_cache_entry_find()) and the tail of rdd->list.  Used for the
 * upper layer's entries (see ovl_fill_merge()).
 * NOTE(review): the duplicate-name early return, the ENOMEM check on
 * ovl_cache_entry_new(), and the loop's parent update are on elided
 * lines.
 */
102 static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
103 const char *name, int len, u64 ino,
106 struct rb_node **newp = &rdd->root.rb_node;
107 struct rb_node *parent = NULL;
108 struct ovl_cache_entry *p;
112 struct ovl_cache_entry *tmp;
115 tmp = ovl_cache_entry_from_node(*newp);
116 cmp = strncmp(name, tmp->name, len);
118 newp = &tmp->node.rb_right;
119 else if (cmp < 0 || len < tmp->len)
120 newp = &tmp->node.rb_left;
125 p = ovl_cache_entry_new(name, len, ino, d_type);
129 list_add_tail(&p->l_node, rdd->list);
130 rb_link_node(&p->node, parent, newp);
131 rb_insert_color(&p->node, &rdd->root);
/*
 * Handle one lower-layer entry.  If the name was already seen in the
 * upper layer (found in the rb-tree), just move the existing entry to
 * the "middle" list so lower-layer ordering wins for position;
 * otherwise allocate a fresh entry there.
 * NOTE(review): the NULL/ENOMEM branch between the two paths is on
 * elided lines.
 */
136 static int ovl_fill_lower(struct ovl_readdir_data *rdd,
137 const char *name, int namelen,
138 loff_t offset, u64 ino, unsigned int d_type)
140 struct ovl_cache_entry *p;
142 p = ovl_cache_entry_find(&rdd->root, name, namelen);
144 list_move_tail(&p->l_node, &rdd->middle); /* already present: reposition only */
146 p = ovl_cache_entry_new(name, namelen, ino, d_type);
150 list_add_tail(&p->l_node, &rdd->middle);
/* Free every cache entry on @list and reinitialise the list head. */
156 void ovl_cache_free(struct list_head *list)
158 struct ovl_cache_entry *p;
159 struct ovl_cache_entry *n;
161 list_for_each_entry_safe(p, n, list, l_node)
164 INIT_LIST_HEAD(list);
/*
 * Drop this open file's use of the directory cache: unhook the
 * per-open cursor, then release one cache reference; the last
 * reference detaches the cache from the dentry and frees all entries.
 * NOTE(review): the actual refcount decrement and the kfree(cache)
 * are on elided lines.
 */
167 static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
169 struct ovl_dir_cache *cache = od->cache;
171 list_del_init(&od->cursor.l_node);
172 WARN_ON(cache->refcount <= 0); /* catches imbalance before the drop */
174 if (!cache->refcount) {
175 if (ovl_dir_cache(dentry) == cache)
176 ovl_set_dir_cache(dentry, NULL);
178 ovl_cache_free(&cache->entries);
/*
 * iterate_dir() actor: routes upper-layer entries to the rb-tree path
 * and lower-layer entries to the merge path.
 * NOTE(review): the flag choosing between the two calls (and the
 * count bookkeeping) is on elided lines.
 */
183 static int ovl_fill_merge(void *buf, const char *name, int namelen,
184 loff_t offset, u64 ino, unsigned int d_type)
186 struct ovl_readdir_data *rdd = buf; /* buf is the embedded dir_context */
190 return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
192 return ovl_fill_lower(rdd, name, namelen, offset, ino, d_type);
/*
 * Open the real (upper or lower) directory and pump it through
 * iterate_dir() repeatedly until it produces no more entries or an
 * error occurs.
 * NOTE(review): the loop setup (count reset, ctx.pos) and the fput()
 * of realfile are on elided lines.
 */
195 static inline int ovl_dir_read(struct path *realpath,
196 struct ovl_readdir_data *rdd)
198 struct file *realfile;
201 realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
202 if (IS_ERR(realfile))
203 return PTR_ERR(realfile);
209 err = iterate_dir(realfile, &rdd->ctx);
212 } while (!err && rdd->count); /* keep going while entries were filled */
/*
 * Called on rewind (seek to 0): drop a cache whose version no longer
 * matches the dentry, and re-evaluate whether the directory has been
 * copied up into a merge dir since open.
 * NOTE(review): the body of the final if (switching od->is_real off)
 * is on elided lines.
 */
218 static void ovl_dir_reset(struct file *file)
220 struct ovl_dir_file *od = file->private_data;
221 struct ovl_dir_cache *cache = od->cache;
222 struct dentry *dentry = file->f_path.dentry;
223 enum ovl_path_type type = ovl_path_type(dentry);
225 if (cache && ovl_dentry_version_get(dentry) != cache->version) {
226 ovl_cache_put(od, dentry); /* stale: directory changed since caching */
229 WARN_ON(!od->is_real && type != OVL_PATH_MERGE);
230 if (od->is_real && type == OVL_PATH_MERGE)
/*
 * Walk the just-read upper entries and flag whiteouts.  Overlayfs
 * whiteouts are character devices, so anything that is not DT_CHR is
 * skipped cheaply; the per-entry lookup runs with CAP_DAC_OVERRIDE
 * raised so restrictive upper-dir permissions cannot hide them.
 * NOTE(review): the dput()/error handling around lookup_one_len() and
 * the loop's continue statements are on elided lines.
 */
234 static int ovl_dir_mark_whiteouts(struct dentry *dir,
235 struct ovl_readdir_data *rdd)
237 struct ovl_cache_entry *p;
238 struct dentry *dentry;
239 const struct cred *old_cred;
240 struct cred *override_cred;
242 override_cred = prepare_creds();
243 if (!override_cred) {
244 ovl_cache_free(rdd->list); /* allocation failed: throw away the list */
249 * CAP_DAC_OVERRIDE for lookup
251 cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
252 old_cred = override_creds(override_cred);
254 mutex_lock(&dir->d_inode->i_mutex);
255 list_for_each_entry(p, rdd->list, l_node) {
259 if (p->type != DT_CHR) /* whiteouts are char devices */
262 dentry = lookup_one_len(p->name, dir, p->len);
266 p->is_whiteout = ovl_is_whiteout(dentry);
269 mutex_unlock(&dir->d_inode->i_mutex);
271 revert_creds(old_cred);
272 put_cred(override_cred);
/*
 * Build the merged entry list for @dentry onto @list: read the upper
 * directory first (marking whiteouts when a lower dir also exists),
 * then read the lower directory with its entries spliced in *before*
 * the upper ones via the rdd.middle anchor, so d_off values stay
 * reasonably constant across re-reads.
 * NOTE(review): error-path gotos and the rdd initialiser's remaining
 * fields are on elided lines.
 */
277 static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
280 struct path lowerpath;
281 struct path upperpath;
282 struct ovl_readdir_data rdd = {
283 .ctx.actor = ovl_fill_merge,
289 ovl_path_lower(dentry, &lowerpath);
290 ovl_path_upper(dentry, &upperpath);
292 if (upperpath.dentry) {
293 err = ovl_dir_read(&upperpath, &rdd);
297 if (lowerpath.dentry) {
298 err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd);
303 if (lowerpath.dentry) {
305 * Insert lowerpath entries before upperpath ones, this allows
306 * offsets to be reasonably constant
308 list_add(&rdd.middle, rdd.list);
310 err = ovl_dir_read(&lowerpath, &rdd);
311 list_del(&rdd.middle); /* remove the temporary anchor */
/*
 * Reposition this open file's cursor at offset @pos within the cached
 * entry list (the walk/count logic is on elided lines).
 */
317 static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
319 struct ovl_cache_entry *p;
322 list_for_each_entry(p, &od->cache->entries, l_node) {
329 list_move_tail(&od->cursor.l_node, &p->l_node);
/*
 * Return the directory's merged-entry cache, reusing the cached one
 * when its version still matches the dentry; otherwise allocate and
 * fill a fresh cache and install it on the dentry.
 * NOTE(review): the refcount take/initialisation and the error-path
 * kfree are on elided lines.
 */
332 static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
335 struct ovl_dir_cache *cache;
337 cache = ovl_dir_cache(dentry);
338 if (cache && ovl_dentry_version_get(dentry) == cache->version) {
342 ovl_set_dir_cache(dentry, NULL); /* stale cache: detach before rebuilding */
344 cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
346 return ERR_PTR(-ENOMEM);
349 INIT_LIST_HEAD(&cache->entries);
351 res = ovl_dir_read_merged(dentry, &cache->entries);
353 ovl_cache_free(&cache->entries); /* read failed: discard partial list */
358 cache->version = ovl_dentry_version_get(dentry);
359 ovl_set_dir_cache(dentry, cache);
/*
 * ->iterate(): for a non-merged directory simply forward to the real
 * underlying file; for a merged directory walk the cached entry list
 * from this open's cursor, emitting every entry that is not a
 * whiteout.
 * NOTE(review): the is_real test, the cache/cursor setup, and the
 * ctx->pos bookkeeping are on elided lines.
 */
364 static int ovl_iterate(struct file *file, struct dir_context *ctx)
366 struct ovl_dir_file *od = file->private_data;
367 struct dentry *dentry = file->f_path.dentry;
373 return iterate_dir(od->realfile, ctx); /* non-merged: passthrough */
376 struct ovl_dir_cache *cache;
378 cache = ovl_cache_get(dentry);
380 return PTR_ERR(cache);
383 ovl_seek_cursor(od, ctx->pos);
386 while (od->cursor.l_node.next != &od->cache->entries) {
387 struct ovl_cache_entry *p;
389 p = list_entry(od->cursor.l_node.next, struct ovl_cache_entry, l_node);
392 if (!p->is_whiteout) {
393 if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
398 list_move(&od->cursor.l_node, &p->l_node); /* advance cursor past p */
/*
 * ->llseek(): under the inode mutex, either forward the seek to the
 * real directory file (non-merged case) or compute the new f_pos and,
 * when it actually changed, reposition the cached-list cursor.
 * NOTE(review): the origin switch (SEEK_SET/SEEK_CUR handling) and
 * the is_real branch condition are on elided lines.
 */
403 static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
406 struct ovl_dir_file *od = file->private_data;
408 mutex_lock(&file_inode(file)->i_mutex);
413 res = vfs_llseek(od->realfile, offset, origin);
414 file->f_pos = od->realfile->f_pos; /* mirror the real file's position */
420 offset += file->f_pos; /* presumably the SEEK_CUR case -- confirm */
430 if (offset != file->f_pos) {
431 file->f_pos = offset;
433 ovl_seek_cursor(od, offset);
438 mutex_unlock(&file_inode(file)->i_mutex);
/*
 * ->fsync(): if the directory started out lower but has since been
 * copied up, fsync must target the *upper* directory.  The upper file
 * is opened lazily and published in od->upperfile: a lockless fast
 * path reads it, and the slow path opens and installs it under
 * i_mutex, closing the loser's file on a race.
 * NOTE(review): the lockless_dereference()/smp_mb__before_spinlock()
 * pairing and the fput() on the race-loss path are partly on elided
 * lines -- verify the publish/consume ordering against the full
 * source before touching this.
 */
443 static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
446 struct ovl_dir_file *od = file->private_data;
447 struct dentry *dentry = file->f_path.dentry;
448 struct file *realfile = od->realfile;
451 * Need to check if we started out being a lower dir, but got copied up
453 if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) {
454 struct inode *inode = file_inode(file);
456 realfile = lockless_dereference(od->upperfile);
458 struct path upperpath;
460 ovl_path_upper(dentry, &upperpath);
461 realfile = ovl_path_open(&upperpath, O_RDONLY);
462 smp_mb__before_spinlock(); /* order the open before publishing under the lock */
463 mutex_lock(&inode->i_mutex);
464 if (!od->upperfile) {
465 if (IS_ERR(realfile)) {
466 mutex_unlock(&inode->i_mutex);
467 return PTR_ERR(realfile);
469 od->upperfile = realfile;
471 /* somebody has beaten us to it */
472 if (!IS_ERR(realfile))
474 realfile = od->upperfile;
476 mutex_unlock(&inode->i_mutex);
480 return vfs_fsync_range(realfile, start, end, datasync);
/*
 * ->release(): drop the cache reference under the inode mutex.
 * NOTE(review): the remaining teardown (fput of realfile/upperfile,
 * kfree(od)) is on elided lines.
 */
483 static int ovl_dir_release(struct inode *inode, struct file *file)
485 struct ovl_dir_file *od = file->private_data;
488 mutex_lock(&inode->i_mutex);
489 ovl_cache_put(od, file->f_path.dentry);
490 mutex_unlock(&inode->i_mutex);
/*
 * ->open(): open the topmost real directory with the caller's flags
 * and set up per-open state -- the cursor used for merged iteration
 * plus the is_real/is_upper flags consumed by iterate/fsync/reset.
 * NOTE(review): the kzalloc() NULL check and the kfree(od) on the
 * ovl_path_open() error path are on elided lines.
 */
500 static int ovl_dir_open(struct inode *inode, struct file *file)
502 struct path realpath;
503 struct file *realfile;
504 struct ovl_dir_file *od;
505 enum ovl_path_type type;
507 od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
511 type = ovl_path_real(file->f_path.dentry, &realpath);
512 realfile = ovl_path_open(&realpath, file->f_flags);
513 if (IS_ERR(realfile)) {
515 return PTR_ERR(realfile);
517 INIT_LIST_HEAD(&od->cursor.l_node);
518 od->realfile = realfile;
519 od->is_real = (type != OVL_PATH_MERGE); /* single layer: passthrough iterate */
520 od->is_upper = (type != OVL_PATH_LOWER); /* used by fsync's copy-up check */
521 od->cursor.is_cursor = true; /* distinguishes cursor from real entries */
522 file->private_data = od;
/* File operations for overlayfs directories. */
527 const struct file_operations ovl_dir_operations = {
528 .read = generic_read_dir,
529 .open = ovl_dir_open,
530 .iterate = ovl_iterate,
531 .llseek = ovl_dir_llseek,
532 .fsync = ovl_dir_fsync,
533 .release = ovl_dir_release,
/*
 * Read the merged directory into @list and scan it to decide whether
 * the directory is logically empty: "." and ".." are tolerated.
 * NOTE(review): the handling of non-dot entries (whiteout vs.
 * -ENOTEMPTY) is on elided lines.
 */
536 int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
539 struct ovl_cache_entry *p;
541 err = ovl_dir_read_merged(dentry, list);
547 list_for_each_entry(p, list, l_node) {
551 if (p->name[0] == '.') {
554 if (p->len == 2 && p->name[1] == '.') /* ".." */
/*
 * Remove each whiteout entry on @list from the @upper directory,
 * holding its i_mutex (I_MUTEX_CHILD nesting class since the parent
 * is presumably already locked by the caller -- confirm).  Lookup
 * failures are logged and the entry is skipped.
 * NOTE(review): the is_whiteout filter inside the loop and the dput()
 * are on elided lines.
 */
564 void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
566 struct ovl_cache_entry *p;
568 mutex_lock_nested(&upper->d_inode->i_mutex, I_MUTEX_CHILD);
569 list_for_each_entry(p, list, l_node) {
570 struct dentry *dentry;
575 dentry = lookup_one_len(p->name, upper, p->len);
576 if (IS_ERR(dentry)) {
577 pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
578 upper->d_name.name, p->len, p->name,
579 (int) PTR_ERR(dentry));
582 ovl_cleanup(upper->d_inode, dentry);
585 mutex_unlock(&upper->d_inode->i_mutex);