2 * Implementation of the diskquota system for the LINUX operating system. QUOTA
3 * is implemented using the BSD system call interface as the means of
4 * communication with the user level. This file contains the generic routines
5 * called by the different filesystems on allocation of an inode or block.
6 * These routines take care of the administration needed to have a consistent
7 * diskquota tracking system. The ideas of both user and group quotas are based
8 * on the Melbourne quota system as used on BSD derived systems. The internal
9 * implementation is based on one of the several variants of the LINUX
10 * inode-subsystem with the added complexity of the diskquota system.
12 * Author: Marco van Wieringen <mvw@planets.elm.net>
14 * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
16 * Revised list management to avoid races
17 * -- Bill Hawes, <whawes@star.net>, 9/98
19 * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
20 * As a consequence the locking was moved from dquot_decr_...(),
21 * dquot_incr_...() to calling functions.
22 * invalidate_dquots() now writes modified dquots.
23 * Serialized quota_off() and quota_on() for mount point.
24 * Fixed a few bugs in grow_dquots().
25 * Fixed deadlock in write_dquot() - we no longer account quotas on
27 * remove_dquot_ref() moved to inode.c - it now traverses through inodes
28 * add_dquot_ref() restarts after blocking
29 * Added check for bogus uid and fixed check for group in quotactl.
30 * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
32 * Used struct list_head instead of own list struct
33 * Invalidation of referenced dquots is no longer possible
34 * Improved free_dquots list management
35 * Quota and i_blocks are now updated in one place to avoid races
36 * Warnings are now delayed so we won't block in critical section
37 * Write updated not to require dquot lock
38 * Jan Kara, <jack@suse.cz>, 9/2000
40 * Added dynamic quota structure allocation
41 * Jan Kara <jack@suse.cz> 12/2000
43 * Rewritten quota interface. Implemented new quota format and
44 * formats registering.
45 * Jan Kara, <jack@suse.cz>, 2001,2002
48 * Jan Kara, <jack@suse.cz>, 10/2002
50 * Added journalled quota support, fix lock inversion problems
51 * Jan Kara, <jack@suse.cz>, 2003,2004
53 * (C) Copyright 1994 - 1997 Marco van Wieringen
56 #include <linux/errno.h>
57 #include <linux/kernel.h>
59 #include <linux/mount.h>
61 #include <linux/time.h>
62 #include <linux/types.h>
63 #include <linux/string.h>
64 #include <linux/fcntl.h>
65 #include <linux/stat.h>
66 #include <linux/tty.h>
67 #include <linux/file.h>
68 #include <linux/slab.h>
69 #include <linux/sysctl.h>
70 #include <linux/init.h>
71 #include <linux/module.h>
72 #include <linux/proc_fs.h>
73 #include <linux/security.h>
74 #include <linux/sched.h>
75 #include <linux/kmod.h>
76 #include <linux/namei.h>
77 #include <linux/capability.h>
78 #include <linux/quotaops.h>
79 #include "../internal.h" /* ugh */
81 #include <linux/uaccess.h>
84 * There are three quota SMP locks. dq_list_lock protects all lists with quotas
86 * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
87 * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
88 * i_blocks and i_bytes updates themselves are guarded by i_lock acquired directly
89 * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
90 * modifications of quota state (on quotaon and quotaoff) and readers who care
91 * about latest values take it as well.
93 * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
94 * dq_list_lock > dq_state_lock
96 * Note that some things (e.g. sb pointer, type, id) don't change during
97 * the life of the dquot structure and so need not be protected by a lock
99 * Any operation working on dquots via inode pointers must hold dqptr_sem. If
100 * operation is just reading pointers from inode (or not using them at all) the
101 * read lock is enough. If pointers are altered function must hold write lock.
102 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
103 * inode is a quota file). Functions adding pointers from inode to dquots have
104 * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
105 * have to do all pointer modifications before dropping dqptr_sem. This makes
106 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
107 * then drops all pointers to dquots from an inode.
109 * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
110 * from inodes (dquot_alloc_space() and such don't check the dq_lock).
111 * Currently dquot is locked only when it is being read to memory (or space for
112 * it is being allocated) on the first dqget() and when it is being released on
113 * the last dqput(). The allocation and release operations are serialized by
114 * the dq_lock and by checking the use count in dquot_release(). Write
115 * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
116 * spinlock to internal buffers before writing.
118 * Lock ordering (including related VFS locks) is the following:
119 * dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock >
121 * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc.
122 * The lock ordering of dqptr_sem imposed by the quota code is only dqonoff_mutex >
123 * dqptr_sem. But a filesystem has to reckon with the fact that functions such as
124 * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
125 * from inside a transaction to keep filesystem consistency after a crash. Also
126 * filesystems usually want to do some IO on dquot from ->mark_dirty which is
127 * called with dqptr_sem held.
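 *
 * A minimal sketch (illustrative only, not from the original text) of the
 * spinlock nesting the ordering above permits; an inner lock may be taken
 * while holding an outer one, never the reverse:
 *
 *	spin_lock(&dq_data_lock);	/- outermost of the three -/
 *	spin_lock(&dq_list_lock);	/- allowed under dq_data_lock -/
 *	/- ... update usage and list membership ... -/
 *	spin_unlock(&dq_list_lock);
 *	spin_unlock(&dq_data_lock);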
130 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
131 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
132 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
133 EXPORT_SYMBOL(dq_data_lock);
135 void __quota_error(struct super_block *sb, const char *func,
136 const char *fmt, ...)
138 if (printk_ratelimit()) {
140 struct va_format vaf;
147 printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
148 sb->s_id, func, &vaf);
153 EXPORT_SYMBOL(__quota_error);
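/*
 * Callers normally reach __quota_error() through the quota_error() wrapper
 * from quotaops.h, which fills in __func__. A hypothetical sketch:
 */
#if 0
	quota_error(sb, "dquot for id %u is corrupted", some_id); /* some_id is made up */
#endif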
155 #if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
156 static char *quotatypes[] = INITQFNAMES;
158 static struct quota_format_type *quota_formats; /* List of registered formats */
159 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
161 /* SLAB cache for dquot structures */
162 static struct kmem_cache *dquot_cachep;
164 int register_quota_format(struct quota_format_type *fmt)
166 spin_lock(&dq_list_lock);
167 fmt->qf_next = quota_formats;
169 spin_unlock(&dq_list_lock);
172 EXPORT_SYMBOL(register_quota_format);
174 void unregister_quota_format(struct quota_format_type *fmt)
176 struct quota_format_type **actqf;
178 spin_lock(&dq_list_lock);
179 for (actqf = &quota_formats; *actqf && *actqf != fmt;
180 actqf = &(*actqf)->qf_next)
183 *actqf = (*actqf)->qf_next;
184 spin_unlock(&dq_list_lock);
186 EXPORT_SYMBOL(unregister_quota_format);
188 static struct quota_format_type *find_quota_format(int id)
190 struct quota_format_type *actqf;
192 spin_lock(&dq_list_lock);
193 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
194 actqf = actqf->qf_next)
196 if (!actqf || !try_module_get(actqf->qf_owner)) {
199 spin_unlock(&dq_list_lock);
201 for (qm = 0; module_names[qm].qm_fmt_id &&
202 module_names[qm].qm_fmt_id != id; qm++)
204 if (!module_names[qm].qm_fmt_id ||
205 request_module(module_names[qm].qm_mod_name))
208 spin_lock(&dq_list_lock);
209 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
210 actqf = actqf->qf_next)
212 if (actqf && !try_module_get(actqf->qf_owner))
215 spin_unlock(&dq_list_lock);
219 static void put_quota_format(struct quota_format_type *fmt)
221 module_put(fmt->qf_owner);
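/*
 * Sketch of how a quota format module uses the registration API; the my_*
 * names are hypothetical (see fs/quota/quota_v2.c for a real user):
 */
#if 0
static struct quota_format_type my_format = {
	.qf_fmt_id = QFMT_VFS_V1,	/* format id announced to quotactl */
	.qf_ops = &my_qfmt_ops,		/* hypothetical quota_format_ops table */
	.qf_owner = THIS_MODULE,
};

static int __init my_quota_init(void)
{
	return register_quota_format(&my_format);
}

static void __exit my_quota_exit(void)
{
	unregister_quota_format(&my_format);
}
#endif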
225 * Dquot List Management:
226 * The quota code uses three lists for dquot management: the inuse_list,
227 * free_dquots, and dquot_hash[] array. A single dquot structure may be
228 * on all three lists, depending on its current state.
230 * All dquots are placed to the end of inuse_list when first created, and this
231 * list is used for invalidate operation, which must look at every dquot.
233 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
234 * and this list is searched whenever we need an available dquot. Dquots are
235 * removed from the list as soon as they are used again, and
236 * dqstats.free_dquots gives the number of dquots on the list. When
237 * dquot is invalidated it's completely released from memory.
239 * Dquots with a specific identity (device, type and id) are placed on
240 * one of the dquot_hash[] hash chains. This provides an efficient search
241 * mechanism to locate a specific dquot.
244 static LIST_HEAD(inuse_list);
245 static LIST_HEAD(free_dquots);
246 static unsigned int dq_hash_bits, dq_hash_mask;
247 static struct hlist_head *dquot_hash;
249 struct dqstats dqstats;
250 EXPORT_SYMBOL(dqstats);
252 static qsize_t inode_get_rsv_space(struct inode *inode);
253 static void __dquot_initialize(struct inode *inode, int type);
255 static inline unsigned int
256 hashfn(const struct super_block *sb, struct kqid qid)
258 unsigned int id = from_kqid(&init_user_ns, qid);
259 int type = qid.type;
260 unsigned long tmp;
262 tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
263 return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
267 * Following list functions expect dq_list_lock to be held
269 static inline void insert_dquot_hash(struct dquot *dquot)
271 struct hlist_head *head;
272 head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
273 hlist_add_head(&dquot->dq_hash, head);
276 static inline void remove_dquot_hash(struct dquot *dquot)
278 hlist_del_init(&dquot->dq_hash);
281 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
284 struct hlist_node *node;
287 hlist_for_each (node, dquot_hash+hashent) {
288 dquot = hlist_entry(node, struct dquot, dq_hash);
289 if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
295 /* Add a dquot to the tail of the free list */
296 static inline void put_dquot_last(struct dquot *dquot)
298 list_add_tail(&dquot->dq_free, &free_dquots);
299 dqstats_inc(DQST_FREE_DQUOTS);
302 static inline void remove_free_dquot(struct dquot *dquot)
304 if (list_empty(&dquot->dq_free))
306 list_del_init(&dquot->dq_free);
307 dqstats_dec(DQST_FREE_DQUOTS);
310 static inline void put_inuse(struct dquot *dquot)
312 /* We add to the back of the inuse list so we don't have to restart
313 * the traversal when we block while walking this list */
314 list_add_tail(&dquot->dq_inuse, &inuse_list);
315 dqstats_inc(DQST_ALLOC_DQUOTS);
318 static inline void remove_inuse(struct dquot *dquot)
320 dqstats_dec(DQST_ALLOC_DQUOTS);
321 list_del(&dquot->dq_inuse);
324 * End of list functions needing dq_list_lock
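/*
 * Taking dq_lock and immediately dropping it does not protect anything by
 * itself; it merely blocks until a concurrent holder (dqget() reading the
 * dquot in, or the last dqput() releasing it) has finished with the dquot.
 */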
327 static void wait_on_dquot(struct dquot *dquot)
329 mutex_lock(&dquot->dq_lock);
330 mutex_unlock(&dquot->dq_lock);
333 static inline int dquot_dirty(struct dquot *dquot)
335 return test_bit(DQ_MOD_B, &dquot->dq_flags);
338 static inline int mark_dquot_dirty(struct dquot *dquot)
340 return dquot->dq_sb->dq_op->mark_dirty(dquot);
343 /* Mark dquot dirty in an atomic manner, and return its old dirty flag state */
344 int dquot_mark_dquot_dirty(struct dquot *dquot)
348 /* If quota is dirty already, we don't have to acquire dq_list_lock */
349 if (test_bit(DQ_MOD_B, &dquot->dq_flags))
352 spin_lock(&dq_list_lock);
353 if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
354 list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
355 info[dquot->dq_id.type].dqi_dirty_list);
358 spin_unlock(&dq_list_lock);
361 EXPORT_SYMBOL(dquot_mark_dquot_dirty);
363 /* Dirtify all the dquots - this can block when journalling */
364 static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
369 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
371 /* Even in case of error we have to continue */
372 ret = mark_dquot_dirty(dquot[cnt]);
379 static inline void dqput_all(struct dquot **dquot)
383 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
387 /* This function needs dq_list_lock */
388 static inline int clear_dquot_dirty(struct dquot *dquot)
390 if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
392 list_del_init(&dquot->dq_dirty);
396 void mark_info_dirty(struct super_block *sb, int type)
398 set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
400 EXPORT_SYMBOL(mark_info_dirty);
403 * Read dquot from disk and alloc space for it
406 int dquot_acquire(struct dquot *dquot)
408 int ret = 0, ret2 = 0;
409 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
411 mutex_lock(&dquot->dq_lock);
412 mutex_lock(&dqopt->dqio_mutex);
413 if (!test_bit(DQ_READ_B, &dquot->dq_flags))
414 ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
417 set_bit(DQ_READ_B, &dquot->dq_flags);
418 /* Instantiate dquot if needed */
419 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
420 ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
421 /* Write the info if needed */
422 if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
423 ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
424 dquot->dq_sb, dquot->dq_id.type);
433 set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
435 mutex_unlock(&dqopt->dqio_mutex);
436 mutex_unlock(&dquot->dq_lock);
439 EXPORT_SYMBOL(dquot_acquire);
442 * Write dquot to disk
444 int dquot_commit(struct dquot *dquot)
447 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
449 mutex_lock(&dqopt->dqio_mutex);
450 spin_lock(&dq_list_lock);
451 if (!clear_dquot_dirty(dquot)) {
452 spin_unlock(&dq_list_lock);
455 spin_unlock(&dq_list_lock);
456 /* A dquot can be inactive only if there was an error during read/init,
457 * so we'd better not write it */
458 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
459 ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
463 mutex_unlock(&dqopt->dqio_mutex);
466 EXPORT_SYMBOL(dquot_commit);
471 int dquot_release(struct dquot *dquot)
473 int ret = 0, ret2 = 0;
474 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
476 mutex_lock(&dquot->dq_lock);
477 /* Check whether we are not racing with some other dqget() */
478 if (atomic_read(&dquot->dq_count) > 1)
480 mutex_lock(&dqopt->dqio_mutex);
481 if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
482 ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
484 if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
485 ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
486 dquot->dq_sb, dquot->dq_id.type);
491 clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
492 mutex_unlock(&dqopt->dqio_mutex);
494 mutex_unlock(&dquot->dq_lock);
497 EXPORT_SYMBOL(dquot_release);
499 void dquot_destroy(struct dquot *dquot)
501 kmem_cache_free(dquot_cachep, dquot);
503 EXPORT_SYMBOL(dquot_destroy);
505 static inline void do_destroy_dquot(struct dquot *dquot)
507 dquot->dq_sb->dq_op->destroy_dquot(dquot);
510 /* Invalidate all dquots on the list. Note that this function is called after
511 * quota is disabled and pointers from inodes removed so there cannot be new
512 * quota users. There can still be some users of quotas due to inodes being
513 * just deleted or pruned by prune_icache() (those are not attached to any
514 * list) or parallel quotactl call. We have to wait for such users.
516 static void invalidate_dquots(struct super_block *sb, int type)
518 struct dquot *dquot, *tmp;
521 spin_lock(&dq_list_lock);
522 list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
523 if (dquot->dq_sb != sb)
525 if (dquot->dq_id.type != type)
527 /* Wait for dquot users */
528 if (atomic_read(&dquot->dq_count)) {
531 atomic_inc(&dquot->dq_count);
532 prepare_to_wait(&dquot->dq_wait_unused, &wait,
533 TASK_UNINTERRUPTIBLE);
534 spin_unlock(&dq_list_lock);
535 /* Once dqput() wakes us up, we know it's time to free
537 * IMPORTANT: we rely on the fact that there is always
538 * at most one process waiting for dquot to free.
539 * Otherwise dq_count would be > 1 and we would never
542 if (atomic_read(&dquot->dq_count) > 1)
544 finish_wait(&dquot->dq_wait_unused, &wait);
546 /* At this moment the dquot need not exist (it could have
547 * been reclaimed by prune_dqcache()). Hence we must
552 * Quota now has no users and it has been written out on the last
553 * dqput()
555 remove_dquot_hash(dquot);
556 remove_free_dquot(dquot);
558 do_destroy_dquot(dquot);
560 spin_unlock(&dq_list_lock);
563 /* Call callback for every active dquot on given filesystem */
564 int dquot_scan_active(struct super_block *sb,
565 int (*fn)(struct dquot *dquot, unsigned long priv),
568 struct dquot *dquot, *old_dquot = NULL;
571 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
572 spin_lock(&dq_list_lock);
573 list_for_each_entry(dquot, &inuse_list, dq_inuse) {
574 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
576 if (dquot->dq_sb != sb)
578 /* Now we have active dquot so we can just increase use count */
579 atomic_inc(&dquot->dq_count);
580 spin_unlock(&dq_list_lock);
581 dqstats_inc(DQST_LOOKUPS);
585 * ->release_dquot() can be racing with us. Our reference
586 * protects us from new calls to it so just wait for any
587 * outstanding call and recheck the DQ_ACTIVE_B after that.
589 wait_on_dquot(dquot);
590 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
591 ret = fn(dquot, priv);
595 spin_lock(&dq_list_lock);
596 /* We are safe to continue now because our dquot could not
597 * be moved out of the inuse list while we hold the reference */
599 spin_unlock(&dq_list_lock);
602 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
605 EXPORT_SYMBOL(dquot_scan_active);
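/*
 * Sketch of a dquot_scan_active() caller; the callback below is
 * hypothetical. It is invoked with a reference held on each active dquot
 * and may block:
 */
#if 0
static int count_active(struct dquot *dquot, unsigned long priv)
{
	(*(unsigned long *)priv)++;	/* priv smuggles a counter pointer */
	return 0;			/* non-zero would abort the scan */
}

	/* in the caller: */
	unsigned long nr_active = 0;

	dquot_scan_active(sb, count_active, (unsigned long)&nr_active);
#endif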
607 /* Write all dquot structures to quota files */
608 int dquot_writeback_dquots(struct super_block *sb, int type)
610 struct list_head *dirty;
612 struct quota_info *dqopt = sb_dqopt(sb);
616 mutex_lock(&dqopt->dqonoff_mutex);
617 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
618 if (type != -1 && cnt != type)
620 if (!sb_has_quota_active(sb, cnt))
622 spin_lock(&dq_list_lock);
623 dirty = &dqopt->info[cnt].dqi_dirty_list;
624 while (!list_empty(dirty)) {
625 dquot = list_first_entry(dirty, struct dquot,
627 /* Only a bad dquot can be dirty and inactive... */
628 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
629 clear_dquot_dirty(dquot);
632 /* Now we have an active dquot from which someone is
633 * holding a reference so we can safely just increase
635 atomic_inc(&dquot->dq_count);
636 spin_unlock(&dq_list_lock);
637 dqstats_inc(DQST_LOOKUPS);
638 err = sb->dq_op->write_dquot(dquot);
642 spin_lock(&dq_list_lock);
644 spin_unlock(&dq_list_lock);
647 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
648 if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
649 && info_dirty(&dqopt->info[cnt]))
650 sb->dq_op->write_info(sb, cnt);
651 dqstats_inc(DQST_SYNCS);
652 mutex_unlock(&dqopt->dqonoff_mutex);
656 EXPORT_SYMBOL(dquot_writeback_dquots);
658 /* Write all dquot structures to disk and make them visible to userspace */
659 int dquot_quota_sync(struct super_block *sb, int type)
661 struct quota_info *dqopt = sb_dqopt(sb);
665 ret = dquot_writeback_dquots(sb, type);
668 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
671 /* This is neither very clever nor fast but currently I don't know of
672 * any other simple way of getting quota data to disk, and we must get
673 * it there for it to be visible to userspace... */
674 if (sb->s_op->sync_fs)
675 sb->s_op->sync_fs(sb, 1);
676 sync_blockdev(sb->s_bdev);
679 * Now that everything is written we can discard the pagecache so
680 * that userspace sees the changes.
682 mutex_lock(&dqopt->dqonoff_mutex);
683 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
684 if (type != -1 && cnt != type)
686 if (!sb_has_quota_active(sb, cnt))
688 mutex_lock(&dqopt->files[cnt]->i_mutex);
689 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
690 mutex_unlock(&dqopt->files[cnt]->i_mutex);
692 mutex_unlock(&dqopt->dqonoff_mutex);
696 EXPORT_SYMBOL(dquot_quota_sync);
698 /* Free unused dquots from cache */
699 static void prune_dqcache(int count)
701 struct list_head *head;
704 head = free_dquots.prev;
705 while (head != &free_dquots && count) {
706 dquot = list_entry(head, struct dquot, dq_free);
707 remove_dquot_hash(dquot);
708 remove_free_dquot(dquot);
710 do_destroy_dquot(dquot);
712 head = free_dquots.prev;
718 * This is called from kswapd when we think we need some more memory.
720 static int shrink_dqcache_memory(struct shrinker *shrink,
721 struct shrink_control *sc)
723 int nr = sc->nr_to_scan;
726 spin_lock(&dq_list_lock);
728 spin_unlock(&dq_list_lock);
731 percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS])
732 /100) * sysctl_vfs_cache_pressure;
735 static struct shrinker dqcache_shrinker = {
736 .shrink = shrink_dqcache_memory,
737 .seeks = DEFAULT_SEEKS,
741 * Put reference to dquot
742 * NOTE: If you change this function please check whether dqput_blocks() works right...
744 void dqput(struct dquot *dquot)
750 #ifdef CONFIG_QUOTA_DEBUG
751 if (!atomic_read(&dquot->dq_count)) {
752 quota_error(dquot->dq_sb, "trying to free a free dquot of %s %d",
753 quotatypes[dquot->dq_id.type],
754 from_kqid(&init_user_ns, dquot->dq_id));
758 dqstats_inc(DQST_DROPS);
760 spin_lock(&dq_list_lock);
761 if (atomic_read(&dquot->dq_count) > 1) {
762 /* We have more than one user... nothing to do */
763 atomic_dec(&dquot->dq_count);
764 /* Releasing dquot during quotaoff phase? */
765 if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
766 atomic_read(&dquot->dq_count) == 1)
767 wake_up(&dquot->dq_wait_unused);
768 spin_unlock(&dq_list_lock);
771 /* Need to release dquot? */
772 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
773 spin_unlock(&dq_list_lock);
774 /* Commit dquot before releasing */
775 ret = dquot->dq_sb->dq_op->write_dquot(dquot);
777 quota_error(dquot->dq_sb, "Can't write quota structure"
778 " (error %d). Quota may get out of sync!",
781 * We clear the dirty bit anyway, so that we avoid
782 * an infinite loop here
784 spin_lock(&dq_list_lock);
785 clear_dquot_dirty(dquot);
786 spin_unlock(&dq_list_lock);
790 /* Clear flag in case dquot was inactive (something bad happened) */
791 clear_dquot_dirty(dquot);
792 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
793 spin_unlock(&dq_list_lock);
794 dquot->dq_sb->dq_op->release_dquot(dquot);
797 atomic_dec(&dquot->dq_count);
798 #ifdef CONFIG_QUOTA_DEBUG
800 BUG_ON(!list_empty(&dquot->dq_free));
802 put_dquot_last(dquot);
803 spin_unlock(&dq_list_lock);
805 EXPORT_SYMBOL(dqput);
807 struct dquot *dquot_alloc(struct super_block *sb, int type)
809 return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
811 EXPORT_SYMBOL(dquot_alloc);
813 static struct dquot *get_empty_dquot(struct super_block *sb, int type)
817 dquot = sb->dq_op->alloc_dquot(sb, type);
821 mutex_init(&dquot->dq_lock);
822 INIT_LIST_HEAD(&dquot->dq_free);
823 INIT_LIST_HEAD(&dquot->dq_inuse);
824 INIT_HLIST_NODE(&dquot->dq_hash);
825 INIT_LIST_HEAD(&dquot->dq_dirty);
826 init_waitqueue_head(&dquot->dq_wait_unused);
828 dquot->dq_id = make_kqid_invalid(type);
829 atomic_set(&dquot->dq_count, 1);
835 * Get reference to dquot
837 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
838 * destroying our dquot by:
839 * a) checking for quota flags under dq_list_lock and
840 * b) getting a reference to dquot before we release dq_list_lock
842 struct dquot *dqget(struct super_block *sb, struct kqid qid)
844 unsigned int hashent = hashfn(sb, qid);
845 struct dquot *dquot = NULL, *empty = NULL;
847 if (!sb_has_quota_active(sb, qid.type))
850 spin_lock(&dq_list_lock);
851 spin_lock(&dq_state_lock);
852 if (!sb_has_quota_active(sb, qid.type)) {
853 spin_unlock(&dq_state_lock);
854 spin_unlock(&dq_list_lock);
857 spin_unlock(&dq_state_lock);
859 dquot = find_dquot(hashent, sb, qid);
862 spin_unlock(&dq_list_lock);
863 empty = get_empty_dquot(sb, qid.type);
865 schedule(); /* Try to wait for a moment... */
871 /* all dquots go on the inuse_list */
873 /* hash it first so it can be found */
874 insert_dquot_hash(dquot);
875 spin_unlock(&dq_list_lock);
876 dqstats_inc(DQST_LOOKUPS);
878 if (!atomic_read(&dquot->dq_count))
879 remove_free_dquot(dquot);
880 atomic_inc(&dquot->dq_count);
881 spin_unlock(&dq_list_lock);
882 dqstats_inc(DQST_CACHE_HITS);
883 dqstats_inc(DQST_LOOKUPS);
885 /* Wait for dq_lock - after this we know that either dquot_release() is
886 * already finished or it will be canceled due to dq_count > 1 test */
887 wait_on_dquot(dquot);
888 /* Read the dquot / allocate space in quota file */
889 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
890 sb->dq_op->acquire_dquot(dquot) < 0) {
895 #ifdef CONFIG_QUOTA_DEBUG
896 BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
900 do_destroy_dquot(empty);
904 EXPORT_SYMBOL(dqget);
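/*
 * Sketch of the reference discipline (hypothetical snippet): in this
 * version dqget() returns NULL on failure, and every successful dqget()
 * must be balanced by dqput():
 */
#if 0
	struct dquot *dquot = dqget(sb, make_kqid_uid(uid));	/* uid is a kuid_t */

	if (dquot) {
		spin_lock(&dq_data_lock);
		/* ... inspect dquot->dq_dqb here ... */
		spin_unlock(&dq_data_lock);
		dqput(dquot);
	}
#endif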
906 static int dqinit_needed(struct inode *inode, int type)
910 if (IS_NOQUOTA(inode))
913 return !inode->i_dquot[type];
914 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
915 if (!inode->i_dquot[cnt])
920 /* This routine is guarded by dqonoff_mutex */
921 static void add_dquot_ref(struct super_block *sb, int type)
923 struct inode *inode, *old_inode = NULL;
924 #ifdef CONFIG_QUOTA_DEBUG
928 spin_lock(&inode_sb_list_lock);
929 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
930 spin_lock(&inode->i_lock);
931 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
932 !atomic_read(&inode->i_writecount) ||
933 !dqinit_needed(inode, type)) {
934 spin_unlock(&inode->i_lock);
938 spin_unlock(&inode->i_lock);
939 spin_unlock(&inode_sb_list_lock);
941 #ifdef CONFIG_QUOTA_DEBUG
942 if (unlikely(inode_get_rsv_space(inode) > 0))
946 __dquot_initialize(inode, type);
949 * We hold a reference to 'inode' so it couldn't have been
950 * removed from the s_inodes list while we dropped the
951 * inode_sb_list_lock. We cannot iput the inode now as we can be
952 * holding the last reference and we cannot iput it under
953 * inode_sb_list_lock. So we keep the reference and iput it
954 * later.
957 spin_lock(&inode_sb_list_lock);
959 spin_unlock(&inode_sb_list_lock);
962 #ifdef CONFIG_QUOTA_DEBUG
964 quota_error(sb, "Writes happened before quota was turned on "
965 "thus quota information is probably inconsistent. "
966 "Please run quotacheck(8)");
972 * Return 0 if dqput() won't block.
973 * (note that 1 doesn't necessarily mean blocking)
975 static inline int dqput_blocks(struct dquot *dquot)
977 if (atomic_read(&dquot->dq_count) <= 1)
983 * Remove references to dquots from the inode and add the dquot to the list
984 * for freeing if we have the last reference to the dquot.
985 * We can't race with anybody because we hold dqptr_sem for writing...
987 static int remove_inode_dquot_ref(struct inode *inode, int type,
988 struct list_head *tofree_head)
990 struct dquot *dquot = inode->i_dquot[type];
992 inode->i_dquot[type] = NULL;
994 if (dqput_blocks(dquot)) {
995 #ifdef CONFIG_QUOTA_DEBUG
996 if (atomic_read(&dquot->dq_count) != 1)
997 quota_error(inode->i_sb, "Adding dquot with "
998 "dq_count %d to dispose list",
999 atomic_read(&dquot->dq_count));
1001 spin_lock(&dq_list_lock);
1002 /* As the dquot currently has users it can't be on
1003 * the free list... */
1004 list_add(&dquot->dq_free, tofree_head);
1005 spin_unlock(&dq_list_lock);
1009 dqput(dquot); /* We have guaranteed we won't block */
1015 * Free list of dquots
1016 * Dquots are removed from inodes and no new references can be taken, so we
1017 * are the only ones holding a reference
1019 static void put_dquot_list(struct list_head *tofree_head)
1021 struct list_head *act_head;
1022 struct dquot *dquot;
1024 act_head = tofree_head->next;
1025 while (act_head != tofree_head) {
1026 dquot = list_entry(act_head, struct dquot, dq_free);
1027 act_head = act_head->next;
1028 /* Remove dquot from the list so we won't have problems... */
1029 list_del_init(&dquot->dq_free);
1034 static void remove_dquot_ref(struct super_block *sb, int type,
1035 struct list_head *tofree_head)
1037 struct inode *inode;
1040 spin_lock(&inode_sb_list_lock);
1041 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1043 * We also have to scan I_NEW inodes because they can already
1044 * have their quota pointer initialized. Luckily, we need to touch
1045 * only quota pointers and these have separate locking
1048 if (!IS_NOQUOTA(inode)) {
1049 if (unlikely(inode_get_rsv_space(inode) > 0))
1051 remove_inode_dquot_ref(inode, type, tofree_head);
1054 spin_unlock(&inode_sb_list_lock);
1055 #ifdef CONFIG_QUOTA_DEBUG
1057 printk(KERN_WARNING "VFS (%s): Writes happened after quota"
1058 " was disabled thus quota information is probably "
1059 "inconsistent. Please run quotacheck(8).\n", sb->s_id);
1064 /* Gather all references from inodes and drop them */
1065 static void drop_dquot_ref(struct super_block *sb, int type)
1067 LIST_HEAD(tofree_head);
1070 down_write(&sb_dqopt(sb)->dqptr_sem);
1071 remove_dquot_ref(sb, type, &tofree_head);
1072 up_write(&sb_dqopt(sb)->dqptr_sem);
1073 put_dquot_list(&tofree_head);
1077 static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
1079 dquot->dq_dqb.dqb_curinodes += number;
1082 static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
1084 dquot->dq_dqb.dqb_curspace += number;
1087 static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
1089 dquot->dq_dqb.dqb_rsvspace += number;
1093 * Claim reserved quota space
1095 static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
1097 if (dquot->dq_dqb.dqb_rsvspace < number) {
1099 number = dquot->dq_dqb.dqb_rsvspace;
1101 dquot->dq_dqb.dqb_curspace += number;
1102 dquot->dq_dqb.dqb_rsvspace -= number;
1106 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
1108 if (dquot->dq_dqb.dqb_rsvspace >= number)
1109 dquot->dq_dqb.dqb_rsvspace -= number;
1112 dquot->dq_dqb.dqb_rsvspace = 0;
1116 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
1118 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1119 dquot->dq_dqb.dqb_curinodes >= number)
1120 dquot->dq_dqb.dqb_curinodes -= number;
1122 dquot->dq_dqb.dqb_curinodes = 0;
1123 if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
1124 dquot->dq_dqb.dqb_itime = (time_t) 0;
1125 clear_bit(DQ_INODES_B, &dquot->dq_flags);
1128 static void dquot_decr_space(struct dquot *dquot, qsize_t number)
1130 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1131 dquot->dq_dqb.dqb_curspace >= number)
1132 dquot->dq_dqb.dqb_curspace -= number;
1134 dquot->dq_dqb.dqb_curspace = 0;
1135 if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
1136 dquot->dq_dqb.dqb_btime = (time_t) 0;
1137 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1141 struct super_block *w_sb;
1142 struct kqid w_dq_id;
1146 static int warning_issued(struct dquot *dquot, const int warntype)
1148 int flag = (warntype == QUOTA_NL_BHARDWARN ||
1149 warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
1150 ((warntype == QUOTA_NL_IHARDWARN ||
1151 warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
1155 return test_and_set_bit(flag, &dquot->dq_flags);
1158 #ifdef CONFIG_PRINT_QUOTA_WARNING
1159 static int flag_print_warnings = 1;
1161 static int need_print_warning(struct dquot_warn *warn)
1163 if (!flag_print_warnings)
1166 switch (warn->w_dq_id.type) {
1168 return uid_eq(current_fsuid(), warn->w_dq_id.uid);
1170 return in_group_p(warn->w_dq_id.gid);
1171 case PRJQUOTA: /* Never taken... Just make gcc happy */
1177 /* Print a warning to the user who exceeded quota */
1178 static void print_warning(struct dquot_warn *warn)
1181 struct tty_struct *tty;
1182 int warntype = warn->w_type;
1184 if (warntype == QUOTA_NL_IHARDBELOW ||
1185 warntype == QUOTA_NL_ISOFTBELOW ||
1186 warntype == QUOTA_NL_BHARDBELOW ||
1187 warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
1190 tty = get_current_tty();
1193 tty_write_message(tty, warn->w_sb->s_id);
1194 if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
1195 tty_write_message(tty, ": warning, ");
1197 tty_write_message(tty, ": write failed, ");
1198 tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
1200 case QUOTA_NL_IHARDWARN:
1201 msg = " file limit reached.\r\n";
1203 case QUOTA_NL_ISOFTLONGWARN:
1204 msg = " file quota exceeded too long.\r\n";
1206 case QUOTA_NL_ISOFTWARN:
1207 msg = " file quota exceeded.\r\n";
1209 case QUOTA_NL_BHARDWARN:
1210 msg = " block limit reached.\r\n";
1212 case QUOTA_NL_BSOFTLONGWARN:
1213 msg = " block quota exceeded too long.\r\n";
1215 case QUOTA_NL_BSOFTWARN:
1216 msg = " block quota exceeded.\r\n";
1219 tty_write_message(tty, msg);
1224 static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
1227 if (warning_issued(dquot, warntype))
1229 warn->w_type = warntype;
1230 warn->w_sb = dquot->dq_sb;
1231 warn->w_dq_id = dquot->dq_id;
1235 * Write warnings to the console and send warning messages over netlink.
1237 * Note that this function can call into tty and networking code.
1239 static void flush_warnings(struct dquot_warn *warn)
1243 for (i = 0; i < MAXQUOTAS; i++) {
1244 if (warn[i].w_type == QUOTA_NL_NOWARN)
1246 #ifdef CONFIG_PRINT_QUOTA_WARNING
1247 print_warning(&warn[i]);
1249 quota_send_warning(warn[i].w_dq_id,
1250 warn[i].w_sb->s_dev, warn[i].w_type);
1254 static int ignore_hardlimit(struct dquot *dquot)
1256 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
1258 return capable(CAP_SYS_RESOURCE) &&
1259 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1260 !(info->dqi_flags & V1_DQF_RSQUASH));
1263 /* needs dq_data_lock */
1264 static int check_idq(struct dquot *dquot, qsize_t inodes,
1265 struct dquot_warn *warn)
1267 qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
1269 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
1270 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1273 if (dquot->dq_dqb.dqb_ihardlimit &&
1274 newinodes > dquot->dq_dqb.dqb_ihardlimit &&
1275 !ignore_hardlimit(dquot)) {
1276 prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
1280 if (dquot->dq_dqb.dqb_isoftlimit &&
1281 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1282 dquot->dq_dqb.dqb_itime &&
1283 get_seconds() >= dquot->dq_dqb.dqb_itime &&
1284 !ignore_hardlimit(dquot)) {
1285 prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
1289 if (dquot->dq_dqb.dqb_isoftlimit &&
1290 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1291 dquot->dq_dqb.dqb_itime == 0) {
1292 prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
1293 dquot->dq_dqb.dqb_itime = get_seconds() +
1294 sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
1300 /* needs dq_data_lock */
1301 static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc,
1302 struct dquot_warn *warn)
1305 struct super_block *sb = dquot->dq_sb;
1307 if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
1308 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1311 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
1314 if (dquot->dq_dqb.dqb_bhardlimit &&
1315 tspace > dquot->dq_dqb.dqb_bhardlimit &&
1316 !ignore_hardlimit(dquot)) {
1318 prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
1322 if (dquot->dq_dqb.dqb_bsoftlimit &&
1323 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1324 dquot->dq_dqb.dqb_btime &&
1325 get_seconds() >= dquot->dq_dqb.dqb_btime &&
1326 !ignore_hardlimit(dquot)) {
1328 prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
1332 if (dquot->dq_dqb.dqb_bsoftlimit &&
1333 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1334 dquot->dq_dqb.dqb_btime == 0) {
1336 prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
1337 dquot->dq_dqb.dqb_btime = get_seconds() +
1338 sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
1342 * We don't allow preallocation to exceed the softlimit, so exceeding it will
1343 * always be reported
1351 static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1355 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1356 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1357 !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
1358 return QUOTA_NL_NOWARN;
1360 newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1361 if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1362 return QUOTA_NL_ISOFTBELOW;
1363 if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1364 newinodes < dquot->dq_dqb.dqb_ihardlimit)
1365 return QUOTA_NL_IHARDBELOW;
1366 return QUOTA_NL_NOWARN;
1369 static int info_bdq_free(struct dquot *dquot, qsize_t space)
1371 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1372 dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
1373 return QUOTA_NL_NOWARN;
1375 if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
1376 return QUOTA_NL_BSOFTBELOW;
1377 if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
1378 dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
1379 return QUOTA_NL_BHARDBELOW;
1380 return QUOTA_NL_NOWARN;
1383 static int dquot_active(const struct inode *inode)
1385 struct super_block *sb = inode->i_sb;
1387 if (IS_NOQUOTA(inode))
1389 return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
1393 * Initialize quota pointers in inode
1395 * We do things in a somewhat complicated way but by that we avoid calling
1396 * dqget() and thus filesystem callbacks under dqptr_sem.
1398 * It is better to call this function outside of any transaction as it
1399 * might need a lot of space in journal for dquot structure allocation.
1401 static void __dquot_initialize(struct inode *inode, int type)
1404 struct dquot *got[MAXQUOTAS];
1405 struct super_block *sb = inode->i_sb;
1408 /* First test before acquiring mutex - solves deadlocks when we
1409 * re-enter the quota code and are already holding the mutex */
1410 if (!dquot_active(inode))
1413 /* First get references to structures we might need. */
1414 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1417 if (type != -1 && cnt != type)
1421 qid = make_kqid_uid(inode->i_uid);
1424 qid = make_kqid_gid(inode->i_gid);
1427 got[cnt] = dqget(sb, qid);
1430 down_write(&sb_dqopt(sb)->dqptr_sem);
1431 if (IS_NOQUOTA(inode))
1433 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1434 if (type != -1 && cnt != type)
1436 /* Avoid races with quotaoff() */
1437 if (!sb_has_quota_active(sb, cnt))
1439 /* We could race with quotaon or dqget() could have failed */
1442 if (!inode->i_dquot[cnt]) {
1443 inode->i_dquot[cnt] = got[cnt];
1446 * Make quota reservation system happy if someone
1447 * did a write before quota was turned on
1449 rsv = inode_get_rsv_space(inode);
1450 if (unlikely(rsv)) {
1451 spin_lock(&dq_data_lock);
1452 dquot_resv_space(inode->i_dquot[cnt], rsv);
1453 spin_unlock(&dq_data_lock);
1458 up_write(&sb_dqopt(sb)->dqptr_sem);
1459 /* Drop unused references */
1463 void dquot_initialize(struct inode *inode)
1465 __dquot_initialize(inode, -1);
1467 EXPORT_SYMBOL(dquot_initialize);
1470 * Release all quotas referenced by inode
1472 static void __dquot_drop(struct inode *inode)
1475 struct dquot *put[MAXQUOTAS];
1477 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1478 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1479 put[cnt] = inode->i_dquot[cnt];
1480 inode->i_dquot[cnt] = NULL;
1482 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1486 void dquot_drop(struct inode *inode)
1490 if (IS_NOQUOTA(inode))
1494 * Test before calling to rule out calls from proc and such
1495 * where we are not allowed to block. Note that this is
1496 * actually a reliable test even without the lock - the caller
1497 * must ensure that nobody can come after the DQUOT_DROP and
1498 * add quota pointers back anyway.
1500 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1501 if (inode->i_dquot[cnt])
1505 if (cnt < MAXQUOTAS)
1506 __dquot_drop(inode);
1508 EXPORT_SYMBOL(dquot_drop);
1511 * inode_reserved_space is managed internally by quota, and protected by
1512 * i_lock similar to i_blocks+i_bytes.
1514 static qsize_t *inode_reserved_space(struct inode * inode)
1516 /* A filesystem must explicitly define its own method in order to use
1517 * the quota reservation interface */
1518 BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
1519 return inode->i_sb->dq_op->get_reserved_space(inode);
1522 void inode_add_rsv_space(struct inode *inode, qsize_t number)
1524 spin_lock(&inode->i_lock);
1525 *inode_reserved_space(inode) += number;
1526 spin_unlock(&inode->i_lock);
1528 EXPORT_SYMBOL(inode_add_rsv_space);
1530 void inode_claim_rsv_space(struct inode *inode, qsize_t number)
1532 spin_lock(&inode->i_lock);
1533 *inode_reserved_space(inode) -= number;
1534 __inode_add_bytes(inode, number);
1535 spin_unlock(&inode->i_lock);
1537 EXPORT_SYMBOL(inode_claim_rsv_space);
1539 void inode_sub_rsv_space(struct inode *inode, qsize_t number)
1541 spin_lock(&inode->i_lock);
1542 *inode_reserved_space(inode) -= number;
1543 spin_unlock(&inode->i_lock);
1545 EXPORT_SYMBOL(inode_sub_rsv_space);
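/*
 * Sketch: a filesystem opts into the reservation interface by providing
 * ->get_reserved_space in its dquot_operations, returning a pointer to a
 * per-inode counter it owns (names below are hypothetical):
 */
#if 0
static qsize_t *myfs_get_reserved_space(struct inode *inode)
{
	return &MYFS_I(inode)->i_reserved_quota;
}
#endif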
1547 static qsize_t inode_get_rsv_space(struct inode *inode)
1551 if (!inode->i_sb->dq_op->get_reserved_space)
1553 spin_lock(&inode->i_lock);
1554 ret = *inode_reserved_space(inode);
1555 spin_unlock(&inode->i_lock);
1559 static void inode_incr_space(struct inode *inode, qsize_t number,
1563 inode_add_rsv_space(inode, number);
1565 inode_add_bytes(inode, number);
1568 static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
1571 inode_sub_rsv_space(inode, number);
1573 inode_sub_bytes(inode, number);
1577 * This function updates the i_blocks+i_bytes fields and quota information
1578 * (together with appropriate checks).
1580 * NOTE: We absolutely rely on the fact that the caller dirties the inode
1581 * (usually helpers in quotaops.h care about this) and holds a handle for
1582 * the current transaction so that the dquot write and inode write go into
1583 * the same transaction.
1587 * This operation can block, but only after everything is updated
1589 int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1592 struct dquot_warn warn[MAXQUOTAS];
1593 struct dquot **dquots = inode->i_dquot;
1594 int reserve = flags & DQUOT_SPACE_RESERVE;
1597 * First test before acquiring mutex - solves deadlocks when we
1598 * re-enter the quota code and are already holding the mutex
1600 if (!dquot_active(inode)) {
1601 inode_incr_space(inode, number, reserve);
1605 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1606 warn[cnt].w_type = QUOTA_NL_NOWARN;
1608 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1609 spin_lock(&dq_data_lock);
1610 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1613 ret = check_bdq(dquots[cnt], number,
1614 !(flags & DQUOT_SPACE_WARN), &warn[cnt]);
1615 if (ret && !(flags & DQUOT_SPACE_NOFAIL)) {
1616 spin_unlock(&dq_data_lock);
1617 goto out_flush_warn;
1620 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1624 dquot_resv_space(dquots[cnt], number);
1626 dquot_incr_space(dquots[cnt], number);
1628 inode_incr_space(inode, number, reserve);
1629 spin_unlock(&dq_data_lock);
1632 goto out_flush_warn;
1633 mark_all_dquot_dirty(dquots);
1635 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1636 flush_warnings(warn);
1640 EXPORT_SYMBOL(__dquot_alloc_space);
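/*
 * Filesystems normally reach __dquot_alloc_space() through the quotaops.h
 * wrappers; a rough sketch of a block allocation path (error handling
 * trimmed, variable names hypothetical):
 */
#if 0
	ret = dquot_alloc_block(inode, nr_blocks);	/* charges quota and i_blocks */
	if (ret)
		return ret;				/* typically -EDQUOT */
	/* ... allocate the blocks on disk ... */
#endif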
1643 * This operation can block, but only after everything is updated
1645 int dquot_alloc_inode(const struct inode *inode)
1648 struct dquot_warn warn[MAXQUOTAS];
1649 struct dquot * const *dquots = inode->i_dquot;
1651 /* First test before acquiring mutex - solves deadlocks when we
1652 * re-enter the quota code and are already holding the mutex */
1653 if (!dquot_active(inode))
1655 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1656 warn[cnt].w_type = QUOTA_NL_NOWARN;
1657 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1658 spin_lock(&dq_data_lock);
1659 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1662 ret = check_idq(dquots[cnt], 1, &warn[cnt]);
1667 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1670 dquot_incr_inodes(dquots[cnt], 1);
1674 spin_unlock(&dq_data_lock);
1676 mark_all_dquot_dirty(dquots);
1677 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1678 flush_warnings(warn);
1681 EXPORT_SYMBOL(dquot_alloc_inode);
1684 * Convert in-memory reserved quotas to real consumed quotas
1686 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
1690 if (!dquot_active(inode)) {
1691 inode_claim_rsv_space(inode, number);
1695 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1696 spin_lock(&dq_data_lock);
1697 /* Convert reserved quotas into allocated quotas */
1698 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1699 if (inode->i_dquot[cnt])
1700 dquot_claim_reserved_space(inode->i_dquot[cnt],
1703 /* Update inode bytes */
1704 inode_claim_rsv_space(inode, number);
1705 spin_unlock(&dq_data_lock);
1706 mark_all_dquot_dirty(inode->i_dquot);
1707 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1710 EXPORT_SYMBOL(dquot_claim_space_nodirty);
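/*
 * Sketch of the delayed-allocation pattern this supports, using the
 * quotaops.h wrappers (assumed call sites, error handling trimmed):
 */
#if 0
	/* at write time: reserve, may fail with -EDQUOT */
	ret = dquot_reserve_block(inode, 1);

	/* later, when the block is really allocated: convert the reservation */
	dquot_claim_block(inode, 1);
#endif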
1713 * This operation can block, but only after everything is updated
1715 void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
1718 struct dquot_warn warn[MAXQUOTAS];
1719 struct dquot **dquots = inode->i_dquot;
1720 int reserve = flags & DQUOT_SPACE_RESERVE;
1722 /* First test before acquiring mutex - solves deadlocks when we
1723 * re-enter the quota code and are already holding the mutex */
1724 if (!dquot_active(inode)) {
1725 inode_decr_space(inode, number, reserve);
1729 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1730 spin_lock(&dq_data_lock);
1731 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1734 warn[cnt].w_type = QUOTA_NL_NOWARN;
1737 wtype = info_bdq_free(dquots[cnt], number);
1738 if (wtype != QUOTA_NL_NOWARN)
1739 prepare_warning(&warn[cnt], dquots[cnt], wtype);
1741 dquot_free_reserved_space(dquots[cnt], number);
1743 dquot_decr_space(dquots[cnt], number);
1745 inode_decr_space(inode, number, reserve);
1746 spin_unlock(&dq_data_lock);
1750 mark_all_dquot_dirty(dquots);
1752 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1753 flush_warnings(warn);
1755 EXPORT_SYMBOL(__dquot_free_space);
1758 * This operation can block, but only after everything is updated
1760 void dquot_free_inode(const struct inode *inode)
1763 struct dquot_warn warn[MAXQUOTAS];
1764 struct dquot * const *dquots = inode->i_dquot;
1766 /* First test before acquiring mutex - solves deadlocks when we
1767 * re-enter the quota code and are already holding the mutex */
1768 if (!dquot_active(inode))
1771 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1772 spin_lock(&dq_data_lock);
1773 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1776 warn[cnt].w_type = QUOTA_NL_NOWARN;
1779 wtype = info_idq_free(dquots[cnt], 1);
1780 if (wtype != QUOTA_NL_NOWARN)
1781 prepare_warning(&warn[cnt], dquots[cnt], wtype);
1782 dquot_decr_inodes(dquots[cnt], 1);
1784 spin_unlock(&dq_data_lock);
1785 mark_all_dquot_dirty(dquots);
1786 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1787 flush_warnings(warn);
1789 EXPORT_SYMBOL(dquot_free_inode);
1792 * Transfer the number of inodes and blocks from one diskquota to another.
1793 * On success, dquot references in transfer_to are consumed and references
1794 * to original dquots that need to be released are placed there. On failure,
1795 * references are kept untouched.
1797 * This operation can block, but only after everything is updated
1798 * A transaction must be started when entering this function.
1801 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1803 qsize_t space, cur_space;
1804 qsize_t rsv_space = 0;
1805 struct dquot *transfer_from[MAXQUOTAS] = {};
1807 char is_valid[MAXQUOTAS] = {};
1808 struct dquot_warn warn_to[MAXQUOTAS];
1809 struct dquot_warn warn_from_inodes[MAXQUOTAS];
1810 struct dquot_warn warn_from_space[MAXQUOTAS];
1812 /* First test before acquiring mutex - solves deadlocks when we
1813 * re-enter the quota code and are already holding the mutex */
1814 if (IS_NOQUOTA(inode))
1816 /* Initialize the arrays */
1817 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1818 warn_to[cnt].w_type = QUOTA_NL_NOWARN;
1819 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
1820 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
1822 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1823 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
1824 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1827 spin_lock(&dq_data_lock);
1828 cur_space = inode_get_bytes(inode);
1829 rsv_space = inode_get_rsv_space(inode);
1830 space = cur_space + rsv_space;
1831 /* Build the transfer_from list and check the limits */
1832 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1834 * Skip changes for the same uid or gid, or for a quota type that is turned off.
1836 if (!transfer_to[cnt])
1838 /* Avoid races with quotaoff() */
1839 if (!sb_has_quota_active(inode->i_sb, cnt))
1842 transfer_from[cnt] = inode->i_dquot[cnt];
1843 ret = check_idq(transfer_to[cnt], 1, &warn_to[cnt]);
1846 ret = check_bdq(transfer_to[cnt], space, 0, &warn_to[cnt]);
1852 * Finally perform the needed transfer from transfer_from to transfer_to
1854 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1857 /* Due to an IO error we might not have a transfer_from[] structure */
1858 if (transfer_from[cnt]) {
1860 wtype = info_idq_free(transfer_from[cnt], 1);
1861 if (wtype != QUOTA_NL_NOWARN)
1862 prepare_warning(&warn_from_inodes[cnt],
1863 transfer_from[cnt], wtype);
1864 wtype = info_bdq_free(transfer_from[cnt], space);
1865 if (wtype != QUOTA_NL_NOWARN)
1866 prepare_warning(&warn_from_space[cnt],
1867 transfer_from[cnt], wtype);
1868 dquot_decr_inodes(transfer_from[cnt], 1);
1869 dquot_decr_space(transfer_from[cnt], cur_space);
1870 dquot_free_reserved_space(transfer_from[cnt],
1874 dquot_incr_inodes(transfer_to[cnt], 1);
1875 dquot_incr_space(transfer_to[cnt], cur_space);
1876 dquot_resv_space(transfer_to[cnt], rsv_space);
1878 inode->i_dquot[cnt] = transfer_to[cnt];
1880 spin_unlock(&dq_data_lock);
1881 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1883 mark_all_dquot_dirty(transfer_from);
1884 mark_all_dquot_dirty(transfer_to);
1885 flush_warnings(warn_to);
1886 flush_warnings(warn_from_inodes);
1887 flush_warnings(warn_from_space);
1888 /* Pass back references to put */
1889 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1891 transfer_to[cnt] = transfer_from[cnt];
1894 spin_unlock(&dq_data_lock);
1895 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1896 flush_warnings(warn_to);
1899 EXPORT_SYMBOL(__dquot_transfer);
1901 /* Wrapper for transferring ownership of an inode for uid/gid only.
1902 * Called from FSXXX_setattr().
1904 int dquot_transfer(struct inode *inode, struct iattr *iattr)
1906 struct dquot *transfer_to[MAXQUOTAS] = {};
1907 struct super_block *sb = inode->i_sb;
1910 if (!dquot_active(inode))
1913 if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid))
1914 transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(iattr->ia_uid));
1915 if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))
1916 transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(iattr->ia_gid));
1918 ret = __dquot_transfer(inode, transfer_to);
1919 dqput_all(transfer_to);
1922 EXPORT_SYMBOL(dquot_transfer);
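/*
 * Sketch of the usual call site in a filesystem's ->setattr (compare
 * ext2_setattr); quotas must be initialized and, for journaling
 * filesystems, a transaction started:
 */
#if 0
	if (is_quota_modification(inode, iattr))
		dquot_initialize(inode);
	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
		error = dquot_transfer(inode, iattr);
		if (error)
			return error;
	}
#endif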
1925 * Write info of quota file to disk
1927 int dquot_commit_info(struct super_block *sb, int type)
1930 struct quota_info *dqopt = sb_dqopt(sb);
1932 mutex_lock(&dqopt->dqio_mutex);
1933 ret = dqopt->ops[type]->write_file_info(sb, type);
1934 mutex_unlock(&dqopt->dqio_mutex);
1937 EXPORT_SYMBOL(dquot_commit_info);
1940 * Definitions of diskquota operations.
1942 const struct dquot_operations dquot_operations = {
1943 .write_dquot = dquot_commit,
1944 .acquire_dquot = dquot_acquire,
1945 .release_dquot = dquot_release,
1946 .mark_dirty = dquot_mark_dquot_dirty,
1947 .write_info = dquot_commit_info,
1948 .alloc_dquot = dquot_alloc,
1949 .destroy_dquot = dquot_destroy,
1951 EXPORT_SYMBOL(dquot_operations);
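/*
 * Sketch: a filesystem reusing the generic implementation points its
 * super_block at this table during mount (the s_qcop line is an assumption;
 * filesystems may supply their own quotactl_ops):
 */
#if 0
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &dquot_quotactl_ops;
#endif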
1954 * Generic helper for ->open on filesystems supporting disk quotas.
1956 int dquot_file_open(struct inode *inode, struct file *file)
1960 error = generic_file_open(inode, file);
1961 if (!error && (file->f_mode & FMODE_WRITE))
1962 dquot_initialize(inode);
1965 EXPORT_SYMBOL(dquot_file_open);
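/*
 * Sketch: filesystems wire this in as the ->open of their regular-file
 * file_operations so quotas are initialized before the first write
 * (hypothetical table, only .open shown; compare ext2_file_operations):
 */
#if 0
const struct file_operations myfs_file_operations = {
	.open = dquot_file_open,
};
#endif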
1968 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
1970 int dquot_disable(struct super_block *sb, int type, unsigned int flags)
1973 struct quota_info *dqopt = sb_dqopt(sb);
1974 struct inode *toputinode[MAXQUOTAS];
1976 /* Cannot turn off usage accounting without turning off limits, or
1977 * suspend quotas and simultaneously turn quotas off. */
1978 if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
1979 || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
1980 DQUOT_USAGE_ENABLED)))
1983 /* We need to serialize quota_off() for device */
1984 mutex_lock(&dqopt->dqonoff_mutex);
1987 * Skip everything if there's nothing to do. We have to do this because
1988 * sometimes we are called when fill_super() failed and calling
1989 * sync_fs() in such cases does no good.
1991 if (!sb_any_quota_loaded(sb)) {
1992 mutex_unlock(&dqopt->dqonoff_mutex);
1995 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1996 toputinode[cnt] = NULL;
1997 if (type != -1 && cnt != type)
1999 if (!sb_has_quota_loaded(sb, cnt))
2002 if (flags & DQUOT_SUSPENDED) {
2003 spin_lock(&dq_state_lock);
2005 dquot_state_flag(DQUOT_SUSPENDED, cnt);
2006 spin_unlock(&dq_state_lock);
2008 spin_lock(&dq_state_lock);
2009 dqopt->flags &= ~dquot_state_flag(flags, cnt);
2010 /* Turning off suspended quotas? */
2011 if (!sb_has_quota_loaded(sb, cnt) &&
2012 sb_has_quota_suspended(sb, cnt)) {
2013 dqopt->flags &= ~dquot_state_flag(
2014 DQUOT_SUSPENDED, cnt);
2015 spin_unlock(&dq_state_lock);
2016 iput(dqopt->files[cnt]);
2017 dqopt->files[cnt] = NULL;
2020 spin_unlock(&dq_state_lock);
2023 /* Do we still have to keep quota loaded? */
2024 if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
2027 /* Note: these are blocking operations */
2028 drop_dquot_ref(sb, cnt);
2029 invalidate_dquots(sb, cnt);
2031 * Now all dquots should be invalidated and all writes done, so we
2032 * should be the only users of the info. No locks needed.
2034 if (info_dirty(&dqopt->info[cnt]))
2035 sb->dq_op->write_info(sb, cnt);
2036 if (dqopt->ops[cnt]->free_file_info)
2037 dqopt->ops[cnt]->free_file_info(sb, cnt);
2038 put_quota_format(dqopt->info[cnt].dqi_format);
2040 toputinode[cnt] = dqopt->files[cnt];
2041 if (!sb_has_quota_loaded(sb, cnt))
2042 dqopt->files[cnt] = NULL;
2043 dqopt->info[cnt].dqi_flags = 0;
2044 dqopt->info[cnt].dqi_igrace = 0;
2045 dqopt->info[cnt].dqi_bgrace = 0;
2046 dqopt->ops[cnt] = NULL;
2048 mutex_unlock(&dqopt->dqonoff_mutex);
2050 /* Skip syncing and setting flags if quota files are hidden */
2051 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
2054 /* Sync the superblock so that buffers with quota data are written to
2055 * disk (and so userspace sees correct data afterwards). */
2056 if (sb->s_op->sync_fs)
2057 sb->s_op->sync_fs(sb, 1);
2058 sync_blockdev(sb->s_bdev);
2059 /* Now the quota files are just ordinary files and we can set the
2060 * inode flags back. Moreover we discard the pagecache so that
2061 * userspace sees the writes we did bypassing the pagecache. We
2062 * must also discard the blockdev buffers so that we see the
2063 * changes done by userspace on the next quotaon() */
2064 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2065 if (toputinode[cnt]) {
2066 mutex_lock(&dqopt->dqonoff_mutex);
2067 /* If quota was reenabled in the meantime, we have
2068 * nothing to do */
2069 if (!sb_has_quota_loaded(sb, cnt)) {
2070 mutex_lock(&toputinode[cnt]->i_mutex);
2071 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
2072 S_NOATIME | S_NOQUOTA);
2073 truncate_inode_pages(&toputinode[cnt]->i_data,
2075 mutex_unlock(&toputinode[cnt]->i_mutex);
2076 mark_inode_dirty_sync(toputinode[cnt]);
2078 mutex_unlock(&dqopt->dqonoff_mutex);
2081 invalidate_bdev(sb->s_bdev);
2083 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2084 if (toputinode[cnt]) {
2085 /* On remount RO, we keep the inode pointer so that we
2086 * can reenable quota on the subsequent remount RW. We
2087 * have to check the 'flags' variable and not use the sb_has_*
2088 * functions because another quotaon / quotaoff could
2089 * change the global state before we got here. We refuse
2090 * to suspend quotas when there is a pending delete on
2091 * the quota file... */
2092 if (!(flags & DQUOT_SUSPENDED))
2093 iput(toputinode[cnt]);
2094 else if (!toputinode[cnt]->i_nlink)
2099 EXPORT_SYMBOL(dquot_disable);
2101 int dquot_quota_off(struct super_block *sb, int type)
2103 return dquot_disable(sb, type,
2104 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2106 EXPORT_SYMBOL(dquot_quota_off);
2109 * Turn quotas on for a device
2113 * Helper function to turn quotas on when we already have the inode of
2114 * quota file and no quota information is loaded.
2116 static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
2119 struct quota_format_type *fmt = find_quota_format(format_id);
2120 struct super_block *sb = inode->i_sb;
2121 struct quota_info *dqopt = sb_dqopt(sb);
2127 if (!S_ISREG(inode->i_mode)) {
2131 if (IS_RDONLY(inode)) {
2135 if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
2139 /* Usage always has to be set... */
2140 if (!(flags & DQUOT_USAGE_ENABLED)) {
2145 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2146 /* As we bypass the pagecache we must now flush all the
2147 * dirty data and invalidate caches so that the kernel sees
2148 * changes from userspace. It is not enough to just flush
2149 * the quota file since if blocksize < pagesize, invalidation
2150 * of the cache could fail because of other unrelated dirty
2151 * data */
2152 sync_filesystem(sb);
2153 invalidate_bdev(sb->s_bdev);
2155 mutex_lock(&dqopt->dqonoff_mutex);
2156 if (sb_has_quota_loaded(sb, type)) {
2161 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2162 /* We don't want quota and atime on quota files (deadlocks
2163 * possible) Also nobody should write to the file - we use
2164 * special IO operations which ignore the immutable bit. */
2165 mutex_lock(&inode->i_mutex);
2166 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
2168 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
2169 mutex_unlock(&inode->i_mutex);
2171 * When S_NOQUOTA is set, remove dquot references as no more
2172 * references can be added
2174 __dquot_drop(inode);
2178 dqopt->files[type] = igrab(inode);
2179 if (!dqopt->files[type])
2182 if (!fmt->qf_ops->check_quota_file(sb, type))
2185 dqopt->ops[type] = fmt->qf_ops;
2186 dqopt->info[type].dqi_format = fmt;
2187 dqopt->info[type].dqi_fmt_id = format_id;
2188 INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
2189 mutex_lock(&dqopt->dqio_mutex);
2190 error = dqopt->ops[type]->read_file_info(sb, type);
2192 mutex_unlock(&dqopt->dqio_mutex);
2195 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
2196 dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
2197 mutex_unlock(&dqopt->dqio_mutex);
2198 spin_lock(&dq_state_lock);
2199 dqopt->flags |= dquot_state_flag(flags, type);
2200 spin_unlock(&dq_state_lock);
2202 add_dquot_ref(sb, type);
2203 mutex_unlock(&dqopt->dqonoff_mutex);
2208 dqopt->files[type] = NULL;
2211 if (oldflags != -1) {
2212 mutex_lock(&inode->i_mutex);
2213 /* Set the flags back (in the case of accidental quotaon()
2214 * on a wrong file we don't want to mess up the flags) */
2215 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
2216 inode->i_flags |= oldflags;
2217 mutex_unlock(&inode->i_mutex);
2219 mutex_unlock(&dqopt->dqonoff_mutex);
2221 put_quota_format(fmt);
/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode;
	int ret = 0, cnt;
	unsigned int flags;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;

		mutex_lock(&dqopt->dqonoff_mutex);
		if (!sb_has_quota_suspended(sb, cnt)) {
			mutex_unlock(&dqopt->dqonoff_mutex);
			continue;
		}
		inode = dqopt->files[cnt];
		dqopt->files[cnt] = NULL;
		spin_lock(&dq_state_lock);
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);
		mutex_unlock(&dqopt->dqonoff_mutex);

		flags = dquot_generic_flag(flags, cnt);
		ret = vfs_load_quota_inode(inode, cnt,
				dqopt->info[cnt].dqi_fmt_id, flags);
		iput(inode);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);
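
/*
 * Sketch (under the same assumptions as the suspend example above): the
 * counterpart of a remount-RO suspend is a remount from RO to RW, which
 * re-enables the suspended quotas from the filesystem's remount handler:
 *
 *	if (!(*flags & MS_RDONLY))
 *		dquot_resume(sb, -1);	// resume all suspended types
 */
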
int dquot_quota_on(struct super_block *sb, int type, int format_id,
		   struct path *path)
{
	int error = security_quota_on(path->dentry);
	if (error)
		return error;
	/* Quota file not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		error = -EXDEV;
	else
		error = vfs_load_quota_inode(path->dentry->d_inode, type,
					     format_id, DQUOT_USAGE_ENABLED |
					     DQUOT_LIMITS_ENABLED);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on);
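
/*
 * Illustrative: this is what a Q_QUOTAON quotactl ends up calling for
 * filesystems using the generic code. From userspace the request looks
 * roughly like (hypothetical device and quota file paths):
 *
 *	quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/sda1", QFMT_VFS_V0,
 *		 (caddr_t)"/mnt/aquota.user");
 *
 * The path argument must resolve to a regular file on the same
 * filesystem, which is what the d_sb check above enforces.
 */
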
/*
 * More powerful function for turning on quotas allowing setting
 * of individual quota flags
 */
int dquot_enable(struct inode *inode, int type, int format_id,
		 unsigned int flags)
{
	int ret = 0;
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);

	/* Just unsuspend quotas? */
	BUG_ON(flags & DQUOT_SUSPENDED);

	if (!flags)
		return 0;
	/* Just updating flags needed? */
	if (sb_has_quota_loaded(sb, type)) {
		mutex_lock(&dqopt->dqonoff_mutex);
		/* Now do a reliable test... */
		if (!sb_has_quota_loaded(sb, type)) {
			mutex_unlock(&dqopt->dqonoff_mutex);
			goto load_quota;
		}
		if (flags & DQUOT_USAGE_ENABLED &&
		    sb_has_quota_usage_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_lock;
		}
		if (flags & DQUOT_LIMITS_ENABLED &&
		    sb_has_quota_limits_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_lock;
		}
		spin_lock(&dq_state_lock);
		sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
		spin_unlock(&dq_state_lock);
out_lock:
		mutex_unlock(&dqopt->dqonoff_mutex);
		return ret;
	}

load_quota:
	return vfs_load_quota_inode(inode, type, format_id, flags);
}
EXPORT_SYMBOL(dquot_enable);
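
/*
 * Sketch (assumption: a filesystem with hidden system quota files, in the
 * style of ext4): such a filesystem can enable quotas on an inode it
 * already holds, choosing accounting and limits independently:
 *
 *	err = dquot_enable(qf_inode, type, QFMT_VFS_V1,
 *			   DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 *
 * A later call asking for DQUOT_LIMITS_ENABLED on a type whose limits are
 * already on fails with -EBUSY, per the checks above.
 */
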
/*
 * This function is used when filesystem needs to initialize quotas
 * during mount time.
 */
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
		int format_id, int type)
{
	struct dentry *dentry;
	int error;

	mutex_lock(&sb->s_root->d_inode->i_mutex);
	dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
	mutex_unlock(&sb->s_root->d_inode->i_mutex);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (!dentry->d_inode) {
		error = -ENOENT;
		goto out;
	}

	error = security_quota_on(dentry);
	if (!error)
		error = vfs_load_quota_inode(dentry->d_inode, type, format_id,
				DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

out:
	dput(dentry);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on_mount);
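
/*
 * Sketch (hypothetical names): an ext2-style filesystem with quota file
 * names passed on the mount command line would call this while mounting:
 *
 *	if (sbi->s_qf_names[USRQUOTA])
 *		err = dquot_quota_on_mount(sb, sbi->s_qf_names[USRQUOTA],
 *					   QFMT_VFS_V0, USRQUOTA);
 *
 * The lookup is done with lookup_one_len() under the root directory's
 * i_mutex, so the quota file must live directly in the filesystem root.
 */
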
static inline qsize_t qbtos(qsize_t blocks)
{
	return blocks << QIF_DQBLKSIZE_BITS;
}

static inline qsize_t stoqb(qsize_t space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}
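
/*
 * Worked example: with QIF_DQBLKSIZE_BITS == 10 these convert between
 * 1KB quota blocks and bytes, rounding the byte-to-block direction up:
 *
 *	qbtos(8)    == 8192	// 8 blocks -> 8192 bytes
 *	stoqb(8192) == 8	// exact multiple
 *	stoqb(8193) == 9	// a partial block still counts
 */
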
/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;

	memset(di, 0, sizeof(*di));
	di->d_version = FS_DQUOT_VERSION;
	di->d_flags = dquot->dq_id.type == USRQUOTA ?
			FS_USER_QUOTA : FS_GROUP_QUOTA;
	di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);

	spin_lock(&dq_data_lock);
	di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
	di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
	di->d_ino_hardlimit = dm->dqb_ihardlimit;
	di->d_ino_softlimit = dm->dqb_isoftlimit;
	di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
	di->d_icount = dm->dqb_curinodes;
	di->d_btimer = dm->dqb_btime;
	di->d_itimer = dm->dqb_itime;
	spin_unlock(&dq_data_lock);
}

int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
		    struct fs_disk_quota *di)
{
	struct dquot *dquot;

	dquot = dqget(sb, qid);
	if (!dquot)
		return -ESRCH;
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);
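
/*
 * Illustrative caller (not from this file): the Q_GETQUOTA handling in
 * the quotactl code does essentially this:
 *
 *	struct fs_disk_quota fdq;
 *	struct kqid qid = make_kqid(current_user_ns(), USRQUOTA, uid);
 *
 *	err = sb->s_qcop->get_dqblk(sb, qid, &fdq);
 *
 * which, for filesystems using the generic ops, lands in
 * dquot_get_dqblk() above.
 */
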
#define VFS_FS_DQ_MASK \
	(FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
	 FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
	 FS_DQ_BTIMER | FS_DQ_ITIMER)

/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
		return -EINVAL;

	if (((di->d_fieldmask & FS_DQ_BSOFT) &&
	     (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
	    ((di->d_fieldmask & FS_DQ_BHARD) &&
	     (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
	    ((di->d_fieldmask & FS_DQ_ISOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
	    ((di->d_fieldmask & FS_DQ_IHARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
		return -ERANGE;

	spin_lock(&dq_data_lock);
	if (di->d_fieldmask & FS_DQ_BCOUNT) {
		dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_BSOFT)
		dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
	if (di->d_fieldmask & FS_DQ_BHARD)
		dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
	if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_ICOUNT) {
		dm->dqb_curinodes = di->d_icount;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_ISOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & FS_DQ_IHARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_BTIMER) {
		dm->dqb_btime = di->d_btimer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_ITIMER) {
		dm->dqb_itime = di->d_itimer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & FS_DQ_BTIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & FS_DQ_ITIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
	}
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dq_data_lock);
	mark_dquot_dirty(dquot);

	return 0;
}

int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
		  struct fs_disk_quota *di)
{
	struct dquot *dquot;
	int rc;

	dquot = dqget(sb, qid);
	if (!dquot) {
		rc = -ESRCH;
		goto out;
	}
	rc = do_set_dqblk(dquot, di);
	dqput(dquot);
out:
	return rc;
}
EXPORT_SYMBOL(dquot_set_dqblk);
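
/*
 * Sketch (hedged example): setting just the block limits for an id,
 * leaving counts and timers untouched, only needs the matching
 * d_fieldmask bits:
 *
 *	struct fs_disk_quota fdq = {
 *		.d_version       = FS_DQUOT_VERSION,
 *		.d_fieldmask     = FS_DQ_BSOFT | FS_DQ_BHARD,
 *		.d_blk_softlimit = 1000,	// in 1KB quota blocks
 *		.d_blk_hardlimit = 1200,
 *	};
 *
 *	err = dquot_set_dqblk(sb, qid, &fdq);
 *
 * do_set_dqblk() then rechecks the softlimit and arms or clears the
 * grace timer as appropriate.
 */
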
/* Generic routine for getting common part of quota file information */
int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
	struct mem_dqinfo *mi;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	if (!sb_has_quota_active(sb, type)) {
		mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
		return -ESRCH;
	}
	mi = sb_dqopt(sb)->info + type;
	spin_lock(&dq_data_lock);
	ii->dqi_bgrace = mi->dqi_bgrace;
	ii->dqi_igrace = mi->dqi_igrace;
	ii->dqi_flags = mi->dqi_flags & DQF_GETINFO_MASK;
	ii->dqi_valid = IIF_ALL;
	spin_unlock(&dq_data_lock);
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	return 0;
}
EXPORT_SYMBOL(dquot_get_dqinfo);

/* Generic routine for setting common part of quota file information */
int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
	struct mem_dqinfo *mi;
	int err = 0;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	if (!sb_has_quota_active(sb, type)) {
		err = -ESRCH;
		goto out;
	}
	mi = sb_dqopt(sb)->info + type;
	spin_lock(&dq_data_lock);
	if (ii->dqi_valid & IIF_BGRACE)
		mi->dqi_bgrace = ii->dqi_bgrace;
	if (ii->dqi_valid & IIF_IGRACE)
		mi->dqi_igrace = ii->dqi_igrace;
	if (ii->dqi_valid & IIF_FLAGS)
		mi->dqi_flags = (mi->dqi_flags & ~DQF_SETINFO_MASK) |
				(ii->dqi_flags & DQF_SETINFO_MASK);
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
	sb->dq_op->write_info(sb, type);
out:
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	return err;
}
EXPORT_SYMBOL(dquot_set_dqinfo);
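
/*
 * Sketch: Q_SETINFO uses this to update grace periods; setting only the
 * block grace time looks like (hedged example):
 *
 *	struct if_dqinfo ii = {
 *		.dqi_valid  = IIF_BGRACE,
 *		.dqi_bgrace = 7 * 24 * 60 * 60,	// one week, in seconds
 *	};
 *
 *	err = dquot_set_dqinfo(sb, USRQUOTA, &ii);
 */
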
const struct quotactl_ops dquot_quotactl_ops = {
	.quota_on	= dquot_quota_on,
	.quota_off	= dquot_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_info	= dquot_get_dqinfo,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_ops);
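
/*
 * A filesystem that relies fully on the generic quota code just points
 * its superblock at this table during mount (sketch):
 *
 *	sb->s_qcop = &dquot_quotactl_ops;
 *	sb->dq_op  = &dquot_operations;
 *
 * after which quotactl(2) requests are routed to the dquot_* helpers
 * above.
 */
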
static int do_proc_dqstats(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int type = (int *)table->data - dqstats.stat;

	/* Update global table */
	dqstats.stat[type] =
			percpu_counter_sum_positive(&dqstats.counter[type]);
	return proc_dointvec(table, write, buffer, lenp, ppos);
}

static ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.stat[DQST_LOOKUPS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.stat[DQST_DROPS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.stat[DQST_READS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.stat[DQST_WRITES],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.stat[DQST_CACHE_HITS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.stat[DQST_SYNCS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
	{ },
};

static ctl_table fs_table[] = {
	{
		.procname	= "quota",
		.mode		= 0555,
		.child		= fs_dqstats_table,
	},
	{ },
};

static ctl_table sys_table[] = {
	{
		.procname	= "fs",
		.mode		= 0555,
		.child		= fs_table,
	},
	{ },
};
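
/*
 * The tables above surface the dquot statistics under /proc/sys/fs/quota,
 * readable from userspace, e.g. (illustrative shell session):
 *
 *	# cat /proc/sys/fs/quota/lookups
 *	42
 *
 * Each access goes through do_proc_dqstats(), which folds the per-cpu
 * counters into the flat dqstats.stat[] array before proc_dointvec()
 * reports the value.
 */
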
static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_table(sys_table);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			NULL);

	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
		ret = percpu_counter_init(&dqstats.counter[i], 0);
		if (ret)
			panic("Cannot create dquot stat counters");
	}

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = 0;
	do {
		dq_hash_bits++;
	} while (nr_hash >> dq_hash_bits);
	dq_hash_bits--;

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
			nr_hash, order, (PAGE_SIZE << order));

	register_shrinker(&dqcache_shrinker);

	return 0;
}
module_init(dquot_init);
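
/*
 * Sizing example for the hash table above (assuming PAGE_SIZE == 4096 and
 * 8-byte pointers): with order 0 the allocation holds
 * 4096 / sizeof(struct hlist_head) == 512 heads; the do/while loop stops
 * once 512 >> dq_hash_bits reaches 0 and the decrement leaves
 * dq_hash_bits == 9, so nr_hash == 512 and dq_hash_mask == 511.
 */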