2 * Implementation of the diskquota system for the LINUX operating system. QUOTA
3 * is implemented using the BSD system call interface as the means of
4 * communication with the user level. This file contains the generic routines
5 * called by the different filesystems on allocation of an inode or block.
6 * These routines take care of the administration needed to have a consistent
7 * diskquota tracking system. The ideas of both user and group quotas are based
8 * on the Melbourne quota system as used on BSD derived systems. The internal
9 * implementation is based on one of the several variants of the LINUX
10 * inode-subsystem with added complexity of the diskquota system.
12 * Author: Marco van Wieringen <mvw@planets.elm.net>
14 * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
16 * Revised list management to avoid races
17 * -- Bill Hawes, <whawes@star.net>, 9/98
19 * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
20 * As a consequence the locking was moved from dquot_decr_...(),
21 * dquot_incr_...() to calling functions.
22 * invalidate_dquots() now writes modified dquots.
23 * Serialized quota_off() and quota_on() for mount point.
24 * Fixed a few bugs in grow_dquots().
25 * Fixed deadlock in write_dquot() - we no longer account quotas on
26 * quota files
27 * remove_dquot_ref() moved to inode.c - it now traverses through inodes
28 * add_dquot_ref() restarts after blocking
29 * Added check for bogus uid and fixed check for group in quotactl.
30 * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
32 * Used struct list_head instead of own list struct
33 * Invalidation of referenced dquots is no longer possible
34 * Improved free_dquots list management
35 * Quota and i_blocks are now updated in one place to avoid races
36 * Warnings are now delayed so we won't block in critical section
37 * Write updated not to require dquot lock
38 * Jan Kara, <jack@suse.cz>, 9/2000
40 * Added dynamic quota structure allocation
41 * Jan Kara <jack@suse.cz> 12/2000
43 * Rewritten quota interface. Implemented new quota format and
44 * formats registering.
45 * Jan Kara, <jack@suse.cz>, 2001,2002
48 * Jan Kara, <jack@suse.cz>, 10/2002
50 * Added journalled quota support, fix lock inversion problems
51 * Jan Kara, <jack@suse.cz>, 2003,2004
53 * (C) Copyright 1994 - 1997 Marco van Wieringen
56 #include <linux/errno.h>
57 #include <linux/kernel.h>
59 #include <linux/mount.h>
61 #include <linux/time.h>
62 #include <linux/types.h>
63 #include <linux/string.h>
64 #include <linux/fcntl.h>
65 #include <linux/stat.h>
66 #include <linux/tty.h>
67 #include <linux/file.h>
68 #include <linux/slab.h>
69 #include <linux/sysctl.h>
70 #include <linux/init.h>
71 #include <linux/module.h>
72 #include <linux/proc_fs.h>
73 #include <linux/security.h>
74 #include <linux/sched.h>
75 #include <linux/kmod.h>
76 #include <linux/namei.h>
77 #include <linux/capability.h>
78 #include <linux/quotaops.h>
79 #include "../internal.h" /* ugh */
81 #include <linux/uaccess.h>
84 * There are three quota SMP locks. dq_list_lock protects all lists with
85 * quotas and quota formats.
86 * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
87 * guards consistency of dquot->dq_dqb with inode->i_blocks and i_bytes.
88 * Updates of i_blocks and i_bytes themselves are guarded by i_lock acquired
89 * directly in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
90 * modifications of quota state (on quotaon and quotaoff) and readers who care
91 * about the latest values take it as well.
93 * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
94 * dq_list_lock > dq_state_lock
96 * Note that some things (e.g. sb pointer, type, id) don't change during
97 * the life of the dquot structure and so need not be protected by a lock.
99 * Any operation working on dquots via inode pointers must hold dqptr_sem. If
100 * operation is just reading pointers from inode (or not using them at all) the
101 * read lock is enough. If pointers are altered function must hold write lock.
102 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
103 * inode is a quota file). Functions adding pointers from inode to dquots have
104 * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
105 * have to do all pointer modifications before dropping dqptr_sem. This makes
106 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
107 * then drops all pointers to dquots from an inode.
109 * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
110 * from inodes (dquot_alloc_space() and such don't check the dq_lock).
111 * Currently dquot is locked only when it is being read to memory (or space for
112 * it is being allocated) on the first dqget() and when it is being released on
113 * the last dqput(). The allocation and release operations are serialized by
114 * the dq_lock and by checking the use count in dquot_release(). Write
115 * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
116 * spinlock to internal buffers before writing.
118 * Lock ordering (including related VFS locks) is the following:
119 * dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock >
121 * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc.
122 * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_mutex >
123 * dqptr_sem. But a filesystem has to account for the fact that functions such as
124 * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
125 * from inside a transaction to keep filesystem consistency after a crash. Also
126 * filesystems usually want to do some IO on dquot from ->mark_dirty which is
127 * called with dqptr_sem held.
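/*
 * An illustrative sketch (not part of the build): with the ordering above,
 * a space allocation in a hypothetical journalled filesystem nests roughly
 * like this, where journal_start()/journal_stop() stand in for the
 * filesystem's own transaction calls:
 *
 *	mutex_lock(&inode->i_mutex);
 *	handle = journal_start(...);			journal_lock level
 *	down_read(&sb_dqopt(sb)->dqptr_sem);		taken in dquot_alloc_space()
 *	spin_lock(&dq_data_lock);
 *	...check limits, update dquot->dq_dqb and i_blocks/i_bytes...
 *	spin_unlock(&dq_data_lock);
 *	up_read(&sb_dqopt(sb)->dqptr_sem);
 *	journal_stop(handle);
 *	mutex_unlock(&inode->i_mutex);
 */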
130 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
131 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
132 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
133 EXPORT_SYMBOL(dq_data_lock);
135 void __quota_error(struct super_block *sb, const char *func,
136 const char *fmt, ...)
138 if (printk_ratelimit()) {
140 struct va_format vaf;
147 printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
148 sb->s_id, func, &vaf);
153 EXPORT_SYMBOL(__quota_error);
155 #if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
156 static char *quotatypes[] = INITQFNAMES;
158 static struct quota_format_type *quota_formats; /* List of registered formats */
159 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
161 /* SLAB cache for dquot structures */
162 static struct kmem_cache *dquot_cachep;
164 int register_quota_format(struct quota_format_type *fmt)
166 spin_lock(&dq_list_lock);
167 fmt->qf_next = quota_formats;
169 spin_unlock(&dq_list_lock);
172 EXPORT_SYMBOL(register_quota_format);
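/*
 * Usage sketch (illustrative; this mirrors what quota format modules such
 * as quota_v2 do - the my_* names below are hypothetical):
 *
 *	static struct quota_format_type my_format = {
 *		.qf_fmt_id = QFMT_VFS_V1,	example format id
 *		.qf_ops = &my_format_ops,
 *		.qf_owner = THIS_MODULE,
 *	};
 *
 *	static int __init init_my_format(void)
 *	{
 *		return register_quota_format(&my_format);
 *	}
 *
 *	static void __exit exit_my_format(void)
 *	{
 *		unregister_quota_format(&my_format);
 *	}
 */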
174 void unregister_quota_format(struct quota_format_type *fmt)
176 struct quota_format_type **actqf;
178 spin_lock(&dq_list_lock);
179 for (actqf = &quota_formats; *actqf && *actqf != fmt;
180 actqf = &(*actqf)->qf_next)
183 *actqf = (*actqf)->qf_next;
184 spin_unlock(&dq_list_lock);
186 EXPORT_SYMBOL(unregister_quota_format);
188 static struct quota_format_type *find_quota_format(int id)
190 struct quota_format_type *actqf;
192 spin_lock(&dq_list_lock);
193 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
194 actqf = actqf->qf_next)
196 if (!actqf || !try_module_get(actqf->qf_owner)) {
199 spin_unlock(&dq_list_lock);
201 for (qm = 0; module_names[qm].qm_fmt_id &&
202 module_names[qm].qm_fmt_id != id; qm++)
204 if (!module_names[qm].qm_fmt_id ||
205 request_module(module_names[qm].qm_mod_name))
208 spin_lock(&dq_list_lock);
209 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
210 actqf = actqf->qf_next)
212 if (actqf && !try_module_get(actqf->qf_owner))
215 spin_unlock(&dq_list_lock);
219 static void put_quota_format(struct quota_format_type *fmt)
221 module_put(fmt->qf_owner);
225 * Dquot List Management:
226 * The quota code uses three lists for dquot management: the inuse_list,
227 * free_dquots, and dquot_hash[] array. A single dquot structure may be
228 * on all three lists, depending on its current state.
230 * All dquots are placed to the end of inuse_list when first created, and this
231 * list is used for invalidate operation, which must look at every dquot.
233 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
234 * and this list is searched whenever we need an available dquot. Dquots are
235 * removed from the list as soon as they are used again, and
236 * dqstats.free_dquots gives the number of dquots on the list. When
237 * dquot is invalidated it's completely released from memory.
239 * Dquots with a specific identity (device, type and id) are placed on
240 * one of the dquot_hash[] hash chains. This provides an efficient search
241 * mechanism to locate a specific dquot.
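/*
 * Life cycle sketch (illustrative): dqget() on a cache miss allocates via
 * get_empty_dquot() and then calls put_inuse() and insert_dquot_hash();
 * dqput() of the last reference ends with put_dquot_last(); a later dqget()
 * finds the dquot through find_dquot() and calls remove_free_dquot();
 * eventually prune_dqcache() or invalidate_dquots() unhashes and destroys it.
 */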
244 static LIST_HEAD(inuse_list);
245 static LIST_HEAD(free_dquots);
246 static unsigned int dq_hash_bits, dq_hash_mask;
247 static struct hlist_head *dquot_hash;
249 struct dqstats dqstats;
250 EXPORT_SYMBOL(dqstats);
252 static qsize_t inode_get_rsv_space(struct inode *inode);
253 static void __dquot_initialize(struct inode *inode, int type);
255 static inline unsigned int
256 hashfn(const struct super_block *sb, struct kqid qid)
258 unsigned int id = from_kqid(&init_user_ns, qid);
259 int type = qid.type;
260 unsigned long tmp;
262 tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
263 return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
267 * Following list functions expect dq_list_lock to be held
269 static inline void insert_dquot_hash(struct dquot *dquot)
271 struct hlist_head *head;
272 head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
273 hlist_add_head(&dquot->dq_hash, head);
276 static inline void remove_dquot_hash(struct dquot *dquot)
278 hlist_del_init(&dquot->dq_hash);
281 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
284 struct hlist_node *node;
287 hlist_for_each (node, dquot_hash+hashent) {
288 dquot = hlist_entry(node, struct dquot, dq_hash);
289 if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
295 /* Add a dquot to the tail of the free list */
296 static inline void put_dquot_last(struct dquot *dquot)
298 list_add_tail(&dquot->dq_free, &free_dquots);
299 dqstats_inc(DQST_FREE_DQUOTS);
302 static inline void remove_free_dquot(struct dquot *dquot)
304 if (list_empty(&dquot->dq_free))
306 list_del_init(&dquot->dq_free);
307 dqstats_dec(DQST_FREE_DQUOTS);
310 static inline void put_inuse(struct dquot *dquot)
312 /* We add to the back of the inuse list so we don't have to restart
313 * when traversing this list if we block */
314 list_add_tail(&dquot->dq_inuse, &inuse_list);
315 dqstats_inc(DQST_ALLOC_DQUOTS);
318 static inline void remove_inuse(struct dquot *dquot)
320 dqstats_dec(DQST_ALLOC_DQUOTS);
321 list_del(&dquot->dq_inuse);
324 * End of list functions needing dq_list_lock
327 static void wait_on_dquot(struct dquot *dquot)
329 mutex_lock(&dquot->dq_lock);
330 mutex_unlock(&dquot->dq_lock);
333 static inline int dquot_dirty(struct dquot *dquot)
335 return test_bit(DQ_MOD_B, &dquot->dq_flags);
338 static inline int mark_dquot_dirty(struct dquot *dquot)
340 return dquot->dq_sb->dq_op->mark_dirty(dquot);
343 /* Mark dquot dirty in atomic manner, and return its old dirty flag state */
344 int dquot_mark_dquot_dirty(struct dquot *dquot)
348 /* If quota is dirty already, we don't have to acquire dq_list_lock */
349 if (test_bit(DQ_MOD_B, &dquot->dq_flags))
352 spin_lock(&dq_list_lock);
353 if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
354 list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
355 info[dquot->dq_id.type].dqi_dirty_list);
358 spin_unlock(&dq_list_lock);
361 EXPORT_SYMBOL(dquot_mark_dquot_dirty);
363 /* Dirtify all the dquots - this can block when journalling */
364 static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
369 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
371 /* Even in case of error we have to continue */
372 ret = mark_dquot_dirty(dquot[cnt]);
379 static inline void dqput_all(struct dquot **dquot)
383 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
387 /* This function needs dq_list_lock */
388 static inline int clear_dquot_dirty(struct dquot *dquot)
390 if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
392 list_del_init(&dquot->dq_dirty);
396 void mark_info_dirty(struct super_block *sb, int type)
398 set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
400 EXPORT_SYMBOL(mark_info_dirty);
403 * Read dquot from disk and alloc space for it
406 int dquot_acquire(struct dquot *dquot)
408 int ret = 0, ret2 = 0;
409 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
411 mutex_lock(&dquot->dq_lock);
412 mutex_lock(&dqopt->dqio_mutex);
413 if (!test_bit(DQ_READ_B, &dquot->dq_flags))
414 ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
417 set_bit(DQ_READ_B, &dquot->dq_flags);
418 /* Instantiate dquot if needed */
419 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
420 ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
421 /* Write the info if needed */
422 if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
423 ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
424 dquot->dq_sb, dquot->dq_id.type);
433 set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
435 mutex_unlock(&dqopt->dqio_mutex);
436 mutex_unlock(&dquot->dq_lock);
439 EXPORT_SYMBOL(dquot_acquire);
442 * Write dquot to disk
444 int dquot_commit(struct dquot *dquot)
447 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
449 mutex_lock(&dqopt->dqio_mutex);
450 spin_lock(&dq_list_lock);
451 if (!clear_dquot_dirty(dquot)) {
452 spin_unlock(&dq_list_lock);
455 spin_unlock(&dq_list_lock);
456 /* A dquot can be inactive only if there was an error during read/init
457 * => we'd better not write it */
458 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
459 ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
463 mutex_unlock(&dqopt->dqio_mutex);
466 EXPORT_SYMBOL(dquot_commit);
471 int dquot_release(struct dquot *dquot)
473 int ret = 0, ret2 = 0;
474 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
476 mutex_lock(&dquot->dq_lock);
477 /* Check whether we are not racing with some other dqget() */
478 if (atomic_read(&dquot->dq_count) > 1)
480 mutex_lock(&dqopt->dqio_mutex);
481 if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
482 ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
484 if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
485 ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
486 dquot->dq_sb, dquot->dq_id.type);
491 clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
492 mutex_unlock(&dqopt->dqio_mutex);
494 mutex_unlock(&dquot->dq_lock);
497 EXPORT_SYMBOL(dquot_release);
499 void dquot_destroy(struct dquot *dquot)
501 kmem_cache_free(dquot_cachep, dquot);
503 EXPORT_SYMBOL(dquot_destroy);
505 static inline void do_destroy_dquot(struct dquot *dquot)
507 dquot->dq_sb->dq_op->destroy_dquot(dquot);
510 /* Invalidate all dquots on the list. Note that this function is called after
511 * quota is disabled and pointers from inodes removed so there cannot be new
512 * quota users. There can still be some users of quotas due to inodes being
513 * just deleted or pruned by prune_icache() (those are not attached to any
514 * list) or parallel quotactl call. We have to wait for such users.
516 static void invalidate_dquots(struct super_block *sb, int type)
518 struct dquot *dquot, *tmp;
521 spin_lock(&dq_list_lock);
522 list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
523 if (dquot->dq_sb != sb)
525 if (dquot->dq_id.type != type)
527 /* Wait for dquot users */
528 if (atomic_read(&dquot->dq_count)) {
531 atomic_inc(&dquot->dq_count);
532 prepare_to_wait(&dquot->dq_wait_unused, &wait,
533 TASK_UNINTERRUPTIBLE);
534 spin_unlock(&dq_list_lock);
535 /* Once dqput() wakes us up, we know it's time to free
536 * the dquot.
537 * IMPORTANT: we rely on the fact that there is always
538 * at most one process waiting for the dquot to free.
539 * Otherwise dq_count would be > 1 and we would never
540 * wake up. */
542 if (atomic_read(&dquot->dq_count) > 1)
544 finish_wait(&dquot->dq_wait_unused, &wait);
546 /* At this moment the dquot need not exist (it could have been
547 * reclaimed by prune_dqcache()). Hence we must
552 * Quota now has no users and it has been written on last
555 remove_dquot_hash(dquot);
556 remove_free_dquot(dquot);
558 do_destroy_dquot(dquot);
560 spin_unlock(&dq_list_lock);
563 /* Call callback for every active dquot on given filesystem */
564 int dquot_scan_active(struct super_block *sb,
565 int (*fn)(struct dquot *dquot, unsigned long priv),
568 struct dquot *dquot, *old_dquot = NULL;
571 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
572 spin_lock(&dq_list_lock);
573 list_for_each_entry(dquot, &inuse_list, dq_inuse) {
574 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
576 if (dquot->dq_sb != sb)
578 /* Now we have active dquot so we can just increase use count */
579 atomic_inc(&dquot->dq_count);
580 spin_unlock(&dq_list_lock);
581 dqstats_inc(DQST_LOOKUPS);
584 ret = fn(dquot, priv);
587 spin_lock(&dq_list_lock);
588 /* We are safe to continue now because our dquot could not
589 * be moved out of the inuse list while we hold the reference */
591 spin_unlock(&dq_list_lock);
594 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
597 EXPORT_SYMBOL(dquot_scan_active);
599 /* Write all dquot structures to quota files */
600 int dquot_writeback_dquots(struct super_block *sb, int type)
602 struct list_head *dirty;
604 struct quota_info *dqopt = sb_dqopt(sb);
608 mutex_lock(&dqopt->dqonoff_mutex);
609 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
610 if (type != -1 && cnt != type)
612 if (!sb_has_quota_active(sb, cnt))
614 spin_lock(&dq_list_lock);
615 dirty = &dqopt->info[cnt].dqi_dirty_list;
616 while (!list_empty(dirty)) {
617 dquot = list_first_entry(dirty, struct dquot,
619 /* Only a bad dquot can be dirty and inactive... */
620 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
621 clear_dquot_dirty(dquot);
624 /* Now we have an active dquot from which someone is
625 * holding a reference so we can safely just increase
626 * the use count */
627 atomic_inc(&dquot->dq_count);
628 spin_unlock(&dq_list_lock);
629 dqstats_inc(DQST_LOOKUPS);
630 err = sb->dq_op->write_dquot(dquot);
634 spin_lock(&dq_list_lock);
636 spin_unlock(&dq_list_lock);
639 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
640 if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
641 && info_dirty(&dqopt->info[cnt]))
642 sb->dq_op->write_info(sb, cnt);
643 dqstats_inc(DQST_SYNCS);
644 mutex_unlock(&dqopt->dqonoff_mutex);
648 EXPORT_SYMBOL(dquot_writeback_dquots);
650 /* Write all dquot structures to disk and make them visible from userspace */
651 int dquot_quota_sync(struct super_block *sb, int type)
653 struct quota_info *dqopt = sb_dqopt(sb);
657 ret = dquot_writeback_dquots(sb, type);
660 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
663 /* This is not very clever (nor fast) but currently I don't know of
664 * any other simple way of getting quota data to disk and we must get
665 * it there for it to be visible to userspace... */
666 if (sb->s_op->sync_fs)
667 sb->s_op->sync_fs(sb, 1);
668 sync_blockdev(sb->s_bdev);
671 * Now when everything is written we can discard the pagecache so
672 * that userspace sees the changes.
674 mutex_lock(&dqopt->dqonoff_mutex);
675 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
676 if (type != -1 && cnt != type)
678 if (!sb_has_quota_active(sb, cnt))
680 mutex_lock(&dqopt->files[cnt]->i_mutex);
681 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
682 mutex_unlock(&dqopt->files[cnt]->i_mutex);
684 mutex_unlock(&dqopt->dqonoff_mutex);
688 EXPORT_SYMBOL(dquot_quota_sync);
690 /* Free unused dquots from cache */
691 static void prune_dqcache(int count)
693 struct list_head *head;
696 head = free_dquots.prev;
697 while (head != &free_dquots && count) {
698 dquot = list_entry(head, struct dquot, dq_free);
699 remove_dquot_hash(dquot);
700 remove_free_dquot(dquot);
702 do_destroy_dquot(dquot);
704 head = free_dquots.prev;
709 * This is called from kswapd when we think we need some
710 * more memory.
712 static int shrink_dqcache_memory(struct shrinker *shrink,
713 struct shrink_control *sc)
715 int nr = sc->nr_to_scan;
718 spin_lock(&dq_list_lock);
720 spin_unlock(&dq_list_lock);
722 return vfs_pressure_ratio(
723 percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
726 static struct shrinker dqcache_shrinker = {
727 .shrink = shrink_dqcache_memory,
728 .seeks = DEFAULT_SEEKS,
732 * Put reference to dquot
733 * NOTE: If you change this function please check whether dqput_blocks() works right...
735 void dqput(struct dquot *dquot)
741 #ifdef CONFIG_QUOTA_DEBUG
742 if (!atomic_read(&dquot->dq_count)) {
743 quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
744 quotatypes[dquot->dq_id.type],
745 from_kqid(&init_user_ns, dquot->dq_id));
749 dqstats_inc(DQST_DROPS);
751 spin_lock(&dq_list_lock);
752 if (atomic_read(&dquot->dq_count) > 1) {
753 /* We have more than one user... nothing to do */
754 atomic_dec(&dquot->dq_count);
755 /* Releasing dquot during quotaoff phase? */
756 if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
757 atomic_read(&dquot->dq_count) == 1)
758 wake_up(&dquot->dq_wait_unused);
759 spin_unlock(&dq_list_lock);
762 /* Need to release dquot? */
763 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
764 spin_unlock(&dq_list_lock);
765 /* Commit dquot before releasing */
766 ret = dquot->dq_sb->dq_op->write_dquot(dquot);
768 quota_error(dquot->dq_sb, "Can't write quota structure"
769 " (error %d). Quota may get out of sync!",
772 * We clear dirty bit anyway, so that we avoid
775 spin_lock(&dq_list_lock);
776 clear_dquot_dirty(dquot);
777 spin_unlock(&dq_list_lock);
781 /* Clear flag in case dquot was inactive (something bad happened) */
782 clear_dquot_dirty(dquot);
783 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
784 spin_unlock(&dq_list_lock);
785 dquot->dq_sb->dq_op->release_dquot(dquot);
788 atomic_dec(&dquot->dq_count);
789 #ifdef CONFIG_QUOTA_DEBUG
791 BUG_ON(!list_empty(&dquot->dq_free));
793 put_dquot_last(dquot);
794 spin_unlock(&dq_list_lock);
796 EXPORT_SYMBOL(dqput);
798 struct dquot *dquot_alloc(struct super_block *sb, int type)
800 return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
802 EXPORT_SYMBOL(dquot_alloc);
804 static struct dquot *get_empty_dquot(struct super_block *sb, int type)
808 dquot = sb->dq_op->alloc_dquot(sb, type);
812 mutex_init(&dquot->dq_lock);
813 INIT_LIST_HEAD(&dquot->dq_free);
814 INIT_LIST_HEAD(&dquot->dq_inuse);
815 INIT_HLIST_NODE(&dquot->dq_hash);
816 INIT_LIST_HEAD(&dquot->dq_dirty);
817 init_waitqueue_head(&dquot->dq_wait_unused);
819 dquot->dq_id = make_kqid_invalid(type);
820 atomic_set(&dquot->dq_count, 1);
826 * Get reference to dquot
828 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
829 * destroying our dquot by:
830 * a) checking for quota flags under dq_list_lock and
831 * b) getting a reference to dquot before we release dq_list_lock
833 struct dquot *dqget(struct super_block *sb, struct kqid qid)
835 unsigned int hashent = hashfn(sb, qid);
836 struct dquot *dquot = NULL, *empty = NULL;
838 if (!sb_has_quota_active(sb, qid.type))
841 spin_lock(&dq_list_lock);
842 spin_lock(&dq_state_lock);
843 if (!sb_has_quota_active(sb, qid.type)) {
844 spin_unlock(&dq_state_lock);
845 spin_unlock(&dq_list_lock);
848 spin_unlock(&dq_state_lock);
850 dquot = find_dquot(hashent, sb, qid);
853 spin_unlock(&dq_list_lock);
854 empty = get_empty_dquot(sb, qid.type);
856 schedule(); /* Try to wait for a moment... */
862 /* all dquots go on the inuse_list */
863 put_inuse(dquot);
864 /* hash it first so it can be found */
865 insert_dquot_hash(dquot);
866 spin_unlock(&dq_list_lock);
867 dqstats_inc(DQST_LOOKUPS);
869 if (!atomic_read(&dquot->dq_count))
870 remove_free_dquot(dquot);
871 atomic_inc(&dquot->dq_count);
872 spin_unlock(&dq_list_lock);
873 dqstats_inc(DQST_CACHE_HITS);
874 dqstats_inc(DQST_LOOKUPS);
876 /* Wait for dq_lock - after this we know that either dquot_release() is
877 * already finished or it will be canceled due to dq_count > 1 test */
878 wait_on_dquot(dquot);
879 /* Read the dquot / allocate space in quota file */
880 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
881 sb->dq_op->acquire_dquot(dquot) < 0) {
886 #ifdef CONFIG_QUOTA_DEBUG
887 BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
891 do_destroy_dquot(empty);
895 EXPORT_SYMBOL(dqget);
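/*
 * Usage sketch (illustrative, hypothetical caller): look up the dquot of
 * the current fsuid, inspect its usage and drop the reference again.
 *
 *	struct dquot *dquot = dqget(sb, make_kqid_uid(current_fsuid()));
 *	if (dquot) {
 *		spin_lock(&dq_data_lock);
 *		...read dquot->dq_dqb.dqb_curspace and friends...
 *		spin_unlock(&dq_data_lock);
 *		dqput(dquot);
 *	}
 */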
897 static int dqinit_needed(struct inode *inode, int type)
901 if (IS_NOQUOTA(inode))
904 return !inode->i_dquot[type];
905 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
906 if (!inode->i_dquot[cnt])
911 /* This routine is guarded by dqonoff_mutex mutex */
912 static void add_dquot_ref(struct super_block *sb, int type)
914 struct inode *inode, *old_inode = NULL;
915 #ifdef CONFIG_QUOTA_DEBUG
919 spin_lock(&inode_sb_list_lock);
920 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
921 spin_lock(&inode->i_lock);
922 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
923 !atomic_read(&inode->i_writecount) ||
924 !dqinit_needed(inode, type)) {
925 spin_unlock(&inode->i_lock);
929 spin_unlock(&inode->i_lock);
930 spin_unlock(&inode_sb_list_lock);
932 #ifdef CONFIG_QUOTA_DEBUG
933 if (unlikely(inode_get_rsv_space(inode) > 0))
937 __dquot_initialize(inode, type);
940 * We hold a reference to 'inode' so it couldn't have been
941 * removed from s_inodes list while we dropped the
942 * inode_sb_list_lock. We cannot iput the inode now as we can be
943 * holding the last reference and we cannot iput it under
944 * inode_sb_list_lock. So we keep the reference and iput it
945 * later. */
948 spin_lock(&inode_sb_list_lock);
950 spin_unlock(&inode_sb_list_lock);
953 #ifdef CONFIG_QUOTA_DEBUG
955 quota_error(sb, "Writes happened before quota was turned on "
956 "thus quota information is probably inconsistent. "
957 "Please run quotacheck(8)");
963 * Return 0 if dqput() won't block.
964 * (note that 1 doesn't necessarily mean blocking)
966 static inline int dqput_blocks(struct dquot *dquot)
968 if (atomic_read(&dquot->dq_count) <= 1)
974 * Remove references to dquots from inode and add dquot to list for freeing
975 * if we have the last reference to dquot
976 * We can't race with anybody because we hold dqptr_sem for writing...
978 static int remove_inode_dquot_ref(struct inode *inode, int type,
979 struct list_head *tofree_head)
981 struct dquot *dquot = inode->i_dquot[type];
983 inode->i_dquot[type] = NULL;
985 if (dqput_blocks(dquot)) {
986 #ifdef CONFIG_QUOTA_DEBUG
987 if (atomic_read(&dquot->dq_count) != 1)
988 quota_error(inode->i_sb, "Adding dquot with "
989 "dq_count %d to dispose list",
990 atomic_read(&dquot->dq_count));
992 spin_lock(&dq_list_lock);
993 /* As the dquot must currently have users it can't be on
994 * the free list... */
995 list_add(&dquot->dq_free, tofree_head);
996 spin_unlock(&dq_list_lock);
1000 dqput(dquot); /* We have guaranteed we won't block */
1006 * Free list of dquots
1007 * Dquots are removed from inodes and no new references can be taken so we
1008 * are the only ones holding a reference.
1010 static void put_dquot_list(struct list_head *tofree_head)
1012 struct list_head *act_head;
1013 struct dquot *dquot;
1015 act_head = tofree_head->next;
1016 while (act_head != tofree_head) {
1017 dquot = list_entry(act_head, struct dquot, dq_free);
1018 act_head = act_head->next;
1019 /* Remove dquot from the list so we won't have problems... */
1020 list_del_init(&dquot->dq_free);
1025 static void remove_dquot_ref(struct super_block *sb, int type,
1026 struct list_head *tofree_head)
1028 struct inode *inode;
1031 spin_lock(&inode_sb_list_lock);
1032 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1034 * We also have to scan I_NEW inodes because they can already
1035 * have the quota pointer initialized. Luckily, we need to touch
1036 * only quota pointers and these have separate locking
1037 * (dqptr_sem). */
1039 if (!IS_NOQUOTA(inode)) {
1040 if (unlikely(inode_get_rsv_space(inode) > 0))
1042 remove_inode_dquot_ref(inode, type, tofree_head);
1045 spin_unlock(&inode_sb_list_lock);
1046 #ifdef CONFIG_QUOTA_DEBUG
1048 printk(KERN_WARNING "VFS (%s): Writes happened after quota"
1049 " was disabled thus quota information is probably "
1050 "inconsistent. Please run quotacheck(8).\n", sb->s_id);
1055 /* Gather all references from inodes and drop them */
1056 static void drop_dquot_ref(struct super_block *sb, int type)
1058 LIST_HEAD(tofree_head);
1061 down_write(&sb_dqopt(sb)->dqptr_sem);
1062 remove_dquot_ref(sb, type, &tofree_head);
1063 up_write(&sb_dqopt(sb)->dqptr_sem);
1064 put_dquot_list(&tofree_head);
1068 static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
1070 dquot->dq_dqb.dqb_curinodes += number;
1073 static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
1075 dquot->dq_dqb.dqb_curspace += number;
1078 static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
1080 dquot->dq_dqb.dqb_rsvspace += number;
1084 * Claim reserved quota space
1086 static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
1088 if (dquot->dq_dqb.dqb_rsvspace < number) {
1090 number = dquot->dq_dqb.dqb_rsvspace;
1092 dquot->dq_dqb.dqb_curspace += number;
1093 dquot->dq_dqb.dqb_rsvspace -= number;
1096 static void dquot_reclaim_reserved_space(struct dquot *dquot, qsize_t number)
1098 if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
1099 number = dquot->dq_dqb.dqb_curspace;
1100 dquot->dq_dqb.dqb_rsvspace += number;
1101 dquot->dq_dqb.dqb_curspace -= number;
1105 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
1107 if (dquot->dq_dqb.dqb_rsvspace >= number)
1108 dquot->dq_dqb.dqb_rsvspace -= number;
1111 dquot->dq_dqb.dqb_rsvspace = 0;
1115 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
1117 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1118 dquot->dq_dqb.dqb_curinodes >= number)
1119 dquot->dq_dqb.dqb_curinodes -= number;
1121 dquot->dq_dqb.dqb_curinodes = 0;
1122 if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
1123 dquot->dq_dqb.dqb_itime = (time_t) 0;
1124 clear_bit(DQ_INODES_B, &dquot->dq_flags);
1127 static void dquot_decr_space(struct dquot *dquot, qsize_t number)
1129 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1130 dquot->dq_dqb.dqb_curspace >= number)
1131 dquot->dq_dqb.dqb_curspace -= number;
1133 dquot->dq_dqb.dqb_curspace = 0;
1134 if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
1135 dquot->dq_dqb.dqb_btime = (time_t) 0;
1136 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1140 struct super_block *w_sb;
1141 struct kqid w_dq_id;
1145 static int warning_issued(struct dquot *dquot, const int warntype)
1147 int flag = (warntype == QUOTA_NL_BHARDWARN ||
1148 warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
1149 ((warntype == QUOTA_NL_IHARDWARN ||
1150 warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
1154 return test_and_set_bit(flag, &dquot->dq_flags);
1157 #ifdef CONFIG_PRINT_QUOTA_WARNING
1158 static int flag_print_warnings = 1;
1160 static int need_print_warning(struct dquot_warn *warn)
1162 if (!flag_print_warnings)
1165 switch (warn->w_dq_id.type) {
1167 return uid_eq(current_fsuid(), warn->w_dq_id.uid);
1169 return in_group_p(warn->w_dq_id.gid);
1170 case PRJQUOTA: /* Never taken... Just make gcc happy */
1176 /* Print a warning to the user who exceeded the quota */
1177 static void print_warning(struct dquot_warn *warn)
1180 struct tty_struct *tty;
1181 int warntype = warn->w_type;
1183 if (warntype == QUOTA_NL_IHARDBELOW ||
1184 warntype == QUOTA_NL_ISOFTBELOW ||
1185 warntype == QUOTA_NL_BHARDBELOW ||
1186 warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
1189 tty = get_current_tty();
1192 tty_write_message(tty, warn->w_sb->s_id);
1193 if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
1194 tty_write_message(tty, ": warning, ");
1196 tty_write_message(tty, ": write failed, ");
1197 tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
1199 case QUOTA_NL_IHARDWARN:
1200 msg = " file limit reached.\r\n";
1202 case QUOTA_NL_ISOFTLONGWARN:
1203 msg = " file quota exceeded too long.\r\n";
1205 case QUOTA_NL_ISOFTWARN:
1206 msg = " file quota exceeded.\r\n";
1208 case QUOTA_NL_BHARDWARN:
1209 msg = " block limit reached.\r\n";
1211 case QUOTA_NL_BSOFTLONGWARN:
1212 msg = " block quota exceeded too long.\r\n";
1214 case QUOTA_NL_BSOFTWARN:
1215 msg = " block quota exceeded.\r\n";
1218 tty_write_message(tty, msg);
1223 static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
1226 if (warning_issued(dquot, warntype))
1228 warn->w_type = warntype;
1229 warn->w_sb = dquot->dq_sb;
1230 warn->w_dq_id = dquot->dq_id;
1234 * Write warnings to the console and send warning messages over netlink.
1236 * Note that this function can call into tty and networking code.
1238 static void flush_warnings(struct dquot_warn *warn)
1242 for (i = 0; i < MAXQUOTAS; i++) {
1243 if (warn[i].w_type == QUOTA_NL_NOWARN)
1245 #ifdef CONFIG_PRINT_QUOTA_WARNING
1246 print_warning(&warn[i]);
1248 quota_send_warning(warn[i].w_dq_id,
1249 warn[i].w_sb->s_dev, warn[i].w_type);
1253 static int ignore_hardlimit(struct dquot *dquot)
1255 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
1257 return capable(CAP_SYS_RESOURCE) &&
1258 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1259 !(info->dqi_flags & V1_DQF_RSQUASH));
1262 /* needs dq_data_lock */
1263 static int check_idq(struct dquot *dquot, qsize_t inodes,
1264 struct dquot_warn *warn)
1266 qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
1268 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
1269 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1272 if (dquot->dq_dqb.dqb_ihardlimit &&
1273 newinodes > dquot->dq_dqb.dqb_ihardlimit &&
1274 !ignore_hardlimit(dquot)) {
1275 prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
1279 if (dquot->dq_dqb.dqb_isoftlimit &&
1280 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1281 dquot->dq_dqb.dqb_itime &&
1282 get_seconds() >= dquot->dq_dqb.dqb_itime &&
1283 !ignore_hardlimit(dquot)) {
1284 prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
1288 if (dquot->dq_dqb.dqb_isoftlimit &&
1289 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1290 dquot->dq_dqb.dqb_itime == 0) {
1291 prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
1292 dquot->dq_dqb.dqb_itime = get_seconds() +
1293 sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
1299 /* needs dq_data_lock */
1300 static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc,
1301 struct dquot_warn *warn)
1304 struct super_block *sb = dquot->dq_sb;
1306 if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
1307 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1310 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
1313 if (dquot->dq_dqb.dqb_bhardlimit &&
1314 tspace > dquot->dq_dqb.dqb_bhardlimit &&
1315 !ignore_hardlimit(dquot)) {
1317 prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
1321 if (dquot->dq_dqb.dqb_bsoftlimit &&
1322 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1323 dquot->dq_dqb.dqb_btime &&
1324 get_seconds() >= dquot->dq_dqb.dqb_btime &&
1325 !ignore_hardlimit(dquot)) {
1327 prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
1331 if (dquot->dq_dqb.dqb_bsoftlimit &&
1332 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1333 dquot->dq_dqb.dqb_btime == 0) {
1335 prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
1336 dquot->dq_dqb.dqb_btime = get_seconds() +
1337 sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
1341 * We don't allow preallocation to exceed the softlimit, so exceeding it
1342 * will always be reported.
1350 static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1354 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1355 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1356 !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
1357 return QUOTA_NL_NOWARN;
1359 newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1360 if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1361 return QUOTA_NL_ISOFTBELOW;
1362 if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1363 newinodes < dquot->dq_dqb.dqb_ihardlimit)
1364 return QUOTA_NL_IHARDBELOW;
1365 return QUOTA_NL_NOWARN;
1368 static int info_bdq_free(struct dquot *dquot, qsize_t space)
1370 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1371 dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
1372 return QUOTA_NL_NOWARN;
1374 if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
1375 return QUOTA_NL_BSOFTBELOW;
1376 if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
1377 dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
1378 return QUOTA_NL_BHARDBELOW;
1379 return QUOTA_NL_NOWARN;
1382 static int dquot_active(const struct inode *inode)
1384 struct super_block *sb = inode->i_sb;
1386 if (IS_NOQUOTA(inode))
1388 return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
1392 * Initialize quota pointers in inode
1394 * We do things in a slightly complicated way but by that we avoid calling
1395 * dqget() and thus filesystem callbacks under dqptr_sem.
1397 * It is better to call this function outside of any transaction as it
1398 * might need a lot of space in journal for dquot structure allocation.
1400 static void __dquot_initialize(struct inode *inode, int type)
1403 struct dquot *got[MAXQUOTAS];
1404 struct super_block *sb = inode->i_sb;
1407 /* First test before acquiring mutex - solves deadlocks when we
1408 * re-enter the quota code and are already holding the mutex */
1409 if (!dquot_active(inode))
1412 /* First get references to structures we might need. */
1413 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1416 if (type != -1 && cnt != type)
1420 qid = make_kqid_uid(inode->i_uid);
1423 qid = make_kqid_gid(inode->i_gid);
1426 got[cnt] = dqget(sb, qid);
1429 down_write(&sb_dqopt(sb)->dqptr_sem);
1430 if (IS_NOQUOTA(inode))
1432 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1433 if (type != -1 && cnt != type)
1435 /* Avoid races with quotaoff() */
1436 if (!sb_has_quota_active(sb, cnt))
1438 /* We could race with quotaon or dqget() could have failed */
1441 if (!inode->i_dquot[cnt]) {
1442 inode->i_dquot[cnt] = got[cnt];
1445 * Make quota reservation system happy if someone
1446 * did a write before quota was turned on
1448 rsv = inode_get_rsv_space(inode);
1449 if (unlikely(rsv)) {
1450 spin_lock(&dq_data_lock);
1451 dquot_resv_space(inode->i_dquot[cnt], rsv);
1452 spin_unlock(&dq_data_lock);
1457 up_write(&sb_dqopt(sb)->dqptr_sem);
1458 /* Drop unused references */
1462 void dquot_initialize(struct inode *inode)
1464 __dquot_initialize(inode, -1);
1466 EXPORT_SYMBOL(dquot_initialize);
1469 * Release all quotas referenced by inode
1471 static void __dquot_drop(struct inode *inode)
1474 struct dquot *put[MAXQUOTAS];
1476 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1477 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1478 put[cnt] = inode->i_dquot[cnt];
1479 inode->i_dquot[cnt] = NULL;
1481 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1485 void dquot_drop(struct inode *inode)
1489 if (IS_NOQUOTA(inode))
1493 * Test before calling to rule out calls from proc and such
1494 * where we are not allowed to block. Note that this is
1495 * actually a reliable test even without the lock - the caller
1496 * must ensure that nobody can come after the DQUOT_DROP and
1497 * add quota pointers back anyway.
1499 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1500 if (inode->i_dquot[cnt])
1504 if (cnt < MAXQUOTAS)
1505 __dquot_drop(inode);
1507 EXPORT_SYMBOL(dquot_drop);
1510 * inode_reserved_space is managed internally by quota, and protected by
1511 * i_lock similar to i_blocks+i_bytes.
1513 static qsize_t *inode_reserved_space(struct inode * inode)
1515 /* Filesystem must explicitly define its own method in order to use
1516 * the quota reservation interface */
1517 BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
1518 return inode->i_sb->dq_op->get_reserved_space(inode);
1521 void inode_add_rsv_space(struct inode *inode, qsize_t number)
1523 spin_lock(&inode->i_lock);
1524 *inode_reserved_space(inode) += number;
1525 spin_unlock(&inode->i_lock);
1527 EXPORT_SYMBOL(inode_add_rsv_space);
1529 void inode_claim_rsv_space(struct inode *inode, qsize_t number)
1531 spin_lock(&inode->i_lock);
1532 *inode_reserved_space(inode) -= number;
1533 __inode_add_bytes(inode, number);
1534 spin_unlock(&inode->i_lock);
1536 EXPORT_SYMBOL(inode_claim_rsv_space);
1538 void inode_reclaim_rsv_space(struct inode *inode, qsize_t number)
1540 spin_lock(&inode->i_lock);
1541 *inode_reserved_space(inode) += number;
1542 __inode_sub_bytes(inode, number);
1543 spin_unlock(&inode->i_lock);
1545 EXPORT_SYMBOL(inode_reclaim_rsv_space);
1547 void inode_sub_rsv_space(struct inode *inode, qsize_t number)
1549 spin_lock(&inode->i_lock);
1550 *inode_reserved_space(inode) -= number;
1551 spin_unlock(&inode->i_lock);
1553 EXPORT_SYMBOL(inode_sub_rsv_space);
1555 static qsize_t inode_get_rsv_space(struct inode *inode)
1559 if (!inode->i_sb->dq_op->get_reserved_space)
1561 spin_lock(&inode->i_lock);
1562 ret = *inode_reserved_space(inode);
1563 spin_unlock(&inode->i_lock);
1567 static void inode_incr_space(struct inode *inode, qsize_t number,
1571 inode_add_rsv_space(inode, number);
1573 inode_add_bytes(inode, number);
1576 static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
1579 inode_sub_rsv_space(inode, number);
1581 inode_sub_bytes(inode, number);
1585 * This function updates the i_blocks+i_bytes fields and quota information
1586 * (together with appropriate checks).
1588 * NOTE: We absolutely rely on the fact that caller dirties the inode
1589 * (usually helpers in quotaops.h care about this) and holds a handle for
1590 * the current transaction so that dquot write and inode write go into the
1591 * same transaction.
1595 * This operation can block, but only after everything is updated
1597 int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1600 struct dquot_warn warn[MAXQUOTAS];
1601 struct dquot **dquots = inode->i_dquot;
1602 int reserve = flags & DQUOT_SPACE_RESERVE;
1605 * First test before acquiring mutex - solves deadlocks when we
1606 * re-enter the quota code and are already holding the mutex
1608 if (!dquot_active(inode)) {
1609 inode_incr_space(inode, number, reserve);
1613 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1614 warn[cnt].w_type = QUOTA_NL_NOWARN;
1616 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1617 spin_lock(&dq_data_lock);
1618 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1621 ret = check_bdq(dquots[cnt], number,
1622 !(flags & DQUOT_SPACE_WARN), &warn[cnt]);
1623 if (ret && !(flags & DQUOT_SPACE_NOFAIL)) {
1624 spin_unlock(&dq_data_lock);
1625 goto out_flush_warn;
1628 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1632 dquot_resv_space(dquots[cnt], number);
1634 dquot_incr_space(dquots[cnt], number);
1636 inode_incr_space(inode, number, reserve);
1637 spin_unlock(&dq_data_lock);
1640 goto out_flush_warn;
1641 mark_all_dquot_dirty(dquots);
1643 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1644 flush_warnings(warn);
1648 EXPORT_SYMBOL(__dquot_alloc_space);
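/*
 * Illustrative use through the quotaops.h wrappers (hypothetical filesystem
 * block allocator, called with a transaction already started):
 *
 *	error = dquot_alloc_block(inode, nr_blocks);
 *	if (error)
 *		return error;			over quota
 *	error = myfs_allocate_blocks(...);	filesystem-specific, hypothetical
 *	if (error) {
 *		dquot_free_block(inode, nr_blocks);
 *		return error;
 *	}
 */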
1651 * This operation can block, but only after everything is updated
1653 int dquot_alloc_inode(const struct inode *inode)
1656 struct dquot_warn warn[MAXQUOTAS];
1657 struct dquot * const *dquots = inode->i_dquot;
1659 /* First test before acquiring mutex - solves deadlocks when we
1660 * re-enter the quota code and are already holding the mutex */
1661 if (!dquot_active(inode))
1663 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1664 warn[cnt].w_type = QUOTA_NL_NOWARN;
1665 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1666 spin_lock(&dq_data_lock);
1667 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1670 ret = check_idq(dquots[cnt], 1, &warn[cnt]);
1675 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1678 dquot_incr_inodes(dquots[cnt], 1);
1682 spin_unlock(&dq_data_lock);
1684 mark_all_dquot_dirty(dquots);
1685 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1686 flush_warnings(warn);
1689 EXPORT_SYMBOL(dquot_alloc_inode);
1692 * Convert in-memory reserved quotas to real consumed quotas
1694 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
1698 if (!dquot_active(inode)) {
1699 inode_claim_rsv_space(inode, number);
1703 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1704 spin_lock(&dq_data_lock);
1705 /* Claim reserved quotas to allocated quotas */
1706 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1707 if (inode->i_dquot[cnt])
1708 dquot_claim_reserved_space(inode->i_dquot[cnt],
1711 /* Update inode bytes */
1712 inode_claim_rsv_space(inode, number);
1713 spin_unlock(&dq_data_lock);
1714 mark_all_dquot_dirty(inode->i_dquot);
1715 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1718 EXPORT_SYMBOL(dquot_claim_space_nodirty);
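/*
 * Sketch of the reserve/claim pattern (illustrative; this is how delayed
 * allocation filesystems use the interface): reserve space when buffering
 * a write, claim it when blocks are finally allocated.
 *
 *	at write time:		dquot_reserve_block(inode, nr);
 *	at block allocation:	dquot_claim_block(inode, nr);
 *	on error:		dquot_release_reservation_block(inode, nr);
 */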
1721 * Convert allocated space back to in-memory reserved quotas
1723 void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
1727 if (!dquot_active(inode)) {
1728 inode_reclaim_rsv_space(inode, number);
1732 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1733 spin_lock(&dq_data_lock);
1734 /* Convert allocated space back to reserved quotas */
1735 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1736 if (inode->i_dquot[cnt])
1737 dquot_reclaim_reserved_space(inode->i_dquot[cnt],
1740 /* Update inode bytes */
1741 inode_reclaim_rsv_space(inode, number);
1742 spin_unlock(&dq_data_lock);
1743 mark_all_dquot_dirty(inode->i_dquot);
1744 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1747 EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
1750 * This operation can block, but only after everything is updated
1752 void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
1755 struct dquot_warn warn[MAXQUOTAS];
1756 struct dquot **dquots = inode->i_dquot;
1757 int reserve = flags & DQUOT_SPACE_RESERVE;
1759 /* First test before acquiring mutex - solves deadlocks when we
1760 * re-enter the quota code and are already holding the mutex */
1761 if (!dquot_active(inode)) {
1762 inode_decr_space(inode, number, reserve);
1766 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1767 spin_lock(&dq_data_lock);
1768 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1771 warn[cnt].w_type = QUOTA_NL_NOWARN;
1774 wtype = info_bdq_free(dquots[cnt], number);
1775 if (wtype != QUOTA_NL_NOWARN)
1776 prepare_warning(&warn[cnt], dquots[cnt], wtype);
1778 dquot_free_reserved_space(dquots[cnt], number);
1780 dquot_decr_space(dquots[cnt], number);
1782 inode_decr_space(inode, number, reserve);
1783 spin_unlock(&dq_data_lock);
1787 mark_all_dquot_dirty(dquots);
1789 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1790 flush_warnings(warn);
1792 EXPORT_SYMBOL(__dquot_free_space);
1795 * This operation can block, but only after everything is updated
1797 void dquot_free_inode(const struct inode *inode)
1800 struct dquot_warn warn[MAXQUOTAS];
1801 struct dquot * const *dquots = inode->i_dquot;
1803 /* First test before acquiring mutex - solves deadlocks when we
1804 * re-enter the quota code and are already holding the mutex */
1805 if (!dquot_active(inode))
1808 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1809 spin_lock(&dq_data_lock);
1810 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1813 warn[cnt].w_type = QUOTA_NL_NOWARN;
1816 wtype = info_idq_free(dquots[cnt], 1);
1817 if (wtype != QUOTA_NL_NOWARN)
1818 prepare_warning(&warn[cnt], dquots[cnt], wtype);
1819 dquot_decr_inodes(dquots[cnt], 1);
1821 spin_unlock(&dq_data_lock);
1822 mark_all_dquot_dirty(dquots);
1823 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1824 flush_warnings(warn);
1826 EXPORT_SYMBOL(dquot_free_inode);
1829 * Transfer the number of inodes and blocks from one diskquota to another.
1830 * On success, dquot references in transfer_to are consumed and references
1831 * to original dquots that need to be released are placed there. On failure,
1832 * references are kept untouched.
1834 * This operation can block, but only after everything is updated
1835 * A transaction must be started when entering this function.
1838 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1840 qsize_t space, cur_space;
1841 qsize_t rsv_space = 0;
1842 struct dquot *transfer_from[MAXQUOTAS] = {};
1844 char is_valid[MAXQUOTAS] = {};
1845 struct dquot_warn warn_to[MAXQUOTAS];
1846 struct dquot_warn warn_from_inodes[MAXQUOTAS];
1847 struct dquot_warn warn_from_space[MAXQUOTAS];
1849 /* First test before acquiring mutex - solves deadlocks when we
1850 * re-enter the quota code and are already holding the mutex */
1851 if (IS_NOQUOTA(inode))
1853 /* Initialize the arrays */
1854 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1855 warn_to[cnt].w_type = QUOTA_NL_NOWARN;
1856 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
1857 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
1859 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1860 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
1861 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1864 spin_lock(&dq_data_lock);
1865 cur_space = inode_get_bytes(inode);
1866 rsv_space = inode_get_rsv_space(inode);
1867 space = cur_space + rsv_space;
1868 /* Build the transfer_from list and check the limits */
1869 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1871 * Skip changes for the same uid or gid or for a turned-off quota type.
1873 if (!transfer_to[cnt])
1875 /* Avoid races with quotaoff() */
1876 if (!sb_has_quota_active(inode->i_sb, cnt))
1879 transfer_from[cnt] = inode->i_dquot[cnt];
1880 ret = check_idq(transfer_to[cnt], 1, &warn_to[cnt]);
1883 ret = check_bdq(transfer_to[cnt], space, 0, &warn_to[cnt]);
1889 * Finally perform the needed transfer from transfer_from to transfer_to
1891 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1894 /* Due to IO error we might not have transfer_from[] structure */
1895 if (transfer_from[cnt]) {
1897 wtype = info_idq_free(transfer_from[cnt], 1);
1898 if (wtype != QUOTA_NL_NOWARN)
1899 prepare_warning(&warn_from_inodes[cnt],
1900 transfer_from[cnt], wtype);
1901 wtype = info_bdq_free(transfer_from[cnt], space);
1902 if (wtype != QUOTA_NL_NOWARN)
1903 prepare_warning(&warn_from_space[cnt],
1904 transfer_from[cnt], wtype);
1905 dquot_decr_inodes(transfer_from[cnt], 1);
1906 dquot_decr_space(transfer_from[cnt], cur_space);
1907 dquot_free_reserved_space(transfer_from[cnt],
1911 dquot_incr_inodes(transfer_to[cnt], 1);
1912 dquot_incr_space(transfer_to[cnt], cur_space);
1913 dquot_resv_space(transfer_to[cnt], rsv_space);
1915 inode->i_dquot[cnt] = transfer_to[cnt];
1917 spin_unlock(&dq_data_lock);
1918 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1920 mark_all_dquot_dirty(transfer_from);
1921 mark_all_dquot_dirty(transfer_to);
1922 flush_warnings(warn_to);
1923 flush_warnings(warn_from_inodes);
1924 flush_warnings(warn_from_space);
1925 /* Pass back references to put */
1926 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1928 transfer_to[cnt] = transfer_from[cnt];
1931 spin_unlock(&dq_data_lock);
1932 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1933 flush_warnings(warn_to);
1936 EXPORT_SYMBOL(__dquot_transfer);
1938 /* Wrapper for transferring ownership of an inode for uid/gid only
1939 * Called from FSXXX_setattr()
1941 int dquot_transfer(struct inode *inode, struct iattr *iattr)
1943 struct dquot *transfer_to[MAXQUOTAS] = {};
1944 struct super_block *sb = inode->i_sb;
1947 if (!dquot_active(inode))
1950 if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid))
1951 transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(iattr->ia_uid));
1952 if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))
1953 transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(iattr->ia_gid));
1955 ret = __dquot_transfer(inode, transfer_to);
1956 dqput_all(transfer_to);
1959 EXPORT_SYMBOL(dquot_transfer);
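/*
 * Illustrative caller (sketch of a hypothetical FSXXX_setattr(), modelled
 * on what ext2-like filesystems do):
 *
 *	error = inode_change_ok(inode, iattr);
 *	if (error)
 *		return error;
 *	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
 *	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
 *		error = dquot_transfer(inode, iattr);
 *		if (error)
 *			return error;
 *	}
 *	setattr_copy(inode, iattr);
 *	mark_inode_dirty(inode);
 */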
1962 * Write info of quota file to disk
1964 int dquot_commit_info(struct super_block *sb, int type)
1967 struct quota_info *dqopt = sb_dqopt(sb);
1969 mutex_lock(&dqopt->dqio_mutex);
1970 ret = dqopt->ops[type]->write_file_info(sb, type);
1971 mutex_unlock(&dqopt->dqio_mutex);
1974 EXPORT_SYMBOL(dquot_commit_info);
1977 * Definitions of diskquota operations.
1979 const struct dquot_operations dquot_operations = {
1980 .write_dquot = dquot_commit,
1981 .acquire_dquot = dquot_acquire,
1982 .release_dquot = dquot_release,
1983 .mark_dirty = dquot_mark_dquot_dirty,
1984 .write_info = dquot_commit_info,
1985 .alloc_dquot = dquot_alloc,
1986 .destroy_dquot = dquot_destroy,
1988 EXPORT_SYMBOL(dquot_operations);
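/*
 * Illustrative wiring (sketch): a filesystem relying on the generic dquot
 * operations points its superblock at them in fill_super():
 *
 *	sb->dq_op = &dquot_operations;
 *	sb->s_qcop = &dquot_quotactl_ops;	generic quotactl operations
 */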
1991 * Generic helper for ->open on filesystems supporting disk quotas.
1993 int dquot_file_open(struct inode *inode, struct file *file)
1997 error = generic_file_open(inode, file);
1998 if (!error && (file->f_mode & FMODE_WRITE))
1999 dquot_initialize(inode);
2002 EXPORT_SYMBOL(dquot_file_open);
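/*
 * Illustrative wiring (sketch): filesystems hook this in as the ->open of
 * their regular files so that writable opens get quota-initialized inodes:
 *
 *	const struct file_operations myfs_file_operations = {
 *		...
 *		.open = dquot_file_open,
 *	};
 */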
2005 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
2007 int dquot_disable(struct super_block *sb, int type, unsigned int flags)
2010 struct quota_info *dqopt = sb_dqopt(sb);
2011 struct inode *toputinode[MAXQUOTAS];
2013 /* Cannot turn off usage accounting without turning off limits, or
2014 * suspend quotas and simultaneously turn quotas off. */
2015 if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
2016 || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
2017 DQUOT_USAGE_ENABLED)))
2020 /* We need to serialize quota_off() for device */
2021 mutex_lock(&dqopt->dqonoff_mutex);
2024 * Skip everything if there's nothing to do. We have to do this because
2025 * sometimes we are called when fill_super() failed and calling
2026 * sync_fs() in such cases does no good.
2028 if (!sb_any_quota_loaded(sb)) {
2029 mutex_unlock(&dqopt->dqonoff_mutex);
2032 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2033 toputinode[cnt] = NULL;
2034 if (type != -1 && cnt != type)
2036 if (!sb_has_quota_loaded(sb, cnt))
2039 if (flags & DQUOT_SUSPENDED) {
2040 spin_lock(&dq_state_lock);
2042 dquot_state_flag(DQUOT_SUSPENDED, cnt);
2043 spin_unlock(&dq_state_lock);
2045 spin_lock(&dq_state_lock);
2046 dqopt->flags &= ~dquot_state_flag(flags, cnt);
2047 /* Turning off suspended quotas? */
2048 if (!sb_has_quota_loaded(sb, cnt) &&
2049 sb_has_quota_suspended(sb, cnt)) {
2050 dqopt->flags &= ~dquot_state_flag(
2051 DQUOT_SUSPENDED, cnt);
2052 spin_unlock(&dq_state_lock);
2053 iput(dqopt->files[cnt]);
2054 dqopt->files[cnt] = NULL;
2057 spin_unlock(&dq_state_lock);
2060 /* Do we still have to keep quota loaded? */
2061 if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
2064 /* Note: these are blocking operations */
2065 drop_dquot_ref(sb, cnt);
2066 invalidate_dquots(sb, cnt);
2068 * Now all dquots should be invalidated, all writes done so we
2069 * should be the only users of the info. No locks needed.
2071 if (info_dirty(&dqopt->info[cnt]))
2072 sb->dq_op->write_info(sb, cnt);
2073 if (dqopt->ops[cnt]->free_file_info)
2074 dqopt->ops[cnt]->free_file_info(sb, cnt);
2075 put_quota_format(dqopt->info[cnt].dqi_format);
2077 toputinode[cnt] = dqopt->files[cnt];
2078 if (!sb_has_quota_loaded(sb, cnt))
2079 dqopt->files[cnt] = NULL;
2080 dqopt->info[cnt].dqi_flags = 0;
2081 dqopt->info[cnt].dqi_igrace = 0;
2082 dqopt->info[cnt].dqi_bgrace = 0;
2083 dqopt->ops[cnt] = NULL;
2085 mutex_unlock(&dqopt->dqonoff_mutex);
2087 /* Skip syncing and setting flags if quota files are hidden */
2088 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
2091 /* Sync the superblock so that buffers with quota data are written to
2092 * disk (and so userspace sees correct data afterwards). */
2093 if (sb->s_op->sync_fs)
2094 sb->s_op->sync_fs(sb, 1);
2095 sync_blockdev(sb->s_bdev);
2096 /* Now the quota files are just ordinary files and we can set the
2097 * inode flags back. Moreover we discard the pagecache so that
2098 * userspace sees the writes we did bypassing the pagecache. We
2099 * must also discard the blockdev buffers so that we see the
2100 * changes done by userspace on the next quotaon() */
2101 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2102 if (toputinode[cnt]) {
2103 mutex_lock(&dqopt->dqonoff_mutex);
2104 /* If quota was reenabled in the meantime, we have
2105 * nothing to do */
2106 if (!sb_has_quota_loaded(sb, cnt)) {
2107 mutex_lock(&toputinode[cnt]->i_mutex);
2108 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
2109 S_NOATIME | S_NOQUOTA);
2110 truncate_inode_pages(&toputinode[cnt]->i_data,
2112 mutex_unlock(&toputinode[cnt]->i_mutex);
2113 mark_inode_dirty_sync(toputinode[cnt]);
2115 mutex_unlock(&dqopt->dqonoff_mutex);
2118 invalidate_bdev(sb->s_bdev);
2120 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2121 if (toputinode[cnt]) {
2122 /* On remount RO, we keep the inode pointer so that we
2123 * can reenable quota on the subsequent remount RW. We
2124 * have to check the 'flags' variable and not use the sb_has_*
2125 * functions because another quotaon / quotaoff could
2126 * change global state before we got here. We refuse
2127 * to suspend quotas when there is a pending delete on
2128 * the quota file... */
2129 if (!(flags & DQUOT_SUSPENDED))
2130 iput(toputinode[cnt]);
2131 else if (!toputinode[cnt]->i_nlink)
2136 EXPORT_SYMBOL(dquot_disable);
2138 int dquot_quota_off(struct super_block *sb, int type)
2140 return dquot_disable(sb, type,
2141 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2143 EXPORT_SYMBOL(dquot_quota_off);
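
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * filesystem using regular quota files typically suspends quotas on remount
 * read-only and turns them off fully on unmount. The function name below is
 * hypothetical; it only shows how the flags combine.
 *
 *	static int example_shutdown_quotas(struct super_block *sb, bool ro)
 *	{
 *		if (ro)
 *			// Keep quota inodes so dquot_resume() can reenable
 *			return dquot_disable(sb, -1, DQUOT_SUSPENDED);
 *		// Drop both usage tracking and limit enforcement
 *		return dquot_disable(sb, -1, DQUOT_USAGE_ENABLED |
 *					     DQUOT_LIMITS_ENABLED);
 *	}
 */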
/*
 *	Turn quotas on for a device
 */

/*
 * Helper function to turn quotas on when we already have the inode of
 * the quota file and no quota information is loaded.
 */
static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
	unsigned int flags)
{
	struct quota_format_type *fmt = find_quota_format(format_id);
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;
	int oldflags = -1;

	if (!fmt)
		return -ESRCH;
	if (!S_ISREG(inode->i_mode)) {
		error = -EACCES;
		goto out_fmt;
	}
	if (IS_RDONLY(inode)) {
		error = -EROFS;
		goto out_fmt;
	}
	if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that the kernel sees
		 * changes from userspace. It is not enough to just flush
		 * the quota file since if blocksize < pagesize, invalidation
		 * of the cache could fail because of other unrelated dirty
		 * data */
		sync_filesystem(sb);
		invalidate_bdev(sb->s_bdev);
	}
	mutex_lock(&dqopt->dqonoff_mutex);
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_lock;
	}
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible). Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		mutex_lock(&inode->i_mutex);
		oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
					     S_NOQUOTA);
		inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
		mutex_unlock(&inode->i_mutex);
		/*
		 * When S_NOQUOTA is set, remove dquot references as no more
		 * references can be added
		 */
		__dquot_drop(inode);
	}
	error = -EIO;
	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		goto out_lock;
	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_file_init;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	mutex_lock(&dqopt->dqio_mutex);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0) {
		mutex_unlock(&dqopt->dqio_mutex);
		goto out_file_init;
	}
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
	mutex_unlock(&dqopt->dqio_mutex);
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	add_dquot_ref(sb, type);
	mutex_unlock(&dqopt->dqonoff_mutex);

	return 0;

out_file_init:
	dqopt->files[type] = NULL;
	iput(inode);
out_lock:
	if (oldflags != -1) {
		mutex_lock(&inode->i_mutex);
		/* Set the flags back (in the case of accidental quotaon()
		 * on a wrong file we don't want to mess up the flags) */
		inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
		inode->i_flags |= oldflags;
		mutex_unlock(&inode->i_mutex);
	}
	mutex_unlock(&dqopt->dqonoff_mutex);
out_fmt:
	put_quota_format(fmt);

	return error;
}
/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode;
	int ret = 0, cnt;
	unsigned int flags;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;

		mutex_lock(&dqopt->dqonoff_mutex);
		if (!sb_has_quota_suspended(sb, cnt)) {
			mutex_unlock(&dqopt->dqonoff_mutex);
			continue;
		}
		inode = dqopt->files[cnt];
		dqopt->files[cnt] = NULL;
		spin_lock(&dq_state_lock);
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);
		mutex_unlock(&dqopt->dqonoff_mutex);

		flags = dquot_generic_flag(flags, cnt);
		ret = vfs_load_quota_inode(inode, cnt,
				dqopt->info[cnt].dqi_fmt_id, flags);
		iput(inode);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);
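
/*
 * Usage sketch (editor's illustration, not part of the original file): the
 * counterpart to suspending quotas above. A filesystem's remount path would
 * call this once the superblock is writable again; the function name is
 * hypothetical.
 *
 *	static int example_remount_rw(struct super_block *sb)
 *	{
 *		// Reenables every quota type previously suspended by
 *		// dquot_disable(sb, -1, DQUOT_SUSPENDED)
 *		return dquot_resume(sb, -1);
 *	}
 */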
int dquot_quota_on(struct super_block *sb, int type, int format_id,
		   struct path *path)
{
	int error = security_quota_on(path->dentry);
	if (error)
		return error;
	/* Quota file not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		error = -EXDEV;
	else
		error = vfs_load_quota_inode(path->dentry->d_inode, type,
					     format_id, DQUOT_USAGE_ENABLED |
					     DQUOT_LIMITS_ENABLED);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on);
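
/*
 * For context (editor's note): this is the function the quotactl(2)
 * syscall ends up calling for Q_QUOTAON on filesystems that use the
 * generic quota code. From userspace the request looks roughly like:
 *
 *	quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/sda1",
 *		 QFMT_VFS_V1, (caddr_t)"/mnt/aquota.user");
 *
 * The device and quota file paths are illustrative.
 */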
/*
 * More powerful function for turning on quotas allowing setting
 * of individual quota flags
 */
int dquot_enable(struct inode *inode, int type, int format_id,
		 unsigned int flags)
{
	int ret = 0;
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);

	/* Just unsuspend quotas? */
	BUG_ON(flags & DQUOT_SUSPENDED);

	if (!flags)
		return 0;
	/* Just updating flags needed? */
	if (sb_has_quota_loaded(sb, type)) {
		mutex_lock(&dqopt->dqonoff_mutex);
		/* Now do a reliable test... */
		if (!sb_has_quota_loaded(sb, type)) {
			mutex_unlock(&dqopt->dqonoff_mutex);
			goto load_quota;
		}
		if (flags & DQUOT_USAGE_ENABLED &&
		    sb_has_quota_usage_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_lock;
		}
		if (flags & DQUOT_LIMITS_ENABLED &&
		    sb_has_quota_limits_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_lock;
		}
		spin_lock(&dq_state_lock);
		sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
		spin_unlock(&dq_state_lock);
out_lock:
		mutex_unlock(&dqopt->dqonoff_mutex);
		return ret;
	}

load_quota:
	return vfs_load_quota_inode(inode, type, format_id, flags);
}
EXPORT_SYMBOL(dquot_enable);
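
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * filesystem that already tracks usage can later switch on enforcement for
 * the same quota type without reloading the quota file:
 *
 *	// Assumes usage tracking was enabled earlier for 'type';
 *	// only the limits-enforcement flag is added here.
 *	err = dquot_enable(sb_dqopt(sb)->files[type], type,
 *			   sb_dqopt(sb)->info[type].dqi_fmt_id,
 *			   DQUOT_LIMITS_ENABLED);
 */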
/*
 * This function is used when a filesystem needs to initialize quotas
 * during mount time.
 */
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
		int format_id, int type)
{
	struct dentry *dentry;
	int error;

	mutex_lock(&sb->s_root->d_inode->i_mutex);
	dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
	mutex_unlock(&sb->s_root->d_inode->i_mutex);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (!dentry->d_inode) {
		error = -ENOENT;
		goto out;
	}

	error = security_quota_on(dentry);
	if (!error)
		error = vfs_load_quota_inode(dentry->d_inode, type, format_id,
				DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

out:
	dput(dentry);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on_mount);
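
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * filesystem mounted with journalled quota options would look the quota
 * file up by name relative to the root dentry, e.g.:
 *
 *	// "aquota.user" and QFMT_VFS_V1 are illustrative values taken
 *	// from common mount options, not from this file.
 *	err = dquot_quota_on_mount(sb, "aquota.user",
 *				   QFMT_VFS_V1, USRQUOTA);
 */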
static inline qsize_t qbtos(qsize_t blocks)
{
	return blocks << QIF_DQBLKSIZE_BITS;
}

static inline qsize_t stoqb(qsize_t space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}
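
/*
 * Worked example (editor's note): QIF_DQBLKSIZE is 1 << QIF_DQBLKSIZE_BITS
 * (1024 bytes with QIF_DQBLKSIZE_BITS == 10), so qbtos(3) == 3072 bytes,
 * while stoqb() rounds up: stoqb(1025) == 2 quota blocks, since a partially
 * used block must still count against the limit.
 */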
/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;

	memset(di, 0, sizeof(*di));
	di->d_version = FS_DQUOT_VERSION;
	di->d_flags = dquot->dq_id.type == USRQUOTA ?
			FS_USER_QUOTA : FS_GROUP_QUOTA;
	di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);

	spin_lock(&dq_data_lock);
	di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
	di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
	di->d_ino_hardlimit = dm->dqb_ihardlimit;
	di->d_ino_softlimit = dm->dqb_isoftlimit;
	di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
	di->d_icount = dm->dqb_curinodes;
	di->d_btimer = dm->dqb_btime;
	di->d_itimer = dm->dqb_itime;
	spin_unlock(&dq_data_lock);
}
int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
		    struct fs_disk_quota *di)
{
	struct dquot *dquot;

	dquot = dqget(sb, qid);
	if (!dquot)
		return -ESRCH;
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);
#define VFS_FS_DQ_MASK \
	(FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
	 FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
	 FS_DQ_BTIMER | FS_DQ_ITIMER)
/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
		return -EINVAL;

	if (((di->d_fieldmask & FS_DQ_BSOFT) &&
	     (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
	    ((di->d_fieldmask & FS_DQ_BHARD) &&
	     (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
	    ((di->d_fieldmask & FS_DQ_ISOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
	    ((di->d_fieldmask & FS_DQ_IHARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
		return -ERANGE;

	spin_lock(&dq_data_lock);
	if (di->d_fieldmask & FS_DQ_BCOUNT) {
		dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_BSOFT)
		dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
	if (di->d_fieldmask & FS_DQ_BHARD)
		dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
	if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_ICOUNT) {
		dm->dqb_curinodes = di->d_icount;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_ISOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & FS_DQ_IHARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_BTIMER) {
		dm->dqb_btime = di->d_btimer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_ITIMER) {
		dm->dqb_itime = di->d_itimer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & FS_DQ_BTIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & FS_DQ_ITIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
	}
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dq_data_lock);
	mark_dquot_dirty(dquot);

	return 0;
}
int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
		  struct fs_disk_quota *di)
{
	struct dquot *dquot;
	int rc;

	dquot = dqget(sb, qid);
	if (!dquot) {
		rc = -ESRCH;
		goto out;
	}
	rc = do_set_dqblk(dquot, di);
	dqput(dquot);
out:
	return rc;
}
EXPORT_SYMBOL(dquot_set_dqblk);
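
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * setting block limits for a user while leaving other fields untouched.
 * Only the fields named in d_fieldmask are applied by do_set_dqblk().
 *
 *	struct fs_disk_quota di = {
 *		.d_version = FS_DQUOT_VERSION,
 *		.d_fieldmask = FS_DQ_BSOFT | FS_DQ_BHARD,
 *		.d_blk_softlimit = 1000,	// in 1024-byte quota blocks
 *		.d_blk_hardlimit = 1200,
 *	};
 *	err = dquot_set_dqblk(sb, make_kqid(current_user_ns(),
 *					    USRQUOTA, uid), &di);
 */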
/* Generic routine for getting common part of quota file information */
int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
	struct mem_dqinfo *mi;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	if (!sb_has_quota_active(sb, type)) {
		mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
		return -ESRCH;
	}
	mi = sb_dqopt(sb)->info + type;
	spin_lock(&dq_data_lock);
	ii->dqi_bgrace = mi->dqi_bgrace;
	ii->dqi_igrace = mi->dqi_igrace;
	ii->dqi_flags = mi->dqi_flags & DQF_GETINFO_MASK;
	ii->dqi_valid = IIF_ALL;
	spin_unlock(&dq_data_lock);
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	return 0;
}
EXPORT_SYMBOL(dquot_get_dqinfo);
/* Generic routine for setting common part of quota file information */
int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
	struct mem_dqinfo *mi;
	int err = 0;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	if (!sb_has_quota_active(sb, type)) {
		err = -ESRCH;
		goto out;
	}
	mi = sb_dqopt(sb)->info + type;
	spin_lock(&dq_data_lock);
	if (ii->dqi_valid & IIF_BGRACE)
		mi->dqi_bgrace = ii->dqi_bgrace;
	if (ii->dqi_valid & IIF_IGRACE)
		mi->dqi_igrace = ii->dqi_igrace;
	if (ii->dqi_valid & IIF_FLAGS)
		mi->dqi_flags = (mi->dqi_flags & ~DQF_SETINFO_MASK) |
				(ii->dqi_flags & DQF_SETINFO_MASK);
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
	sb->dq_op->write_info(sb, type);
out:
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	return err;
}
EXPORT_SYMBOL(dquot_set_dqinfo);
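
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * changing only the block grace period; dqi_valid selects which fields
 * are applied.
 *
 *	struct if_dqinfo ii = {
 *		.dqi_valid = IIF_BGRACE,
 *		.dqi_bgrace = 7 * 24 * 60 * 60,	// one week, in seconds
 *	};
 *	err = dquot_set_dqinfo(sb, USRQUOTA, &ii);
 */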
const struct quotactl_ops dquot_quotactl_ops = {
	.quota_on	= dquot_quota_on,
	.quota_off	= dquot_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_info	= dquot_get_dqinfo,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_ops);
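
/*
 * For context (editor's note): a filesystem opting into the generic
 * implementation points its superblock at this table during mount:
 *
 *	sb->s_qcop = &dquot_quotactl_ops;
 *
 * after which quotactl(2) requests are routed to the functions above.
 */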
static int do_proc_dqstats(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int type = (int *)table->data - dqstats.stat;

	/* Update the global table from the percpu counters */
	dqstats.stat[type] =
			percpu_counter_sum_positive(&dqstats.counter[type]);
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
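
/*
 * Editor's note: each handler invocation folds the percpu counter into
 * dqstats.stat[] so that a read of, e.g., /proc/sys/fs/quota/lookups
 * (registered via the tables below) reports an up-to-date value.
 */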
static struct ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.stat[DQST_LOOKUPS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.stat[DQST_DROPS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.stat[DQST_READS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.stat[DQST_WRITES],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.stat[DQST_CACHE_HITS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.stat[DQST_SYNCS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
	{ },
};
static struct ctl_table fs_table[] = {
	{
		.procname	= "quota",
		.mode		= 0555,
		.child		= fs_dqstats_table,
	},
	{ },
};

static struct ctl_table sys_table[] = {
	{
		.procname	= "fs",
		.mode		= 0555,
		.child		= fs_table,
	},
	{ },
};
static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_table(sys_table);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			NULL);

	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
		ret = percpu_counter_init(&dqstats.counter[i], 0);
		if (ret)
			panic("Cannot create dquot stat counters");
	}

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = 0;
	do {
		dq_hash_bits++;
	} while (nr_hash >> dq_hash_bits);
	dq_hash_bits--;

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
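
	/*
	 * Worked example (editor's note): with order == 0 and 4 KiB pages,
	 * the page holds 4096 / sizeof(struct hlist_head) == 512 heads on
	 * 64-bit, the loop above finds dq_hash_bits == 9, and the table
	 * ends up with nr_hash == 512 buckets and dq_hash_mask == 511.
	 */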
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
			nr_hash, order, (PAGE_SIZE << order));

	register_shrinker(&dqcache_shrinker);

	return 0;
}
module_init(dquot_init);