/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Extent Lock.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
#include <linux/list.h>
#include "../include/cl_object.h"
#include "cl_internal.h"
/** Lock class of cl_lock::cll_guard */
static struct lock_class_key cl_lock_guard_class;
static struct kmem_cache *cl_lock_kmem;

static struct lu_kmem_descr cl_lock_caches[] = {
	{
		.ckd_cache = &cl_lock_kmem,
		.ckd_name  = "cl_lock_kmem",
		.ckd_size  = sizeof(struct cl_lock)
	},
	{
		.ckd_cache = NULL
	}
};

#define CS_LOCK_INC(o, item)
#define CS_LOCK_DEC(o, item)
#define CS_LOCKSTATE_INC(o, state)
#define CS_LOCKSTATE_DEC(o, state)
/**
 * Basic lock invariant that is maintained at all times. Caller either has a
 * reference to \a lock, or somehow assures that \a lock cannot be freed.
 *
 * \see cl_lock_invariant()
 */
static int cl_lock_invariant_trusted(const struct lu_env *env,
				     const struct cl_lock *lock)
{
	return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
	       atomic_read(&lock->cll_ref) >= lock->cll_holds &&
	       lock->cll_holds >= lock->cll_users &&
	       lock->cll_holds >= 0 &&
	       lock->cll_users >= 0 &&
	       lock->cll_depth >= 0;
}
/**
 * Stronger lock invariant, checking that caller has a reference on a lock.
 *
 * \see cl_lock_invariant_trusted()
 */
static int cl_lock_invariant(const struct lu_env *env,
			     const struct cl_lock *lock)
{
	int result;

	result = atomic_read(&lock->cll_ref) > 0 &&
		 cl_lock_invariant_trusted(env, lock);
	if (!result && env != NULL)
		CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
	return result;
}
/**
 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
 */
static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
{
	return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
}

/**
 * Returns a set of counters for this lock, depending on a lock nesting.
 */
static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
						   const struct cl_lock *lock)
{
	struct cl_thread_info *info;
	enum clt_nesting_level nesting;

	info = cl_env_info(env);
	nesting = cl_lock_nesting(lock);
	LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
	return &info->clt_counters[nesting];
}
static void cl_lock_trace0(int level, const struct lu_env *env,
			   const char *prefix, const struct cl_lock *lock,
			   const char *func, const int line)
{
	struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);

	CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)(%p/%d/%d) at %s():%d\n",
	       prefix, lock, atomic_read(&lock->cll_ref),
	       lock->cll_guarder, lock->cll_depth,
	       lock->cll_state, lock->cll_error, lock->cll_holds,
	       lock->cll_users, lock->cll_flags,
	       env, h->coh_nesting, cl_lock_nr_mutexed(env),
	       func, line);
}

#define cl_lock_trace(level, env, prefix, lock)				\
	cl_lock_trace0(level, env, prefix, lock, __func__, __LINE__)

#define RETIP ((unsigned long)__builtin_return_address(0))
#ifdef CONFIG_LOCKDEP
static struct lock_class_key cl_lock_key;

static void cl_lock_lockdep_init(struct cl_lock *lock)
{
	lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
}

static void cl_lock_lockdep_acquire(const struct lu_env *env,
				    struct cl_lock *lock, __u32 enqflags)
{
	cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
	lock_map_acquire(&lock->dep_map);
}

static void cl_lock_lockdep_release(const struct lu_env *env,
				    struct cl_lock *lock)
{
	cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
	lock_release(&lock->dep_map, 0, RETIP);
}

#else /* !CONFIG_LOCKDEP */

static void cl_lock_lockdep_init(struct cl_lock *lock)
{}
static void cl_lock_lockdep_acquire(const struct lu_env *env,
				    struct cl_lock *lock, __u32 enqflags)
{}
static void cl_lock_lockdep_release(const struct lu_env *env,
				    struct cl_lock *lock)
{}

#endif /* !CONFIG_LOCKDEP */
/**
 * Adds lock slice to the compound lock.
 *
 * This is called by cl_object_operations::coo_lock_init() methods to add a
 * per-layer state to the lock. New state is added at the end of
 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
 */
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
		       struct cl_object *obj,
		       const struct cl_lock_operations *ops)
{
	slice->cls_lock = lock;
	list_add_tail(&slice->cls_linkage, &lock->cll_layers);
	slice->cls_obj = obj;
	slice->cls_ops = ops;
}
EXPORT_SYMBOL(cl_lock_slice_add);
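
/*
 * Illustrative sketch, not part of the original file: a layer's
 * cl_object_operations::coo_lock_init() method typically allocates its
 * per-layer slice and attaches it with cl_lock_slice_add(). The names
 * my_lock, mlk_cl and my_lock_ops below are hypothetical stand-ins for a
 * real layer such as vvp or lov:
 *
 *	static int my_object_lock_init(const struct lu_env *env,
 *				       struct cl_object *obj,
 *				       struct cl_lock *lock,
 *				       const struct cl_io *io)
 *	{
 *		struct my_lock *mlk;
 *
 *		mlk = kzalloc(sizeof(*mlk), GFP_NOFS);
 *		if (mlk == NULL)
 *			return -ENOMEM;
 *		cl_lock_slice_add(lock, &mlk->mlk_cl, obj, &my_lock_ops);
 *		return 0;
 *	}
 */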
/**
 * Returns true iff a lock with the mode \a has provides at least the same
 * guarantees as a lock with the mode \a need.
 */
int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
{
	LINVRNT(need == CLM_READ || need == CLM_WRITE ||
		need == CLM_PHANTOM || need == CLM_GROUP);
	LINVRNT(has == CLM_READ || has == CLM_WRITE ||
		has == CLM_PHANTOM || has == CLM_GROUP);
	CLASSERT(CLM_PHANTOM < CLM_READ);
	CLASSERT(CLM_READ < CLM_WRITE);
	CLASSERT(CLM_WRITE < CLM_GROUP);

	if (has != CLM_GROUP)
		return need <= has;
	else
		return need == has;
}
EXPORT_SYMBOL(cl_lock_mode_match);
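
/*
 * For example (a sketch of the semantics implemented above): the mode enum
 * is ordered weakest-to-strongest, so a CLM_WRITE lock satisfies a CLM_READ
 * need, while a CLM_GROUP lock only ever matches another CLM_GROUP need:
 *
 *	cl_lock_mode_match(CLM_WRITE, CLM_READ);   returns 1
 *	cl_lock_mode_match(CLM_READ,  CLM_WRITE);  returns 0
 *	cl_lock_mode_match(CLM_GROUP, CLM_READ);   returns 0
 */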
/**
 * Returns true iff extent portions of lock descriptions match.
 */
int cl_lock_ext_match(const struct cl_lock_descr *has,
		      const struct cl_lock_descr *need)
{
	return
		has->cld_start <= need->cld_start &&
		has->cld_end >= need->cld_end &&
		cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
		(has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
}
EXPORT_SYMBOL(cl_lock_ext_match);

/**
 * Returns true iff a lock with the description \a has provides at least the
 * same guarantees as a lock with the description \a need.
 */
int cl_lock_descr_match(const struct cl_lock_descr *has,
			const struct cl_lock_descr *need)
{
	return
		cl_object_same(has->cld_obj, need->cld_obj) &&
		cl_lock_ext_match(has, need);
}
EXPORT_SYMBOL(cl_lock_descr_match);
static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
{
	struct cl_object *obj = lock->cll_descr.cld_obj;

	LINVRNT(!cl_lock_is_mutexed(lock));

	cl_lock_trace(D_DLMTRACE, env, "free lock", lock);

	while (!list_empty(&lock->cll_layers)) {
		struct cl_lock_slice *slice;

		slice = list_entry(lock->cll_layers.next,
				   struct cl_lock_slice, cls_linkage);
		list_del_init(lock->cll_layers.next);
		slice->cls_ops->clo_fini(env, slice);
	}
	CS_LOCK_DEC(obj, total);
	CS_LOCKSTATE_DEC(obj, lock->cll_state);
	lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
	cl_object_put(env, obj);
	lu_ref_fini(&lock->cll_reference);
	lu_ref_fini(&lock->cll_holders);
	mutex_destroy(&lock->cll_guard);
	OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
}
/**
 * Releases a reference on a lock.
 *
 * When last reference is released, lock is returned to the cache, unless it
 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
 * immediately.
 *
 * \see cl_object_put(), cl_page_put()
 */
void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
{
	struct cl_object *obj;

	LINVRNT(cl_lock_invariant(env, lock));
	obj = lock->cll_descr.cld_obj;
	LINVRNT(obj != NULL);

	CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
	       atomic_read(&lock->cll_ref), lock, RETIP);

	if (atomic_dec_and_test(&lock->cll_ref)) {
		if (lock->cll_state == CLS_FREEING) {
			LASSERT(list_empty(&lock->cll_linkage));
			cl_lock_free(env, lock);
		}
		CS_LOCK_DEC(obj, busy);
	}
}
EXPORT_SYMBOL(cl_lock_put);
/**
 * Acquires an additional reference to a lock.
 *
 * This can be called only by caller already possessing a reference to \a
 * lock.
 *
 * \see cl_object_get(), cl_page_get()
 */
void cl_lock_get(struct cl_lock *lock)
{
	LINVRNT(cl_lock_invariant(NULL, lock));
	CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
	       atomic_read(&lock->cll_ref), lock, RETIP);
	atomic_inc(&lock->cll_ref);
}
EXPORT_SYMBOL(cl_lock_get);
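
/*
 * Illustrative sketch, an assumption rather than code from the original
 * file: the usual pairing of the reference-counting primitives above.
 *
 *	cl_lock_get(lock);		caller must already own a reference
 *	...use lock...
 *	cl_lock_put(env, lock);		may free a CLS_FREEING lock
 */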
/**
 * Acquires a reference to a lock.
 *
 * This is much like cl_lock_get(), except that this function can be used to
 * acquire initial reference to the cached lock. Caller has to deal with all
 * possible races. Use with care!
 *
 * \see cl_page_get_trust()
 */
void cl_lock_get_trust(struct cl_lock *lock)
{
	CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
	       atomic_read(&lock->cll_ref), lock, RETIP);
	if (atomic_inc_return(&lock->cll_ref) == 1)
		CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
}
EXPORT_SYMBOL(cl_lock_get_trust);

/**
 * Helper function destroying the lock that wasn't completely initialized.
 *
 * Other threads can acquire references to the top-lock through its
 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
 */
static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
{
	cl_lock_mutex_get(env, lock);
	cl_lock_cancel(env, lock);
	cl_lock_delete(env, lock);
	cl_lock_mutex_put(env, lock);
	cl_lock_put(env, lock);
}
static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
				     struct cl_object *obj,
				     const struct cl_io *io,
				     const struct cl_lock_descr *descr)
{
	struct cl_lock *lock;
	struct lu_object_header *head;

	OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, GFP_NOFS);
	if (lock != NULL) {
		atomic_set(&lock->cll_ref, 1);
		lock->cll_descr = *descr;
		lock->cll_state = CLS_NEW;
		cl_object_get(obj);
		lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
				     lock);
		INIT_LIST_HEAD(&lock->cll_layers);
		INIT_LIST_HEAD(&lock->cll_linkage);
		INIT_LIST_HEAD(&lock->cll_inclosure);
		lu_ref_init(&lock->cll_reference);
		lu_ref_init(&lock->cll_holders);
		mutex_init(&lock->cll_guard);
		lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
		init_waitqueue_head(&lock->cll_wq);
		head = obj->co_lu.lo_header;
		CS_LOCKSTATE_INC(obj, CLS_NEW);
		CS_LOCK_INC(obj, total);
		CS_LOCK_INC(obj, create);
		cl_lock_lockdep_init(lock);
		list_for_each_entry(obj, &head->loh_layers,
				    co_lu.lo_linkage) {
			int err;

			err = obj->co_ops->coo_lock_init(env, obj, lock, io);
			if (err != 0) {
				cl_lock_finish(env, lock);
				lock = ERR_PTR(err);
				break;
			}
		}
	} else
		lock = ERR_PTR(-ENOMEM);
	return lock;
}
/**
 * Transfer the lock into INTRANSIT state and return the original state.
 *
 * \pre  state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
 * \post state: CLS_INTRANSIT
 */
enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
				     struct cl_lock *lock)
{
	enum cl_lock_state state = lock->cll_state;

	LASSERT(cl_lock_is_mutexed(lock));
	LASSERT(state != CLS_INTRANSIT);
	LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
		 "Malformed lock state %d.\n", state);

	cl_lock_state_set(env, lock, CLS_INTRANSIT);
	lock->cll_intransit_owner = current;
	cl_lock_hold_add(env, lock, "intransit", current);
	return state;
}
EXPORT_SYMBOL(cl_lock_intransit);
/**
 * Exit the intransit state and restore the lock state to the original state.
 */
void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
		       enum cl_lock_state state)
{
	LASSERT(cl_lock_is_mutexed(lock));
	LASSERT(lock->cll_state == CLS_INTRANSIT);
	LASSERT(state != CLS_INTRANSIT);
	LASSERT(lock->cll_intransit_owner == current);

	lock->cll_intransit_owner = NULL;
	cl_lock_state_set(env, lock, state);
	cl_lock_unhold(env, lock, "intransit", current);
}
EXPORT_SYMBOL(cl_lock_extransit);

/**
 * Checks whether the lock is in the intransit state.
 */
int cl_lock_is_intransit(struct cl_lock *lock)
{
	LASSERT(cl_lock_is_mutexed(lock));
	return lock->cll_state == CLS_INTRANSIT &&
	       lock->cll_intransit_owner != current;
}
EXPORT_SYMBOL(cl_lock_is_intransit);
/**
 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
 * cover multiple stripes and can trigger cascading timeouts.
 */
static int cl_lock_fits_into(const struct lu_env *env,
			     const struct cl_lock *lock,
			     const struct cl_lock_descr *need,
			     const struct cl_io *io)
{
	const struct cl_lock_slice *slice;

	LINVRNT(cl_lock_invariant_trusted(env, lock));
	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
		if (slice->cls_ops->clo_fits_into != NULL &&
		    !slice->cls_ops->clo_fits_into(env, slice, need, io))
			return 0;
	}
	return 1;
}
static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
				      struct cl_object *obj,
				      const struct cl_io *io,
				      const struct cl_lock_descr *need)
{
	struct cl_lock *lock;
	struct cl_object_header *head;

	head = cl_object_header(obj);
	assert_spin_locked(&head->coh_lock_guard);
	CS_LOCK_INC(obj, lookup);
	list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
		int matched;

		matched = cl_lock_ext_match(&lock->cll_descr, need) &&
			  lock->cll_state < CLS_FREEING &&
			  lock->cll_error == 0 &&
			  !(lock->cll_flags & CLF_CANCELLED) &&
			  cl_lock_fits_into(env, lock, need, io);
		CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
		       PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
		       matched);
		if (matched) {
			cl_lock_get_trust(lock);
			CS_LOCK_INC(obj, hit);
			return lock;
		}
	}
	return NULL;
}
/**
 * Returns a lock matching description \a need.
 *
 * This is the main entry point into the cl_lock caching interface. First, a
 * cache (implemented as a per-object linked list) is consulted. If lock is
 * found there, it is returned immediately. Otherwise new lock is allocated
 * and returned. In any case, additional reference to lock is acquired.
 *
 * \see cl_object_find(), cl_page_find()
 */
static struct cl_lock *cl_lock_find(const struct lu_env *env,
				    const struct cl_io *io,
				    const struct cl_lock_descr *need)
{
	struct cl_object_header *head;
	struct cl_object *obj;
	struct cl_lock *lock;

	obj = need->cld_obj;
	head = cl_object_header(obj);

	spin_lock(&head->coh_lock_guard);
	lock = cl_lock_lookup(env, obj, io, need);
	spin_unlock(&head->coh_lock_guard);

	if (lock == NULL) {
		lock = cl_lock_alloc(env, obj, io, need);
		if (!IS_ERR(lock)) {
			struct cl_lock *ghost;

			spin_lock(&head->coh_lock_guard);
			ghost = cl_lock_lookup(env, obj, io, need);
			if (ghost == NULL) {
				cl_lock_get_trust(lock);
				list_add_tail(&lock->cll_linkage,
					      &head->coh_locks);
				spin_unlock(&head->coh_lock_guard);
				CS_LOCK_INC(obj, busy);
			} else {
				spin_unlock(&head->coh_lock_guard);
				/*
				 * Other threads can acquire references to the
				 * top-lock through its sub-locks. Hence, it
				 * cannot be cl_lock_free()-ed immediately.
				 */
				cl_lock_finish(env, lock);
				lock = ghost;
			}
		}
	}
	return lock;
}
/**
 * Returns existing lock matching given description. This is similar to
 * cl_lock_find() except that no new lock is created, and returned lock is
 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
 */
struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
			     const struct cl_lock_descr *need,
			     const char *scope, const void *source)
{
	struct cl_object_header *head;
	struct cl_object *obj;
	struct cl_lock *lock;

	obj = need->cld_obj;
	head = cl_object_header(obj);

	do {
		spin_lock(&head->coh_lock_guard);
		lock = cl_lock_lookup(env, obj, io, need);
		spin_unlock(&head->coh_lock_guard);
		if (lock == NULL)
			return NULL;

		cl_lock_mutex_get(env, lock);
		if (lock->cll_state == CLS_INTRANSIT)
			/* Don't care return value. */
			cl_lock_state_wait(env, lock);
		if (lock->cll_state == CLS_FREEING) {
			cl_lock_mutex_put(env, lock);
			cl_lock_put(env, lock);
			lock = NULL;
		}
	} while (lock == NULL);

	cl_lock_hold_add(env, lock, scope, source);
	cl_lock_user_add(env, lock);
	if (lock->cll_state == CLS_CACHED)
		cl_use_try(env, lock, 1);
	if (lock->cll_state == CLS_HELD) {
		cl_lock_mutex_put(env, lock);
		cl_lock_lockdep_acquire(env, lock, 0);
		cl_lock_put(env, lock);
	} else {
		cl_unuse_try(env, lock);
		cl_lock_unhold(env, lock, scope, source);
		cl_lock_mutex_put(env, lock);
		cl_lock_put(env, lock);
		lock = NULL;
	}

	return lock;
}
EXPORT_SYMBOL(cl_lock_peek);
/**
 * Returns a slice within a lock, corresponding to the given layer in the
 * device stack.
 *
 * \see cl_page_at()
 */
const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
				       const struct lu_device_type *dtype)
{
	const struct cl_lock_slice *slice;

	LINVRNT(cl_lock_invariant_trusted(NULL, lock));

	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
		if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
			return slice;
	}
	return NULL;
}
EXPORT_SYMBOL(cl_lock_at);
static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
{
	struct cl_thread_counters *counters;

	counters = cl_lock_counters(env, lock);
	lock->cll_depth++;
	counters->ctc_nr_locks_locked++;
	lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
	cl_lock_trace(D_TRACE, env, "got mutex", lock);
}
/**
 * Locks cl_lock object.
 *
 * This is used to manipulate cl_lock fields, and to serialize state
 * transitions in the lock state machine.
 *
 * \post cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_mutex_put()
 */
void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
{
	LINVRNT(cl_lock_invariant(env, lock));

	if (lock->cll_guarder == current) {
		LINVRNT(cl_lock_is_mutexed(lock));
		LINVRNT(lock->cll_depth > 0);
	} else {
		struct cl_object_header *hdr;
		struct cl_thread_info *info;
		int i;

		LINVRNT(lock->cll_guarder != current);
		hdr = cl_object_header(lock->cll_descr.cld_obj);
		/*
		 * Check that mutexes are taken in the bottom-to-top order.
		 */
		info = cl_env_info(env);
		for (i = 0; i < hdr->coh_nesting; ++i)
			LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
		mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
		lock->cll_guarder = current;
		LINVRNT(lock->cll_depth == 0);
	}
	cl_lock_mutex_tail(env, lock);
}
EXPORT_SYMBOL(cl_lock_mutex_get);
/**
 * Try-locks cl_lock object.
 *
 * \retval 0      \a lock was successfully locked
 * \retval -EBUSY \a lock cannot be locked right now
 *
 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
 *
 * \see cl_lock_mutex_get()
 */
int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
{
	int result;

	LINVRNT(cl_lock_invariant_trusted(env, lock));

	result = 0;
	if (lock->cll_guarder == current) {
		LINVRNT(lock->cll_depth > 0);
		cl_lock_mutex_tail(env, lock);
	} else if (mutex_trylock(&lock->cll_guard)) {
		LINVRNT(lock->cll_depth == 0);
		lock->cll_guarder = current;
		cl_lock_mutex_tail(env, lock);
	} else
		result = -EBUSY;
	return result;
}
EXPORT_SYMBOL(cl_lock_mutex_try);
/**
 * Unlocks cl_lock object.
 *
 * \pre cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_mutex_get()
 */
void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
{
	struct cl_thread_counters *counters;

	LINVRNT(cl_lock_invariant(env, lock));
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(lock->cll_guarder == current);
	LINVRNT(lock->cll_depth > 0);

	counters = cl_lock_counters(env, lock);
	LINVRNT(counters->ctc_nr_locks_locked > 0);

	cl_lock_trace(D_TRACE, env, "put mutex", lock);
	lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
	counters->ctc_nr_locks_locked--;
	if (--lock->cll_depth == 0) {
		lock->cll_guarder = NULL;
		mutex_unlock(&lock->cll_guard);
	}
}
EXPORT_SYMBOL(cl_lock_mutex_put);
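
/*
 * Illustrative sketch, an assumption rather than code from the original
 * file: the lock mutex is recursive with respect to its owner, so code
 * already running under the mutex may safely re-enter it.
 *
 *	cl_lock_mutex_get(env, lock);
 *	cl_lock_mutex_get(env, lock);	recursion, cll_depth == 2
 *	cl_lock_mutex_put(env, lock);
 *	cl_lock_mutex_put(env, lock);	cll_guard really unlocked here
 */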
/**
 * Returns true iff lock's mutex is owned by the current thread.
 */
int cl_lock_is_mutexed(struct cl_lock *lock)
{
	return lock->cll_guarder == current;
}
EXPORT_SYMBOL(cl_lock_is_mutexed);

/**
 * Returns number of cl_lock mutexes held by the current thread (environment).
 */
int cl_lock_nr_mutexed(const struct lu_env *env)
{
	struct cl_thread_info *info;
	int i;
	int locked;

	/*
	 * NOTE: if summation across all nesting levels (currently 2) proves
	 * too expensive, a summary counter can be added to
	 * struct cl_thread_info.
	 */
	info = cl_env_info(env);
	for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
		locked += info->clt_counters[i].ctc_nr_locks_locked;
	return locked;
}
EXPORT_SYMBOL(cl_lock_nr_mutexed);
static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
{
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));
	if (!(lock->cll_flags & CLF_CANCELLED)) {
		const struct cl_lock_slice *slice;

		lock->cll_flags |= CLF_CANCELLED;
		list_for_each_entry_reverse(slice, &lock->cll_layers,
					    cls_linkage) {
			if (slice->cls_ops->clo_cancel != NULL)
				slice->cls_ops->clo_cancel(env, slice);
		}
	}
}
static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
{
	struct cl_object_header *head;
	const struct cl_lock_slice *slice;

	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));

	if (lock->cll_state < CLS_FREEING) {
		bool in_cache;

		LASSERT(lock->cll_state != CLS_INTRANSIT);
		cl_lock_state_set(env, lock, CLS_FREEING);

		head = cl_object_header(lock->cll_descr.cld_obj);

		spin_lock(&head->coh_lock_guard);
		in_cache = !list_empty(&lock->cll_linkage);
		if (in_cache)
			list_del_init(&lock->cll_linkage);
		spin_unlock(&head->coh_lock_guard);

		if (in_cache) /* coh_locks cache holds a refcount. */
			cl_lock_put(env, lock);

		/*
		 * From now on, no new references to this lock can be acquired
		 * by cl_lock_lookup().
		 */
		list_for_each_entry_reverse(slice, &lock->cll_layers,
					    cls_linkage) {
			if (slice->cls_ops->clo_delete != NULL)
				slice->cls_ops->clo_delete(env, slice);
		}
		/*
		 * From now on, no new references to this lock can be acquired
		 * by layer-specific means (like a pointer from struct
		 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
		 * lovsub or osc).
		 *
		 * Lock will be finally freed in cl_lock_put() when last of
		 * existing references goes away.
		 */
	}
}
/**
 * Modifies cl_lock::cll_holds counter for a given lock. Also, for a
 * top-lock (nesting == 0) accounts for this modification in the per-thread
 * debugging counters. Sub-lock holds can be released by a thread different
 * from one that acquired it.
 */
static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
			     int delta)
{
	struct cl_thread_counters *counters;
	enum clt_nesting_level nesting;

	lock->cll_holds += delta;
	nesting = cl_lock_nesting(lock);
	if (nesting == CNL_TOP) {
		counters = &cl_env_info(env)->clt_counters[CNL_TOP];
		counters->ctc_nr_held += delta;
		LASSERT(counters->ctc_nr_held >= 0);
	}
}

/**
 * Modifies cl_lock::cll_users counter for a given lock. See
 * cl_lock_hold_mod() for the explanation of the debugging code.
 */
static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
			     int delta)
{
	struct cl_thread_counters *counters;
	enum clt_nesting_level nesting;

	lock->cll_users += delta;
	nesting = cl_lock_nesting(lock);
	if (nesting == CNL_TOP) {
		counters = &cl_env_info(env)->clt_counters[CNL_TOP];
		counters->ctc_nr_used += delta;
		LASSERT(counters->ctc_nr_used >= 0);
	}
}
void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
			  const char *scope, const void *source)
{
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));
	LASSERT(lock->cll_holds > 0);

	cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
	lu_ref_del(&lock->cll_holders, scope, source);
	cl_lock_hold_mod(env, lock, -1);
	if (lock->cll_holds == 0) {
		CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
		if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
		    lock->cll_descr.cld_mode == CLM_GROUP ||
		    lock->cll_state != CLS_CACHED)
			/*
			 * If lock is still phantom or grouplock when user is
			 * done with it, destroy the lock.
			 */
			lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
		if (lock->cll_flags & CLF_CANCELPEND) {
			lock->cll_flags &= ~CLF_CANCELPEND;
			cl_lock_cancel0(env, lock);
		}
		if (lock->cll_flags & CLF_DOOMED) {
			/* no longer doomed: it's dead... Jim. */
			lock->cll_flags &= ~CLF_DOOMED;
			cl_lock_delete0(env, lock);
		}
	}
}
EXPORT_SYMBOL(cl_lock_hold_release);
/**
 * Waits until lock state is changed.
 *
 * This function is called with cl_lock mutex locked, atomically releases
 * mutex and goes to sleep, waiting for a lock state change (signaled by
 * cl_lock_signal()), and re-acquires the mutex before return.
 *
 * This function is used to wait until lock state machine makes some progress
 * and to emulate synchronous operations on top of asynchronous lock
 * interface.
 *
 * \retval -ERESTARTSYS wait was interrupted
 *
 * \retval 0 wait wasn't interrupted
 *
 * \pre cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_signal()
 */
int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
{
	wait_queue_t waiter;
	sigset_t blocked;
	int result;

	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));
	LASSERT(lock->cll_depth == 1);
	LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */

	cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
	result = lock->cll_error;
	if (result == 0) {
		/* To avoid being interrupted by the 'non-fatal' signals
		 * (SIGCHLD, for instance), we'd block them temporarily. */
		blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);

		init_waitqueue_entry(&waiter, current);
		add_wait_queue(&lock->cll_wq, &waiter);
		set_current_state(TASK_INTERRUPTIBLE);
		cl_lock_mutex_put(env, lock);

		LASSERT(cl_lock_nr_mutexed(env) == 0);

		/* Returning ERESTARTSYS instead of EINTR so syscalls
		 * can be restarted if signals are pending here */
		result = -ERESTARTSYS;
		if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
			schedule();
			if (!cfs_signal_pending())
				result = 0;
		}

		cl_lock_mutex_get(env, lock);
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&lock->cll_wq, &waiter);

		/* Restore old blocked signals */
		cfs_restore_sigs(blocked);
	}
	return result;
}
EXPORT_SYMBOL(cl_lock_state_wait);
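
/*
 * Illustrative sketch, mirroring how cl_wait() later in this file drives the
 * function above: the *_try() state-machine steps are retried around
 * cl_lock_state_wait() until they stop asking the caller to wait.
 *
 *	while ((result = cl_wait_try(env, lock)) == CLO_WAIT) {
 *		result = cl_lock_state_wait(env, lock);
 *		if (result != 0)
 *			break;
 *	}
 */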
static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
				 enum cl_lock_state state)
{
	const struct cl_lock_slice *slice;

	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));

	list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
		if (slice->cls_ops->clo_state != NULL)
			slice->cls_ops->clo_state(env, slice, state);
	wake_up_all(&lock->cll_wq);
}

/**
 * Notifies waiters that lock state changed.
 *
 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
 * layers about state change by calling cl_lock_operations::clo_state()
 * top-to-bottom.
 */
void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
{
	cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
	cl_lock_state_signal(env, lock, lock->cll_state);
}
EXPORT_SYMBOL(cl_lock_signal);
/**
 * Changes lock state.
 *
 * This function is invoked to notify layers that lock state changed, possibly
 * as a result of an asynchronous event such as call-back reception.
 *
 * \post lock->cll_state == state
 *
 * \see cl_lock_operations::clo_state()
 */
void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
		       enum cl_lock_state state)
{
	LASSERT(lock->cll_state <= state ||
		(lock->cll_state == CLS_CACHED &&
		 (state == CLS_HELD || /* lock found in cache */
		  state == CLS_NEW ||  /* sub-lock canceled */
		  state == CLS_INTRANSIT)) ||
		/* lock is in transit state */
		lock->cll_state == CLS_INTRANSIT);

	if (lock->cll_state != state) {
		CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
		CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);

		cl_lock_state_signal(env, lock, state);
		lock->cll_state = state;
	}
}
EXPORT_SYMBOL(cl_lock_state_set);
static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
{
	const struct cl_lock_slice *slice;
	int result;

	do {
		result = 0;

		LINVRNT(cl_lock_is_mutexed(lock));
		LINVRNT(cl_lock_invariant(env, lock));
		LASSERT(lock->cll_state == CLS_INTRANSIT);

		result = -ENOSYS;
		list_for_each_entry_reverse(slice, &lock->cll_layers,
					    cls_linkage) {
			if (slice->cls_ops->clo_unuse != NULL) {
				result = slice->cls_ops->clo_unuse(env, slice);
				if (result != 0)
					break;
			}
		}
		LASSERT(result != -ENOSYS);
	} while (result == CLO_REPEAT);

	return result;
}
/**
 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
 * If \a atomic is set and use fails, the lock is unused again to roll its
 * state back, keeping the whole use process atomic.
 */
int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
{
	const struct cl_lock_slice *slice;
	int result;
	enum cl_lock_state state;

	cl_lock_trace(D_DLMTRACE, env, "use lock", lock);

	LASSERT(lock->cll_state == CLS_CACHED);
	if (lock->cll_error)
		return lock->cll_error;

	result = -ENOSYS;
	state = cl_lock_intransit(env, lock);
	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
		if (slice->cls_ops->clo_use != NULL) {
			result = slice->cls_ops->clo_use(env, slice);
			if (result != 0)
				break;
		}
	}
	LASSERT(result != -ENOSYS);

	LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
		 lock->cll_state);

	if (result == 0) {
		state = CLS_HELD;
	} else {
		if (result == -ESTALE) {
			/*
			 * -ESTALE means a sublock is being cancelled at this
			 * moment; set lock state back to NEW and ask the
			 * caller to repeat.
			 */
			state = CLS_NEW;
			result = CLO_REPEAT;
		}

		/* @atomic means back-off-on-failure. */
		if (atomic) {
			int rc;

			rc = cl_unuse_try_internal(env, lock);
			/* Vet the results. */
			if (rc < 0 && result > 0)
				result = rc;
		}
	}
	cl_lock_extransit(env, lock, state);
	return result;
}
EXPORT_SYMBOL(cl_use_try);
/**
 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
 * top-to-bottom.
 */
static int cl_enqueue_kick(const struct lu_env *env,
			   struct cl_lock *lock,
			   struct cl_io *io, __u32 flags)
{
	int result;
	const struct cl_lock_slice *slice;

	result = -ENOSYS;
	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
		if (slice->cls_ops->clo_enqueue != NULL) {
			result = slice->cls_ops->clo_enqueue(env,
							     slice, io, flags);
			if (result != 0)
				break;
		}
	}
	LASSERT(result != -ENOSYS);
	return result;
}
/**
 * Tries to enqueue a lock.
 *
 * This function is called repeatedly by cl_enqueue() until either lock is
 * enqueued, or error occurs. This function does not block waiting for
 * networking communication to complete.
 *
 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
 *			   lock->cll_state == CLS_HELD)
 *
 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
 * \see cl_lock_state::CLS_ENQUEUED
 */
int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
		   struct cl_io *io, __u32 flags)
{
	int result;

	cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
	do {
		LINVRNT(cl_lock_is_mutexed(lock));

		result = lock->cll_error;
		if (result != 0)
			break;

		switch (lock->cll_state) {
		case CLS_NEW:
			cl_lock_state_set(env, lock, CLS_QUEUING);
			/* fall-through */
		case CLS_QUEUING:
			/* kick layers. */
			result = cl_enqueue_kick(env, lock, io, flags);
			/* For AGL case, the cl_lock::cll_state may
			 * become CLS_HELD already. */
			if (result == 0 && lock->cll_state == CLS_QUEUING)
				cl_lock_state_set(env, lock, CLS_ENQUEUED);
			break;
		case CLS_INTRANSIT:
			LASSERT(cl_lock_is_intransit(lock));
			result = CLO_WAIT;
			break;
		case CLS_CACHED:
			/* yank lock from the cache. */
			result = cl_use_try(env, lock, 0);
			break;
		case CLS_ENQUEUED:
		case CLS_HELD:
			result = 0;
			break;
		default:
		case CLS_FREEING:
			/*
			 * impossible, only held locks with increased
			 * ->cll_holds can be enqueued, and they cannot be
			 * freed.
			 */
			LBUG();
		}
	} while (result == CLO_REPEAT);
	return result;
}
EXPORT_SYMBOL(cl_enqueue_try);
/**
 * Cancel the conflicting lock found during previous enqueue.
 *
 * \retval 0 conflicting lock has been canceled.
 * \retval -ve error code.
 */
int cl_lock_enqueue_wait(const struct lu_env *env,
			 struct cl_lock *lock,
			 int keep_mutex)
{
	struct cl_lock *conflict;
	int rc = 0;

	LASSERT(cl_lock_is_mutexed(lock));
	LASSERT(lock->cll_state == CLS_QUEUING);
	LASSERT(lock->cll_conflict != NULL);

	conflict = lock->cll_conflict;
	lock->cll_conflict = NULL;

	cl_lock_mutex_put(env, lock);
	LASSERT(cl_lock_nr_mutexed(env) == 0);

	cl_lock_mutex_get(env, conflict);
	cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
	cl_lock_cancel(env, conflict);
	cl_lock_delete(env, conflict);

	while (conflict->cll_state != CLS_FREEING) {
		rc = cl_lock_state_wait(env, conflict);
		if (rc != 0)
			break;
	}
	cl_lock_mutex_put(env, conflict);
	lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
	cl_lock_put(env, conflict);

	if (keep_mutex)
		cl_lock_mutex_get(env, lock);

	LASSERT(rc <= 0);
	return rc;
}
EXPORT_SYMBOL(cl_lock_enqueue_wait);
static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
			     struct cl_io *io, __u32 enqflags)
{
	int result;

	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));
	LASSERT(lock->cll_holds > 0);

	cl_lock_user_add(env, lock);
	do {
		result = cl_enqueue_try(env, lock, io, enqflags);
		if (result == CLO_WAIT) {
			if (lock->cll_conflict != NULL)
				result = cl_lock_enqueue_wait(env, lock, 1);
			else
				result = cl_lock_state_wait(env, lock);
			if (result == 0)
				continue;
		}
		break;
	} while (1);
	if (result != 0)
		cl_unuse_try(env, lock);
	LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
		     lock->cll_state == CLS_ENQUEUED ||
		     lock->cll_state == CLS_HELD));
	return result;
}
/**
 * Tries to unlock a lock.
 *
 * This function is called to release underlying resource:
 * 1. for top lock, the resource is sublocks it held;
 * 2. for sublock, the resource is the reference to dlmlock.
 *
 * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
 *
 * \see cl_unuse() cl_lock_operations::clo_unuse()
 * \see cl_lock_state::CLS_CACHED
 */
int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
{
	int result;
	enum cl_lock_state state = CLS_NEW;

	cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);

	if (lock->cll_users > 1) {
		cl_lock_user_del(env, lock);
		return 0;
	}

	/* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
	 * underlying resources. */
	if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
		cl_lock_user_del(env, lock);
		return 0;
	}

	/*
	 * New lock users (->cll_users) are not protecting unlocking
	 * from proceeding. From this point, lock eventually reaches
	 * CLS_CACHED, is reinitialized to CLS_NEW or fails into
	 * CLS_FREEING state.
	 */
	state = cl_lock_intransit(env, lock);

	result = cl_unuse_try_internal(env, lock);
	LASSERT(lock->cll_state == CLS_INTRANSIT);
	LASSERT(result != CLO_WAIT);
	cl_lock_user_del(env, lock);
	if (result == 0 || result == -ESTALE) {
		/*
		 * Return lock back to the cache. This is the only
		 * place where lock is moved into CLS_CACHED state.
		 *
		 * If one of ->clo_unuse() methods returned -ESTALE, lock
		 * cannot be placed into cache and has to be
		 * re-initialized. This happens e.g., when a sub-lock was
		 * canceled while unlocking was in progress.
		 */
		if (state == CLS_HELD && result == 0)
			state = CLS_CACHED;
		else
			state = CLS_NEW;
		cl_lock_extransit(env, lock, state);

		/*
		 * Hide -ESTALE error.
		 * If the lock is a glimpse lock with multiple stripes, one of
		 * its sublocks may have returned -ENAVAIL while the other
		 * sublocks matched write locks. In this case we can't set
		 * this lock to error, because otherwise some of its sublocks
		 * may not be canceled, and some dirty pages would never be
		 * written to OSTs. -jay
		 */
		result = 0;
	} else {
		CERROR("result = %d, this is unlikely!\n", result);
		state = CLS_NEW;
		cl_lock_extransit(env, lock, state);
	}
	return result ?: lock->cll_error;
}
EXPORT_SYMBOL(cl_unuse_try);
static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
{
	int result;

	result = cl_unuse_try(env, lock);
	if (result)
		CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
}

/**
 * Unlocks a lock.
 */
void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
{
	cl_lock_mutex_get(env, lock);
	cl_unuse_locked(env, lock);
	cl_lock_mutex_put(env, lock);
	cl_lock_lockdep_release(env, lock);
}
EXPORT_SYMBOL(cl_unuse);
/**
 * Tries to wait for a lock.
 *
 * This function is called repeatedly by cl_wait() until either lock is
 * granted, or error occurs. This function does not block waiting for network
 * communication to complete.
 *
 * \see cl_wait() cl_lock_operations::clo_wait()
 * \see cl_lock_state::CLS_HELD
 */
int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
{
	const struct cl_lock_slice *slice;
	int result;

	cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
	do {
		LINVRNT(cl_lock_is_mutexed(lock));
		LINVRNT(cl_lock_invariant(env, lock));
		LASSERTF(lock->cll_state == CLS_QUEUING ||
			 lock->cll_state == CLS_ENQUEUED ||
			 lock->cll_state == CLS_HELD ||
			 lock->cll_state == CLS_INTRANSIT,
			 "lock state: %d\n", lock->cll_state);
		LASSERT(lock->cll_users > 0);
		LASSERT(lock->cll_holds > 0);

		result = lock->cll_error;
		if (result != 0)
			break;

		if (cl_lock_is_intransit(lock)) {
			result = CLO_WAIT;
			break;
		}

		if (lock->cll_state == CLS_HELD)
			/* nothing to do */
			break;

		result = -ENOSYS;
		list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
			if (slice->cls_ops->clo_wait != NULL) {
				result = slice->cls_ops->clo_wait(env, slice);
				if (result != 0)
					break;
			}
		}
		LASSERT(result != -ENOSYS);
		if (result == 0) {
			LASSERT(lock->cll_state != CLS_INTRANSIT);
			cl_lock_state_set(env, lock, CLS_HELD);
		}
	} while (result == CLO_REPEAT);
	return result;
}
EXPORT_SYMBOL(cl_wait_try);
/**
 * Waits until enqueued lock is granted.
 *
 * \pre current thread or io owns a hold on the lock
 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
 *			  lock->cll_state == CLS_HELD)
 *
 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
 */
int cl_wait(const struct lu_env *env, struct cl_lock *lock)
{
	int result;

	cl_lock_mutex_get(env, lock);

	LINVRNT(cl_lock_invariant(env, lock));
	LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
		 "Wrong state %d\n", lock->cll_state);
	LASSERT(lock->cll_holds > 0);

	do {
		result = cl_wait_try(env, lock);
		if (result == CLO_WAIT) {
			result = cl_lock_state_wait(env, lock);
			if (result == 0)
				continue;
		}
		break;
	} while (1);
	if (result < 0) {
		cl_unuse_try(env, lock);
		cl_lock_lockdep_release(env, lock);
	}
	cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
	cl_lock_mutex_put(env, lock);
	LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
	return result;
}
EXPORT_SYMBOL(cl_wait);
/**
 * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
 * weight.
 */
unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
{
	const struct cl_lock_slice *slice;
	unsigned long pound;
	unsigned long ounce;

	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));

	pound = 0;
	list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
		if (slice->cls_ops->clo_weigh != NULL) {
			ounce = slice->cls_ops->clo_weigh(env, slice);
			pound += ounce;
			if (pound < ounce) /* over-weight^Wflow */
				pound = ~0UL;
		}
	}
	return pound;
}
EXPORT_SYMBOL(cl_lock_weigh);
/**
 * Notifies layers that lock description changed.
 *
 * The server can grant client a lock different from one that was requested
 * (e.g., larger in extent). This method is called when actually granted lock
 * description becomes known to let layers accommodate the changed lock
 * description.
 *
 * \see cl_lock_operations::clo_modify()
 */
int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
		   const struct cl_lock_descr *desc)
{
	const struct cl_lock_slice *slice;
	struct cl_object *obj = lock->cll_descr.cld_obj;
	struct cl_object_header *hdr = cl_object_header(obj);
	int result;

	cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
	/* don't allow object to change */
	LASSERT(obj == desc->cld_obj);
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));

	list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
		if (slice->cls_ops->clo_modify != NULL) {
			result = slice->cls_ops->clo_modify(env, slice, desc);
			if (result != 0)
				return result;
		}
	}
	CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
		      PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
	/*
	 * Just replace description in place. Nothing more is needed for
	 * now. If locks were indexed according to their extent and/or mode,
	 * that index would have to be updated here.
	 */
	spin_lock(&hdr->coh_lock_guard);
	lock->cll_descr = *desc;
	spin_unlock(&hdr->coh_lock_guard);
	return 0;
}
EXPORT_SYMBOL(cl_lock_modify);
/**
 * Initializes lock closure with a given origin.
 *
 * \see cl_lock_closure
 */
void cl_lock_closure_init(const struct lu_env *env,
			  struct cl_lock_closure *closure,
			  struct cl_lock *origin, int wait)
{
	LINVRNT(cl_lock_is_mutexed(origin));
	LINVRNT(cl_lock_invariant(env, origin));

	INIT_LIST_HEAD(&closure->clc_list);
	closure->clc_origin = origin;
	closure->clc_wait = wait;
	closure->clc_nr = 0;
}
EXPORT_SYMBOL(cl_lock_closure_init);
/**
 * Builds a closure of \a lock.
 *
 * Building of a closure consists of adding initial lock (\a lock) into it,
 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
 * methods might call cl_lock_closure_build() recursively again, adding more
 * locks to the closure, etc.
 *
 * \see cl_lock_closure
 */
int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
			  struct cl_lock_closure *closure)
{
	const struct cl_lock_slice *slice;
	int result;

	LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
	LINVRNT(cl_lock_invariant(env, closure->clc_origin));

	result = cl_lock_enclosure(env, lock, closure);
	if (result == 0) {
		list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
			if (slice->cls_ops->clo_closure != NULL) {
				result = slice->cls_ops->clo_closure(env, slice,
								     closure);
				if (result != 0)
					break;
			}
		}
	}
	if (result != 0)
		cl_lock_disclosure(env, closure);
	return result;
}
EXPORT_SYMBOL(cl_lock_closure_build);
/**
 * Adds new lock to a closure.
 *
 * Try-locks \a lock and if succeeded, adds it to the closure (never more than
 * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
 * until next try-lock is likely to succeed.
 */
int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
		      struct cl_lock_closure *closure)
{
	int result = 0;

	cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
	if (!cl_lock_mutex_try(env, lock)) {
		/*
		 * If lock->cll_inclosure is not empty, lock is already in
		 * this closure.
		 */
		if (list_empty(&lock->cll_inclosure)) {
			cl_lock_get_trust(lock);
			lu_ref_add(&lock->cll_reference, "closure", closure);
			list_add(&lock->cll_inclosure, &closure->clc_list);
			closure->clc_nr++;
		} else
			cl_lock_mutex_put(env, lock);
		result = 0;
	} else {
		cl_lock_disclosure(env, closure);
		if (closure->clc_wait) {
			cl_lock_get_trust(lock);
			lu_ref_add(&lock->cll_reference, "closure-w", closure);
			cl_lock_mutex_put(env, closure->clc_origin);

			LASSERT(cl_lock_nr_mutexed(env) == 0);
			cl_lock_mutex_get(env, lock);
			cl_lock_mutex_put(env, lock);

			cl_lock_mutex_get(env, closure->clc_origin);
			lu_ref_del(&lock->cll_reference, "closure-w", closure);
			cl_lock_put(env, lock);
		}
		result = CLO_REPEAT;
	}
	return result;
}
EXPORT_SYMBOL(cl_lock_enclosure);
/** Releases mutexes of enclosed locks. */
void cl_lock_disclosure(const struct lu_env *env,
			struct cl_lock_closure *closure)
{
	struct cl_lock *scan;
	struct cl_lock *temp;

	cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
	list_for_each_entry_safe(scan, temp, &closure->clc_list,
				 cll_inclosure) {
		list_del_init(&scan->cll_inclosure);
		cl_lock_mutex_put(env, scan);
		lu_ref_del(&scan->cll_reference, "closure", closure);
		cl_lock_put(env, scan);
		closure->clc_nr--;
	}
	LASSERT(closure->clc_nr == 0);
}
EXPORT_SYMBOL(cl_lock_disclosure);
/** Finalizes a closure. */
void cl_lock_closure_fini(struct cl_lock_closure *closure)
{
	LASSERT(closure->clc_nr == 0);
	LASSERT(list_empty(&closure->clc_list));
}
EXPORT_SYMBOL(cl_lock_closure_fini);
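
/*
 * Illustrative sketch, an assumption rather than code from the original
 * file: a typical closure lifecycle built from the primitives above.
 *
 *	struct cl_lock_closure closure;
 *	int result;
 *
 *	cl_lock_closure_init(env, &closure, origin, 1);
 *	result = cl_lock_closure_build(env, lock, &closure);
 *	if (result == 0) {
 *		...all enclosed locks are mutexed here...
 *		cl_lock_disclosure(env, &closure);
 *	}
 *	cl_lock_closure_fini(&closure);
 */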
/**
 * Destroys this lock. Notifies layers (bottom-to-top) that lock is being
 * destroyed, then destroys the lock. If there are holds on the lock, postpones
 * destruction until all holds are released. This is called when a decision is
 * made to destroy the lock in the future. E.g., when a blocking AST is
 * received on it, or fatal communication error happens.
 *
 * Caller must have a reference on this lock to prevent a situation, when
 * deleted lock lingers in memory for indefinite time, because nobody calls
 * cl_lock_put() to finish it.
 *
 * \pre atomic_read(&lock->cll_ref) > 0
 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
 *	     cl_lock_nr_mutexed(env) == 1)
 *	[i.e., if a top-lock is deleted, mutexes of no other locks can be
 *	held, as deletion of sub-locks might require releasing a top-lock
 *	mutex]
 *
 * \see cl_lock_operations::clo_delete()
 * \see cl_lock::cll_holds
 */
void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
{
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));
	LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
		     cl_lock_nr_mutexed(env) == 1));

	cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
	if (lock->cll_holds == 0)
		cl_lock_delete0(env, lock);
	else
		lock->cll_flags |= CLF_DOOMED;
}
EXPORT_SYMBOL(cl_lock_delete);
/**
 * Marks lock as irrecoverably failed, and marks it for destruction. This
 * happens when, e.g., server fails to grant a lock to us, or networking
 * time-out happens.
 *
 * \pre atomic_read(&lock->cll_ref) > 0
 *
 * \see clo_lock_delete()
 * \see cl_lock::cll_holds
 */
void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
{
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));

	if (lock->cll_error == 0 && error != 0) {
		cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
		lock->cll_error = error;
		cl_lock_signal(env, lock);
		cl_lock_cancel(env, lock);
		cl_lock_delete(env, lock);
	}
}
EXPORT_SYMBOL(cl_lock_error);
/**
 * Cancels this lock. Notifies layers (bottom-to-top) that lock is being
 * cancelled, then destroys the lock. If there are holds on the lock,
 * postpones cancellation until all holds are released.
 *
 * Cancellation notification is delivered to layers at most once.
 *
 * \see cl_lock_operations::clo_cancel()
 * \see cl_lock::cll_holds
 */
void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
{
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));

	cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
	if (lock->cll_holds == 0)
		cl_lock_cancel0(env, lock);
	else
		lock->cll_flags |= CLF_CANCELPEND;
}
EXPORT_SYMBOL(cl_lock_cancel);
/**
 * Finds an existing lock covering given index and optionally different from a
 * given \a except lock.
 */
struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
				 struct cl_object *obj, pgoff_t index,
				 struct cl_lock *except,
				 int pending, int canceld)
{
	struct cl_object_header *head;
	struct cl_lock *scan;
	struct cl_lock *lock;
	struct cl_lock_descr *need;

	head = cl_object_header(obj);
	need = &cl_env_info(env)->clt_descr;
	lock = NULL;

	need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
				    * not PHANTOM */
	need->cld_start = need->cld_end = index;
	need->cld_enq_flags = 0;

	spin_lock(&head->coh_lock_guard);
	/* It is fine to match any group lock since there could be only one
	 * with a unique gid and it conflicts with all other lock modes too */
	list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
		if (scan != except &&
		    (scan->cll_descr.cld_mode == CLM_GROUP ||
		    cl_lock_ext_match(&scan->cll_descr, need)) &&
		    scan->cll_state >= CLS_HELD &&
		    scan->cll_state < CLS_FREEING &&
		    /*
		     * This check is racy as the lock can be canceled right
		     * after it is done, but this is fine, because page exists
		     * anyway.
		     */
		    (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
		    (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
			/* Don't increase cs_hit here since this
			 * is just a helper function. */
			cl_lock_get_trust(scan);
			lock = scan;
			break;
		}
	}
	spin_unlock(&head->coh_lock_guard);
	return lock;
}
EXPORT_SYMBOL(cl_lock_at_pgoff);
/**
 * Calculates the page offset at the layer of @lock.
 * At the time of this writing, @page is top page and @lock is sub lock.
 */
static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
{
	struct lu_device_type *dtype;
	const struct cl_page_slice *slice;

	dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
	slice = cl_page_at(page, dtype);
	LASSERT(slice != NULL);
	return slice->cpl_page->cp_index;
}
/**
 * Checks if page @page is covered by an extra lock or discards it.
 */
static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
				struct cl_page *page, void *cbdata)
{
	struct cl_thread_info *info = cl_env_info(env);
	struct cl_lock *lock = cbdata;
	pgoff_t index = pgoff_at_lock(page, lock);

	if (index >= info->clt_fn_index) {
		struct cl_lock *tmp;

		/* refresh non-overlapped index */
		tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
				       lock, 1, 0);
		if (tmp != NULL) {
			/* Cache the first-non-overlapped index so as to skip
			 * all pages within [index, clt_fn_index). This
			 * is safe because if tmp lock is canceled, it will
			 * discard these pages. */
			info->clt_fn_index = tmp->cll_descr.cld_end + 1;
			if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
				info->clt_fn_index = CL_PAGE_EOF;
			cl_lock_put(env, tmp);
		} else if (cl_page_own(env, io, page) == 0) {
			/* discard the page */
			cl_page_unmap(env, io, page);
			cl_page_discard(env, io, page);
			cl_page_disown(env, io, page);
		} else {
			LASSERT(page->cp_state == CPS_FREEING);
		}
	}

	info->clt_next_index = index + 1;
	return CLP_GANG_OKAY;
}
static int discard_cb(const struct lu_env *env, struct cl_io *io,
		      struct cl_page *page, void *cbdata)
{
	struct cl_thread_info *info = cl_env_info(env);
	struct cl_lock *lock = cbdata;

	LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
	KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
		      !PageWriteback(cl_page_vmpage(env, page))));
	KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
		      !PageDirty(cl_page_vmpage(env, page))));

	info->clt_next_index = pgoff_at_lock(page, lock) + 1;
	if (cl_page_own(env, io, page) == 0) {
		/* discard the page */
		cl_page_unmap(env, io, page);
		cl_page_discard(env, io, page);
		cl_page_disown(env, io, page);
	} else {
		LASSERT(page->cp_state == CPS_FREEING);
	}

	return CLP_GANG_OKAY;
}
/**
 * Discards pages protected by the given lock. This function traverses the
 * radix tree to find all covering pages and discards them. If a page is
 * covered by another lock, it should remain in cache.
 *
 * If error happens on any step, the process continues anyway (the reasoning
 * behind this being that lock cancellation cannot be delayed indefinitely).
 */
int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
{
	struct cl_thread_info *info = cl_env_info(env);
	struct cl_io *io = &info->clt_io;
	struct cl_lock_descr *descr = &lock->cll_descr;
	cl_page_gang_cb_t cb;
	int res;
	int result;

	LINVRNT(cl_lock_invariant(env, lock));

	io->ci_obj = cl_object_top(descr->cld_obj);
	io->ci_ignore_layout = 1;
	result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
	if (result != 0)
		goto out;

	cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
	info->clt_fn_index = info->clt_next_index = descr->cld_start;
	do {
		res = cl_page_gang_lookup(env, descr->cld_obj, io,
					  info->clt_next_index, descr->cld_end,
					  cb, (void *)lock);
		if (info->clt_next_index > descr->cld_end)
			break;

		if (res == CLP_GANG_RESCHED)
			cond_resched();
	} while (res != CLP_GANG_OKAY);
out:
	cl_io_fini(env, io);
	return result;
}
EXPORT_SYMBOL(cl_lock_discard_pages);
/**
 * Eliminates all locks for a given object.
 *
 * Caller has to guarantee that no lock is in active use.
 *
 * \param cancel when this is set, cl_locks_prune() cancels locks before
 *		 destroying them.
 */
void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
{
	struct cl_object_header *head;
	struct cl_lock *lock;

	head = cl_object_header(obj);
	/*
	 * If locks are destroyed without cancellation, all pages must be
	 * already destroyed (as otherwise they will be left unprotected).
	 */
	LASSERT(ergo(!cancel,
		     head->coh_tree.rnode == NULL && head->coh_pages == 0));

	spin_lock(&head->coh_lock_guard);
	while (!list_empty(&head->coh_locks)) {
		lock = container_of(head->coh_locks.next,
				    struct cl_lock, cll_linkage);
		cl_lock_get_trust(lock);
		spin_unlock(&head->coh_lock_guard);
		lu_ref_add(&lock->cll_reference, "prune", current);

again:
		cl_lock_mutex_get(env, lock);
		if (lock->cll_state < CLS_FREEING) {
			LASSERT(lock->cll_users <= 1);
			if (unlikely(lock->cll_users == 1)) {
				struct l_wait_info lwi = { 0 };

				cl_lock_mutex_put(env, lock);
				l_wait_event(lock->cll_wq,
					     lock->cll_users == 0,
					     &lwi);
				goto again;
			}

			if (cancel)
				cl_lock_cancel(env, lock);
			cl_lock_delete(env, lock);
		}
		cl_lock_mutex_put(env, lock);
		lu_ref_del(&lock->cll_reference, "prune", current);
		cl_lock_put(env, lock);
		spin_lock(&head->coh_lock_guard);
	}
	spin_unlock(&head->coh_lock_guard);
}
EXPORT_SYMBOL(cl_locks_prune);
static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
					  const struct cl_io *io,
					  const struct cl_lock_descr *need,
					  const char *scope, const void *source)
{
	struct cl_lock *lock;

	while (1) {
		lock = cl_lock_find(env, io, need);
		if (IS_ERR(lock))
			break;
		cl_lock_mutex_get(env, lock);
		if (lock->cll_state < CLS_FREEING &&
		    !(lock->cll_flags & CLF_CANCELLED)) {
			cl_lock_hold_mod(env, lock, 1);
			lu_ref_add(&lock->cll_holders, scope, source);
			lu_ref_add(&lock->cll_reference, scope, source);
			break;
		}
		cl_lock_mutex_put(env, lock);
		cl_lock_put(env, lock);
	}
	return lock;
}
/**
 * Returns a lock matching \a need description with a reference and a hold on
 * it.
 *
 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
 * guarantees that lock is not in the CLS_FREEING state on return.
 */
struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
			     const struct cl_lock_descr *need,
			     const char *scope, const void *source)
{
	struct cl_lock *lock;

	lock = cl_lock_hold_mutex(env, io, need, scope, source);
	if (!IS_ERR(lock))
		cl_lock_mutex_put(env, lock);
	return lock;
}
EXPORT_SYMBOL(cl_lock_hold);
/**
 * Main high-level entry point of cl_lock interface that finds existing or
 * enqueues new lock matching given description.
 */
struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
				const struct cl_lock_descr *need,
				const char *scope, const void *source)
{
	struct cl_lock *lock;
	int rc;
	__u32 enqflags = need->cld_enq_flags;

	do {
		lock = cl_lock_hold_mutex(env, io, need, scope, source);
		if (IS_ERR(lock))
			break;

		rc = cl_enqueue_locked(env, lock, io, enqflags);
		if (rc == 0) {
			if (cl_lock_fits_into(env, lock, need, io)) {
				if (!(enqflags & CEF_AGL)) {
					cl_lock_mutex_put(env, lock);
					cl_lock_lockdep_acquire(env, lock,
								enqflags);
					break;
				}
				rc = 1;
			}
			cl_unuse_locked(env, lock);
		}
		cl_lock_trace(D_DLMTRACE, env,
			      rc <= 0 ? "enqueue failed" : "agl succeed", lock);
		cl_lock_hold_release(env, lock, scope, source);
		cl_lock_mutex_put(env, lock);
		lu_ref_del(&lock->cll_reference, scope, source);
		cl_lock_put(env, lock);
		if (rc > 0) {
			LASSERT(enqflags & CEF_AGL);
			lock = NULL;
		} else if (rc != 0) {
			lock = ERR_PTR(rc);
		}
	} while (rc == 0);
	return lock;
}
EXPORT_SYMBOL(cl_lock_request);
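
/*
 * Illustrative sketch, modeled on typical callers (e.g. the glimpse code);
 * an assumption rather than code from the original file. The scope string
 * "sample" is a placeholder.
 *
 *	lock = cl_lock_request(env, io, need, "sample", current);
 *	if (!IS_ERR(lock)) {
 *		rc = cl_wait(env, lock);
 *		if (rc == 0) {
 *			...lock is CLS_HELD: do the protected work...
 *			cl_unuse(env, lock);
 *		}
 *		cl_lock_release(env, lock, "sample", current);
 *	}
 */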
/**
 * Adds a hold to a known lock.
 */
void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
		      const char *scope, const void *source)
{
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));
	LASSERT(lock->cll_state != CLS_FREEING);

	cl_lock_hold_mod(env, lock, 1);
	cl_lock_get(lock);
	lu_ref_add(&lock->cll_holders, scope, source);
	lu_ref_add(&lock->cll_reference, scope, source);
}
EXPORT_SYMBOL(cl_lock_hold_add);

/**
 * Releases a hold and a reference on a lock, on which caller acquired a
 * hold.
 */
void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
		    const char *scope, const void *source)
{
	LINVRNT(cl_lock_invariant(env, lock));

	cl_lock_hold_release(env, lock, scope, source);
	lu_ref_del(&lock->cll_reference, scope, source);
	cl_lock_put(env, lock);
}
EXPORT_SYMBOL(cl_lock_unhold);

/**
 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
 */
void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
		     const char *scope, const void *source)
{
	LINVRNT(cl_lock_invariant(env, lock));

	cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
	cl_lock_mutex_get(env, lock);
	cl_lock_hold_release(env, lock, scope, source);
	cl_lock_mutex_put(env, lock);
	lu_ref_del(&lock->cll_reference, scope, source);
	cl_lock_put(env, lock);
}
EXPORT_SYMBOL(cl_lock_release);
void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
{
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));

	cl_lock_used_mod(env, lock, 1);
}
EXPORT_SYMBOL(cl_lock_user_add);

void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
{
	LINVRNT(cl_lock_is_mutexed(lock));
	LINVRNT(cl_lock_invariant(env, lock));
	LASSERT(lock->cll_users > 0);

	cl_lock_used_mod(env, lock, -1);
	if (lock->cll_users == 0)
		wake_up_all(&lock->cll_wq);
}
EXPORT_SYMBOL(cl_lock_user_del);
const char *cl_lock_mode_name(const enum cl_lock_mode mode)
{
	static const char *names[] = {
		[CLM_PHANTOM] = "P",
		[CLM_READ]    = "R",
		[CLM_WRITE]   = "W",
		[CLM_GROUP]   = "G"
	};
	if (0 <= mode && mode < ARRAY_SIZE(names))
		return names[mode];
	else
		return "U";
}
EXPORT_SYMBOL(cl_lock_mode_name);

/**
 * Prints human readable representation of a lock description.
 */
void cl_lock_descr_print(const struct lu_env *env, void *cookie,
			 lu_printer_t printer,
			 const struct cl_lock_descr *descr)
{
	const struct lu_fid *fid;

	fid = lu_object_fid(&descr->cld_obj->co_lu);
	(*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
}
EXPORT_SYMBOL(cl_lock_descr_print);
/**
 * Prints human readable representation of \a lock to the \a f.
 */
void cl_lock_print(const struct lu_env *env, void *cookie,
		   lu_printer_t printer, const struct cl_lock *lock)
{
	const struct cl_lock_slice *slice;

	(*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
		   lock, atomic_read(&lock->cll_ref),
		   lock->cll_state, lock->cll_error, lock->cll_holds,
		   lock->cll_users, lock->cll_flags);
	cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
	(*printer)(env, cookie, " {\n");

	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
		(*printer)(env, cookie, "    %s@%p: ",
			   slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
			   slice);
		if (slice->cls_ops->clo_print != NULL)
			slice->cls_ops->clo_print(env, cookie, printer, slice);
		(*printer)(env, cookie, "\n");
	}
	(*printer)(env, cookie, "} lock@%p\n", lock);
}
EXPORT_SYMBOL(cl_lock_print);
int cl_lock_init(void)
{
	return lu_kmem_init(cl_lock_caches);
}

void cl_lock_fini(void)
{
	lu_kmem_fini(cl_lock_caches);
}
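
/*
 * Illustrative sketch, an assumption rather than code from the original
 * file: cl_lock_init() and cl_lock_fini() are intended to be paired from
 * the client stack's global setup and teardown.
 *
 *	result = cl_lock_init();
 *	if (result != 0)
 *		return result;
 *	...
 *	cl_lock_fini();
 */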