/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}
/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
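/*
 * Illustrative sketch (not from the original source): the intended
 * get/put pairing for these saturating helpers, as used below for
 * rbd_dev->parent_ref.  The names example_ref, example_get() and
 * example_put() are hypothetical.
 */
#if 0
static atomic_t example_ref = ATOMIC_INIT(1);

static bool example_get(void)
{
	/* Returns 0 once the count has dropped to zero, and -EINVAL
	 * (without incrementing) if the count would pass INT_MAX. */
	return atomic_inc_return_safe(&example_ref) > 0;
}

static void example_put(void)
{
	if (atomic_dec_return_safe(&example_ref) < 0)
		pr_warn("example reference underflow\n");
}
#endif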
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
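/*
 * Hedged aside (not from the original source): why the formula above
 * is a safe bound.  Each byte of an int contributes at most
 * log10(256) ~= 2.41 decimal digits, which 5/2 = 2.5 over-estimates,
 * and the +1 leaves room for a sign.  With 4-byte ints this yields
 * 5 * 4 / 2 + 1 = 11 characters, enough for "-2147483648".
 */
#if 0
	BUILD_BUG_ON(MAX_INT_FORMAT_WIDTH < sizeof("-2147483648") - 1);
#endif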
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
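/*
 * Illustrative sketch (not from the original source): what a fully
 * populated rbd_spec for a mapped image head might look like.  All
 * field values below are made up.
 */
#if 0
static struct rbd_spec example_spec = {
	.pool_id	= 2,
	.pool_name	= "rbd",
	.image_id	= "1028b13a2ae8",
	.image_name	= "myimage",
	.snap_id	= CEPH_NOSNAP,		/* head, i.e. writable */
	.snap_name	= RBD_SNAP_HEAD_NAME,
};
#endif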
/*
 * An instance of the client; multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
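/*
 * Illustrative sketch (not from the original source): iterating the
 * object requests backing an image request with the macros above.
 * The function name example_walk() is hypothetical.
 */
#if 0
static void example_walk(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;

	for_each_obj_request(img_request, obj_request)
		dout("obj %p covers %llu~%llu\n", obj_request,
		     obj_request->offset, obj_request->length);
}
#endif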
struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;
/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
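/*
 * Worked example (not from the original source): with
 * RBD_SINGLE_MAJOR_PART_SHIFT == 4, each device id owns a block of
 * 16 minors, the whole device plus up to 15 partitions:
 *
 *	rbd_dev_id_to_minor(0)  == 0	(rbd0; partitions use 1..15)
 *	rbd_dev_id_to_minor(2)  == 32	(rbd2; partitions use 33..47)
 *	minor_to_rbd_dev_id(37) == 2	(rbd2p5 maps back to device 2)
 */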
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);
static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt if we have no name or id */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	/* int args above */
	Opt_last_int,
	/* string args above */
	Opt_last_string,
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself in order
 * to unlink the client from the client list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}
/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
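/*
 * Hedged aside (not from the original source): the two checks above
 * together bound the snapshot metadata,
 *
 *	sizeof(struct ceph_snap_context)
 *	  + snap_count * sizeof(__le64)		(snapshot id array)
 *	  + snap_names_len			(name string blob)
 *
 * so the total stays representable in a size_t and later allocation
 * size arithmetic cannot wrap.
 */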
/*
 * Fill an rbd image header with information from the given format 1
 * ondisk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
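/*
 * Worked example (not from the original source): because the
 * comparator inverts the usual ordering, bsearch() can walk an array
 * that is kept sorted highest-id-first:
 *
 *	u64 snaps[] = { 40, 22, 7 };	(descending)
 *	u64 key = 22;
 *	bsearch(&key, snaps, 3, sizeof(key), snapid_compare_reverse)
 *		returns &snaps[1]
 */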
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}
static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
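/*
 * Worked example (not from the original source): with the default
 * obj_order of 22 (4 MiB objects), a 0x400-byte request at image
 * offset 0xbfff00 splits like this:
 *
 *	segment            = 0xbfff00 >> 22      = 2
 *	rbd_segment_offset = 0xbfff00 & 0x3fffff = 0x3fff00
 *	rbd_segment_length = 0x400000 - 0x3fff00 = 0x100
 *
 * i.e. only 0x100 bytes fit in segment 2; the remaining 0x300 bytes
 * belong to segment 3.
 */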
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
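/*
 * Illustrative sketch (not from the original source): how a caller
 * typically consumes the in-out parameters, peeling one object's
 * worth of data at a time off the front of a source chain.  All
 * variable names here are hypothetical.
 */
#if 0
	struct bio *bio_list = rq_bio;		/* source chain */
	unsigned int bio_offset = 0;

	while (resid) {
		unsigned int clone_len;
		struct bio *clone;

		clone_len = (unsigned int)rbd_segment_length(rbd_dev,
							img_offset, resid);
		clone = bio_chain_clone_range(&bio_list, &bio_offset,
						clone_len, GFP_NOIO);
		if (!clone)
			break;		/* ENOMEM or chain exhausted */
		/* ...attach clone to this segment's object request... */
		img_offset += clone_len;
		resid -= clone_len;
	}
#endif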
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}
static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}
static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}
/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the response from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}
static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
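/*
 * Hedged aside (not from the original source): the two flags separate
 * "do we have an answer?" from "does the object exist?":
 *
 *	!obj_request_known_test(req)	no existence answer seen yet
 *	known but !obj_request_exists_test(req)
 *					known not to exist (copyup path)
 *	obj_request_exists_test(req)	known to exist (sticky, see above)
 */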
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}
static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}
static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}
/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 *
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
				  unsigned long timeout)
{
	long ret;

	dout("%s %p\n", __func__, obj_request);
	ret = wait_for_completion_interruptible_timeout(
					&obj_request->completion,
					ceph_timeout_jiffies(timeout));
	if (ret <= 0) {
		if (ret == 0)
			ret = -ETIMEDOUT;
		rbd_obj_request_end(obj_request);
	} else {
		ret = 0;
	}

	dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
	return ret;
}
static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	return __rbd_obj_request_wait(obj_request, 0);
}

static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
					unsigned long timeout)
{
	return __rbd_obj_request_wait(obj_request, timeout);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}
/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}
static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}
static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}
static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}
/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}
static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops: a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	/* Allocate and initialize the request, for all the ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
						false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}
static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
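/*
 * Illustrative sketch (not from the original source): the intended
 * pairing, as used by rbd_img_request_create()/_destroy() below.  A
 * parent reference is held for exactly as long as the image request
 * is marked layered.
 */
#if 0
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	/* ... issue object requests ... */
	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}
#endif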
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}
static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;
		enum obj_operation_type op_type;

		if (img_request_discard_test(img_request))
			op_type = OBJ_OP_DISCARD;
		else if (img_request_write_test(img_request))
			op_type = OBJ_OP_WRITE;
		else
			op_type = OBJ_OP_READ;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
			obj_op_name(op_type), obj_request->length,
			obj_request->img_offset, obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
		/*
		 * Need to end I/O on the entire obj_request worth of
		 * bytes in case of error.
		 */
		xferred = obj_request->length;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);

		more = blk_update_request(img_request->rq, result, xferred);
		if (!more)
			__blk_mq_end_request(img_request->rq, result);
	}

	return more;
}
2304 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2306 struct rbd_img_request *img_request;
2307 u32 which = obj_request->which;
2310 rbd_assert(obj_request_img_data_test(obj_request));
2311 img_request = obj_request->img_request;
2313 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2314 rbd_assert(img_request != NULL);
2315 rbd_assert(img_request->obj_request_count > 0);
2316 rbd_assert(which != BAD_WHICH);
2317 rbd_assert(which < img_request->obj_request_count);
2319 spin_lock_irq(&img_request->completion_lock);
2320 if (which != img_request->next_completion)
2323 for_each_obj_request_from(img_request, obj_request) {
2325 rbd_assert(which < img_request->obj_request_count);
2327 if (!obj_request_done_test(obj_request))
2329 more = rbd_img_obj_end_request(obj_request);
2333 rbd_assert(more ^ (which == img_request->obj_request_count));
2334 img_request->next_completion = which;
2336 spin_unlock_irq(&img_request->completion_lock);
2337 rbd_img_request_put(img_request);
2340 rbd_img_request_complete(img_request);
2344 * Add individual osd ops to the given ceph_osd_request and prepare
2345 * them for submission. num_ops is the current number of
2346 * osd operations already attached to the object request.
2348 static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2349 struct ceph_osd_request *osd_request,
2350 enum obj_operation_type op_type,
2351 unsigned int num_ops)
2353 struct rbd_img_request *img_request = obj_request->img_request;
2354 struct rbd_device *rbd_dev = img_request->rbd_dev;
2355 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2356 u64 offset = obj_request->offset;
2357 u64 length = obj_request->length;
2361 if (op_type == OBJ_OP_DISCARD) {
2362 if (!offset && length == object_size &&
2363 (!img_request_layered_test(img_request) ||
2364 !obj_request_overlaps_parent(obj_request))) {
2365 opcode = CEPH_OSD_OP_DELETE;
2366 } else if (offset + length == object_size) {
2367 opcode = CEPH_OSD_OP_TRUNCATE;
2369 down_read(&rbd_dev->header_rwsem);
2370 img_end = rbd_dev->header.image_size;
2371 up_read(&rbd_dev->header_rwsem);
2373 if (obj_request->img_offset + length == img_end)
2374 opcode = CEPH_OSD_OP_TRUNCATE;
2376 opcode = CEPH_OSD_OP_ZERO;
2378 } else if (op_type == OBJ_OP_WRITE) {
2379 opcode = CEPH_OSD_OP_WRITE;
2380 osd_req_op_alloc_hint_init(osd_request, num_ops,
2381 object_size, object_size);
2384 opcode = CEPH_OSD_OP_READ;
2387 if (opcode == CEPH_OSD_OP_DELETE)
2388 osd_req_op_init(osd_request, num_ops, opcode, 0);
2390 osd_req_op_extent_init(osd_request, num_ops, opcode,
2391 offset, length, 0, 0);
2393 if (obj_request->type == OBJ_REQUEST_BIO)
2394 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2395 obj_request->bio_list, length);
2396 else if (obj_request->type == OBJ_REQUEST_PAGES)
2397 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2398 obj_request->pages, length,
2399 offset & ~PAGE_MASK, false, false);
2401 /* Discards are also writes */
2402 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2403 rbd_osd_req_format_write(obj_request);
2405 rbd_osd_req_format_read(obj_request);
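/*
 * Reading aid for the discard opcode selection above (descriptive
 * only, not additional logic): a discard covering a whole object
 * with no live parent data beneath it becomes DELETE; one that runs
 * to the end of the object, or to the end of the image, becomes
 * TRUNCATE; anything else becomes ZERO, because the object's
 * remaining bytes must be preserved.
 */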
2409 * Split up an image request into one or more object requests, each
2410 * to a different object. The "type" parameter indicates whether
2411 * "data_desc" is the pointer to the head of a list of bio
2412 * structures, or the base of a page array. In either case this
2413 * function assumes data_desc describes memory sufficient to hold
2414 * all data described by the image request.
2416 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2417 enum obj_request_type type,
2420 struct rbd_device *rbd_dev = img_request->rbd_dev;
2421 struct rbd_obj_request *obj_request = NULL;
2422 struct rbd_obj_request *next_obj_request;
2423 struct bio *bio_list = NULL;
2424 unsigned int bio_offset = 0;
2425 struct page **pages = NULL;
2426 enum obj_operation_type op_type;
2430 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2431 (int)type, data_desc);
2433 img_offset = img_request->offset;
2434 resid = img_request->length;
2435 rbd_assert(resid > 0);
2436 op_type = rbd_img_request_op_type(img_request);
2438 if (type == OBJ_REQUEST_BIO) {
2439 bio_list = data_desc;
2440 rbd_assert(img_offset ==
2441 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2442 } else if (type == OBJ_REQUEST_PAGES) {
2447 struct ceph_osd_request *osd_req;
2448 const char *object_name;
2452 object_name = rbd_segment_name(rbd_dev, img_offset);
2455 offset = rbd_segment_offset(rbd_dev, img_offset);
2456 length = rbd_segment_length(rbd_dev, img_offset, resid);
2457 obj_request = rbd_obj_request_create(object_name,
2458 offset, length, type);
2459 /* object request has its own copy of the object name */
2460 rbd_segment_name_free(object_name);
2465 * set obj_request->img_request before creating the
2466 * osd_request so that it gets the right snapc
2468 rbd_img_obj_request_add(img_request, obj_request);
2470 if (type == OBJ_REQUEST_BIO) {
2471 unsigned int clone_size;
2473 rbd_assert(length <= (u64)UINT_MAX);
2474 clone_size = (unsigned int)length;
2475 obj_request->bio_list =
2476 bio_chain_clone_range(&bio_list,
2480 if (!obj_request->bio_list)
2482 } else if (type == OBJ_REQUEST_PAGES) {
2483 unsigned int page_count;
2485 obj_request->pages = pages;
2486 page_count = (u32)calc_pages_for(offset, length);
2487 obj_request->page_count = page_count;
2488 if ((offset + length) & ~PAGE_MASK)
2489 page_count--; /* more on last page */
2490 pages += page_count;
2493 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2494 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2499 obj_request->osd_req = osd_req;
2500 obj_request->callback = rbd_img_obj_callback;
2501 obj_request->img_offset = img_offset;
2503 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2505 rbd_img_request_get(img_request);
2507 img_offset += length;
2514 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2515 rbd_img_obj_request_del(img_request, obj_request);
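/*
 * Worked example of the segmenting above (hypothetical numbers,
 * assuming a 4 MiB object size): an 8 MiB request at image offset
 * 6 MiB becomes three object requests,
 *
 *	object n:     offset 2 MiB, length 2 MiB
 *	object n + 1: offset 0,     length 4 MiB
 *	object n + 2: offset 0,     length 2 MiB
 *
 * since rbd_segment_length() never lets a single object request
 * cross an object boundary.
 */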
2521 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2523 struct rbd_img_request *img_request;
2524 struct rbd_device *rbd_dev;
2525 struct page **pages;
2528 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2529 obj_request->type == OBJ_REQUEST_NODATA);
2530 rbd_assert(obj_request_img_data_test(obj_request));
2531 img_request = obj_request->img_request;
2532 rbd_assert(img_request);
2534 rbd_dev = img_request->rbd_dev;
2535 rbd_assert(rbd_dev);
2537 pages = obj_request->copyup_pages;
2538 rbd_assert(pages != NULL);
2539 obj_request->copyup_pages = NULL;
2540 page_count = obj_request->copyup_page_count;
2541 rbd_assert(page_count);
2542 obj_request->copyup_page_count = 0;
2543 ceph_release_page_vector(pages, page_count);
2546 * We want the transfer count to reflect the size of the
2547 * original write request. There is no such thing as a
2548 * successful short write, so if the request was successful
2549 * we can just set it to the originally-requested length.
2551 if (!obj_request->result)
2552 obj_request->xferred = obj_request->length;
2554 /* Finish up with the normal image object callback */
2556 rbd_img_obj_callback(obj_request);
2560 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2562 struct rbd_obj_request *orig_request;
2563 struct ceph_osd_request *osd_req;
2564 struct ceph_osd_client *osdc;
2565 struct rbd_device *rbd_dev;
2566 struct page **pages;
2567 enum obj_operation_type op_type;
2572 rbd_assert(img_request_child_test(img_request));
2574 /* First get what we need from the image request */
2576 pages = img_request->copyup_pages;
2577 rbd_assert(pages != NULL);
2578 img_request->copyup_pages = NULL;
2579 page_count = img_request->copyup_page_count;
2580 rbd_assert(page_count);
2581 img_request->copyup_page_count = 0;
2583 orig_request = img_request->obj_request;
2584 rbd_assert(orig_request != NULL);
2585 rbd_assert(obj_request_type_valid(orig_request->type));
2586 img_result = img_request->result;
2587 parent_length = img_request->length;
2588 rbd_assert(parent_length == img_request->xferred);
2589 rbd_img_request_put(img_request);
2591 rbd_assert(orig_request->img_request);
2592 rbd_dev = orig_request->img_request->rbd_dev;
2593 rbd_assert(rbd_dev);
2596 * If the overlap has become 0 (most likely because the
2597 * image has been flattened) we need to free the pages
2598 * and re-submit the original write request.
2600 if (!rbd_dev->parent_overlap) {
2601 struct ceph_osd_client *osdc;
2603 ceph_release_page_vector(pages, page_count);
2604 osdc = &rbd_dev->rbd_client->client->osdc;
2605 img_result = rbd_obj_request_submit(osdc, orig_request);
2614 * The original osd request is of no use to us any more.
2615 * We need a new one that can hold the three ops in a copyup
2616 * request. Allocate the new copyup osd request for the
2617 * original request, and release the old one.
2619 img_result = -ENOMEM;
2620 osd_req = rbd_osd_req_create_copyup(orig_request);
2623 rbd_osd_req_destroy(orig_request->osd_req);
2624 orig_request->osd_req = osd_req;
2625 orig_request->copyup_pages = pages;
2626 orig_request->copyup_page_count = page_count;
2628 /* Initialize the copyup op */
2630 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2631 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2634 /* Add the other op(s) */
2636 op_type = rbd_img_request_op_type(orig_request->img_request);
2637 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
2639 /* All set, send it off. */
2641 orig_request->callback = rbd_img_obj_copyup_callback;
2642 osdc = &rbd_dev->rbd_client->client->osdc;
2643 img_result = rbd_obj_request_submit(osdc, orig_request);
2647 /* Record the error code and complete the request */
2649 orig_request->result = img_result;
2650 orig_request->xferred = 0;
2651 obj_request_done_set(orig_request);
2652 rbd_obj_request_complete(orig_request);
2656 * Read from the parent image the range of data that covers the
2657 * entire target of the given object request. This is used for
2658 * satisfying a layered image write request when the target of an
2659 * object request from the image request does not exist.
2661 * A page array big enough to hold the returned data is allocated
2662 * and supplied to rbd_img_request_fill() as the "data descriptor."
2663 * When the read completes, this page array will be transferred to
2664 * the original object request for the copyup operation.
2666 * If an error occurs, record it as the result of the original
2667 * object request and mark it done so it gets completed.
2669 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2671 struct rbd_img_request *img_request = NULL;
2672 struct rbd_img_request *parent_request = NULL;
2673 struct rbd_device *rbd_dev;
2676 struct page **pages = NULL;
2680 rbd_assert(obj_request_img_data_test(obj_request));
2681 rbd_assert(obj_request_type_valid(obj_request->type));
2683 img_request = obj_request->img_request;
2684 rbd_assert(img_request != NULL);
2685 rbd_dev = img_request->rbd_dev;
2686 rbd_assert(rbd_dev->parent != NULL);
2689 * Determine the byte range covered by the object in the
2690 * child image to which the original request was to be sent.
2692 img_offset = obj_request->img_offset - obj_request->offset;
2693 length = (u64)1 << rbd_dev->header.obj_order;
2696 * There is no defined parent data beyond the parent
2697 * overlap, so limit what we read at that boundary if necessary.
2700 if (img_offset + length > rbd_dev->parent_overlap) {
2701 rbd_assert(img_offset < rbd_dev->parent_overlap);
2702 length = rbd_dev->parent_overlap - img_offset;
2706 * Allocate a page array big enough to receive the data read from the parent.
2709 page_count = (u32)calc_pages_for(0, length);
2710 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2711 if (IS_ERR(pages)) {
2712 result = PTR_ERR(pages);
2718 parent_request = rbd_parent_request_create(obj_request,
2719 img_offset, length);
2720 if (!parent_request)
2723 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2726 parent_request->copyup_pages = pages;
2727 parent_request->copyup_page_count = page_count;
2729 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2730 result = rbd_img_request_submit(parent_request);
2734 parent_request->copyup_pages = NULL;
2735 parent_request->copyup_page_count = 0;
2736 parent_request->obj_request = NULL;
2737 rbd_obj_request_put(obj_request);
2740 ceph_release_page_vector(pages, page_count);
2742 rbd_img_request_put(parent_request);
2743 obj_request->result = result;
2744 obj_request->xferred = 0;
2745 obj_request_done_set(obj_request);
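/*
 * The complete copyup path, as implemented above (descriptive
 * summary only):
 *
 *  1. rbd_img_obj_parent_read_full() reads the object's full extent
 *     (clipped to the parent overlap) from the parent image into a
 *     page vector.
 *  2. rbd_img_obj_parent_read_full_callback() builds a replacement
 *     osd request whose first op is the "copyup" class method call
 *     carrying those pages.
 *  3. rbd_img_obj_request_fill() appends the original write or
 *     discard op(s), so the OSD applies the copyup and the update
 *     in a single request.
 */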
2750 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2752 struct rbd_obj_request *orig_request;
2753 struct rbd_device *rbd_dev;
2756 rbd_assert(!obj_request_img_data_test(obj_request));
2759 * All we need from the object request is the original
2760 * request and the result of the STAT op. Grab those, then
2761 * we're done with the request.
2763 orig_request = obj_request->obj_request;
2764 obj_request->obj_request = NULL;
2765 rbd_obj_request_put(orig_request);
2766 rbd_assert(orig_request);
2767 rbd_assert(orig_request->img_request);
2769 result = obj_request->result;
2770 obj_request->result = 0;
2772 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2773 obj_request, orig_request, result,
2774 obj_request->xferred, obj_request->length);
2775 rbd_obj_request_put(obj_request);
2778 * If the overlap has become 0 (most likely because the
2779 * image has been flattened) we need to free the pages
2780 * and re-submit the original write request.
2782 rbd_dev = orig_request->img_request->rbd_dev;
2783 if (!rbd_dev->parent_overlap) {
2784 struct ceph_osd_client *osdc;
2786 osdc = &rbd_dev->rbd_client->client->osdc;
2787 result = rbd_obj_request_submit(osdc, orig_request);
2793 * Our only purpose here is to determine whether the object
2794 * exists, and we don't want to treat the non-existence as
2795 * an error. If something else comes back, transfer the
2796 * error to the original request and complete it now.
2799 obj_request_existence_set(orig_request, true);
2800 } else if (result == -ENOENT) {
2801 obj_request_existence_set(orig_request, false);
2802 } else if (result) {
2803 orig_request->result = result;
2808 * Resubmit the original request now that we have recorded
2809 * whether the target object exists.
2811 orig_request->result = rbd_img_obj_request_submit(orig_request);
2813 if (orig_request->result)
2814 rbd_obj_request_complete(orig_request);
2817 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2819 struct rbd_obj_request *stat_request;
2820 struct rbd_device *rbd_dev;
2821 struct ceph_osd_client *osdc;
2822 struct page **pages = NULL;
2828 * The response data for a STAT call consists of:
 *     le64 length;
 *     struct {
 *         le32 tv_sec;
 *         le32 tv_nsec;
 *     } mtime;
2835 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2836 page_count = (u32)calc_pages_for(0, size);
2837 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2839 return PTR_ERR(pages);
2842 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2847 rbd_obj_request_get(obj_request);
2848 stat_request->obj_request = obj_request;
2849 stat_request->pages = pages;
2850 stat_request->page_count = page_count;
2852 rbd_assert(obj_request->img_request);
2853 rbd_dev = obj_request->img_request->rbd_dev;
2854 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
2856 if (!stat_request->osd_req)
2858 stat_request->callback = rbd_img_obj_exists_callback;
2860 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
2861 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2863 rbd_osd_req_format_read(stat_request);
2865 osdc = &rbd_dev->rbd_client->client->osdc;
2866 ret = rbd_obj_request_submit(osdc, stat_request);
2869 rbd_obj_request_put(obj_request);
2874 static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
2876 struct rbd_img_request *img_request;
2877 struct rbd_device *rbd_dev;
2879 rbd_assert(obj_request_img_data_test(obj_request));
2881 img_request = obj_request->img_request;
2882 rbd_assert(img_request);
2883 rbd_dev = img_request->rbd_dev;
2886 if (!img_request_write_test(img_request) &&
2887 !img_request_discard_test(img_request))
2890 /* Non-layered writes */
2891 if (!img_request_layered_test(img_request))
2895 * Layered writes outside of the parent overlap range don't
2896 * share any data with the parent.
2898 if (!obj_request_overlaps_parent(obj_request))
2902 * Entire-object layered writes - we will overwrite whatever
2903 * parent data there is anyway.
2905 if (!obj_request->offset &&
2906 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2910 * If the object is known to already exist, its parent data has
2911 * already been copied.
2913 if (obj_request_known_test(obj_request) &&
2914 obj_request_exists_test(obj_request))
2920 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2922 if (img_obj_request_simple(obj_request)) {
2923 struct rbd_device *rbd_dev;
2924 struct ceph_osd_client *osdc;
2926 rbd_dev = obj_request->img_request->rbd_dev;
2927 osdc = &rbd_dev->rbd_client->client->osdc;
2929 return rbd_obj_request_submit(osdc, obj_request);
2933 * It's a layered write. The target object might exist but
2934 * we may not know that yet. If we know it doesn't exist,
2935 * start by reading the data for the full target object from
2936 * the parent so we can use it for a copyup to the target.
2938 if (obj_request_known_test(obj_request))
2939 return rbd_img_obj_parent_read_full(obj_request);
2941 /* We don't know whether the target exists. Go find out. */
2943 return rbd_img_obj_exists_submit(obj_request);
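/*
 * Decision table for the submit path above (a reading aid):
 *
 *	read, or write/discard needing no parent data -> submit as-is
 *	layered write, target existence unknown       -> STAT first
 *	layered write, target known not to exist      -> copyup path
 */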
2946 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2948 struct rbd_obj_request *obj_request;
2949 struct rbd_obj_request *next_obj_request;
2951 dout("%s: img %p\n", __func__, img_request);
2952 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2955 ret = rbd_img_obj_request_submit(obj_request);
2963 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2965 struct rbd_obj_request *obj_request;
2966 struct rbd_device *rbd_dev;
2971 rbd_assert(img_request_child_test(img_request));
2973 /* First get what we need from the image request and release it */
2975 obj_request = img_request->obj_request;
2976 img_xferred = img_request->xferred;
2977 img_result = img_request->result;
2978 rbd_img_request_put(img_request);
2981 * If the overlap has become 0 (most likely because the
2982 * image has been flattened) we need to re-submit the original request.
2985 rbd_assert(obj_request);
2986 rbd_assert(obj_request->img_request);
2987 rbd_dev = obj_request->img_request->rbd_dev;
2988 if (!rbd_dev->parent_overlap) {
2989 struct ceph_osd_client *osdc;
2991 osdc = &rbd_dev->rbd_client->client->osdc;
2992 img_result = rbd_obj_request_submit(osdc, obj_request);
2997 obj_request->result = img_result;
2998 if (obj_request->result)
3002 * We need to zero anything beyond the parent overlap
3003 * boundary. Since rbd_img_obj_request_read_callback()
3004 * will zero anything beyond the end of a short read, an
3005 * easy way to do this is to pretend the data from the
3006 * parent came up short--ending at the overlap boundary.
3008 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3009 obj_end = obj_request->img_offset + obj_request->length;
3010 if (obj_end > rbd_dev->parent_overlap) {
3013 if (obj_request->img_offset < rbd_dev->parent_overlap)
3014 xferred = rbd_dev->parent_overlap -
3015 obj_request->img_offset;
3017 obj_request->xferred = min(img_xferred, xferred);
3019 obj_request->xferred = img_xferred;
3022 rbd_img_obj_request_read_callback(obj_request);
3023 rbd_obj_request_complete(obj_request);
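/*
 * Worked example of the clamping above (hypothetical numbers): with
 * parent_overlap at 4 MiB, a 2 MiB child read at img_offset 3 MiB
 * is covered by parent data for only 1 MiB, so xferred is clamped
 * to 1 MiB and rbd_img_obj_request_read_callback() zero-fills the
 * remainder.
 */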
3026 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3028 struct rbd_img_request *img_request;
3031 rbd_assert(obj_request_img_data_test(obj_request));
3032 rbd_assert(obj_request->img_request != NULL);
3033 rbd_assert(obj_request->result == (s32) -ENOENT);
3034 rbd_assert(obj_request_type_valid(obj_request->type));
3036 /* rbd_read_finish(obj_request, obj_request->length); */
3037 img_request = rbd_parent_request_create(obj_request,
3038 obj_request->img_offset,
3039 obj_request->length);
3044 if (obj_request->type == OBJ_REQUEST_BIO)
3045 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3046 obj_request->bio_list);
3048 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3049 obj_request->pages);
3053 img_request->callback = rbd_img_parent_read_callback;
3054 result = rbd_img_request_submit(img_request);
3061 rbd_img_request_put(img_request);
3062 obj_request->result = result;
3063 obj_request->xferred = 0;
3064 obj_request_done_set(obj_request);
3067 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
3069 struct rbd_obj_request *obj_request;
3070 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3073 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3074 OBJ_REQUEST_NODATA);
3079 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3081 if (!obj_request->osd_req)
3084 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
3086 rbd_osd_req_format_read(obj_request);
3088 ret = rbd_obj_request_submit(osdc, obj_request);
3091 ret = rbd_obj_request_wait(obj_request);
3093 rbd_obj_request_put(obj_request);
3098 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3100 struct rbd_device *rbd_dev = (struct rbd_device *)data;
3106 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
3107 rbd_dev->header_name, (unsigned long long)notify_id,
3108 (unsigned int)opcode);
3111 * Until adequate refresh error handling is in place, there is
3112 * not much we can do here, except warn.
3114 * See http://tracker.ceph.com/issues/5040
3116 ret = rbd_dev_refresh(rbd_dev);
3118 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3120 ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
3122 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
3126 * Send a watch or unwatch request and wait for the ack. Return a request
3127 * with a ref held on success or error.
3129 static struct rbd_obj_request *rbd_obj_watch_request_helper(
3130 struct rbd_device *rbd_dev,
3133 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3134 struct ceph_options *opts = osdc->client->options;
3135 struct rbd_obj_request *obj_request;
3138 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3139 OBJ_REQUEST_NODATA);
3141 return ERR_PTR(-ENOMEM);
3143 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
3145 if (!obj_request->osd_req) {
3150 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3151 rbd_dev->watch_event->cookie, 0, watch);
3152 rbd_osd_req_format_write(obj_request);
3155 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3157 ret = rbd_obj_request_submit(osdc, obj_request);
3161 ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
3165 ret = obj_request->result;
3168 rbd_obj_request_end(obj_request);
3175 rbd_obj_request_put(obj_request);
3176 return ERR_PTR(ret);
3180 * Initiate a watch request, synchronously.
3182 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
3184 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3185 struct rbd_obj_request *obj_request;
3188 rbd_assert(!rbd_dev->watch_event);
3189 rbd_assert(!rbd_dev->watch_request);
3191 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
3192 &rbd_dev->watch_event);
3196 obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
3197 if (IS_ERR(obj_request)) {
3198 ceph_osdc_cancel_event(rbd_dev->watch_event);
3199 rbd_dev->watch_event = NULL;
3200 return PTR_ERR(obj_request);
3204 * A watch request is set to linger, so the underlying osd
3205 * request won't go away until we unregister it. We retain
3206 * a pointer to the object request during that time (in
3207 * rbd_dev->watch_request), so we'll keep a reference to it.
3208 * We'll drop that reference after we've unregistered it in
3209 * rbd_dev_header_unwatch_sync().
3211 rbd_dev->watch_request = obj_request;
3217 * Tear down a watch request, synchronously.
3219 static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3221 struct rbd_obj_request *obj_request;
3223 rbd_assert(rbd_dev->watch_event);
3224 rbd_assert(rbd_dev->watch_request);
3226 rbd_obj_request_end(rbd_dev->watch_request);
3227 rbd_obj_request_put(rbd_dev->watch_request);
3228 rbd_dev->watch_request = NULL;
3230 obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
3231 if (!IS_ERR(obj_request))
3232 rbd_obj_request_put(obj_request);
3234 rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
3235 PTR_ERR(obj_request));
3237 ceph_osdc_cancel_event(rbd_dev->watch_event);
3238 rbd_dev->watch_event = NULL;
3242 * Synchronous osd object method call. Returns the number of bytes
3243 * returned in the inbound buffer, or a negative error code.
3245 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3246 const char *object_name,
3247 const char *class_name,
3248 const char *method_name,
3249 const void *outbound,
3250 size_t outbound_size,
3252 size_t inbound_size)
3254 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3255 struct rbd_obj_request *obj_request;
3256 struct page **pages;
3261 * Method calls are ultimately read operations. The result
3262 * should be placed into the inbound buffer provided. They
3263 * also supply outbound data--parameters for the object
3264 * method. Currently if this is present it will be a snapshot id.
3267 page_count = (u32)calc_pages_for(0, inbound_size);
3268 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3270 return PTR_ERR(pages);
3273 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3278 obj_request->pages = pages;
3279 obj_request->page_count = page_count;
3281 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3283 if (!obj_request->osd_req)
3286 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3287 class_name, method_name);
3288 if (outbound_size) {
3289 struct ceph_pagelist *pagelist;
3291 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3295 ceph_pagelist_init(pagelist);
3296 ceph_pagelist_append(pagelist, outbound, outbound_size);
3297 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3300 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3301 obj_request->pages, inbound_size,
3303 rbd_osd_req_format_read(obj_request);
3305 ret = rbd_obj_request_submit(osdc, obj_request);
3308 ret = rbd_obj_request_wait(obj_request);
3312 ret = obj_request->result;
3316 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3317 ret = (int)obj_request->xferred;
3318 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3321 rbd_obj_request_put(obj_request);
3323 ceph_release_page_vector(pages, page_count);
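/*
 * A minimal call sketch (illustrative; the v2 metadata helpers
 * below are the real users). Invoking the "get_size" method of the
 * "rbd" class against the header object, with size_buf declared as
 * in _rbd_dev_v2_snap_size() below, would look like:
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				  "rbd", "get_size",
 *				  &snapid, sizeof (snapid),
 *				  &size_buf, sizeof (size_buf));
 */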
3328 static void rbd_queue_workfn(struct work_struct *work)
3330 struct request *rq = blk_mq_rq_from_pdu(work);
3331 struct rbd_device *rbd_dev = rq->q->queuedata;
3332 struct rbd_img_request *img_request;
3333 struct ceph_snap_context *snapc = NULL;
3334 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3335 u64 length = blk_rq_bytes(rq);
3336 enum obj_operation_type op_type;
3340 if (rq->cmd_type != REQ_TYPE_FS) {
3341 dout("%s: non-fs request type %d\n", __func__,
3342 (int) rq->cmd_type);
3347 if (rq->cmd_flags & REQ_DISCARD)
3348 op_type = OBJ_OP_DISCARD;
3349 else if (rq->cmd_flags & REQ_WRITE)
3350 op_type = OBJ_OP_WRITE;
3352 op_type = OBJ_OP_READ;
3354 /* Ignore/skip any zero-length requests */
3357 dout("%s: zero-length request\n", __func__);
3362 /* Only reads are allowed to a read-only device */
3364 if (op_type != OBJ_OP_READ) {
3365 if (rbd_dev->mapping.read_only) {
3369 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3373 * Quit early if the mapped snapshot no longer exists. It's
3374 * still possible the snapshot will have disappeared by the
3375 * time our request arrives at the osd, but there's no sense in
3376 * sending it if we already know.
3378 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3379 dout("request for non-existent snapshot");
3380 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3385 if (offset && length > U64_MAX - offset + 1) {
3386 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3389 goto err_rq; /* Shouldn't happen */
3392 blk_mq_start_request(rq);
3394 down_read(&rbd_dev->header_rwsem);
3395 mapping_size = rbd_dev->mapping.size;
3396 if (op_type != OBJ_OP_READ) {
3397 snapc = rbd_dev->header.snapc;
3398 ceph_get_snap_context(snapc);
3400 up_read(&rbd_dev->header_rwsem);
3402 if (offset + length > mapping_size) {
3403 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3404 length, mapping_size);
3409 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
3415 img_request->rq = rq;
3417 if (op_type == OBJ_OP_DISCARD)
3418 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3421 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3424 goto err_img_request;
3426 result = rbd_img_request_submit(img_request);
3428 goto err_img_request;
3433 rbd_img_request_put(img_request);
3436 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
3437 obj_op_name(op_type), length, offset, result);
3438 ceph_put_snap_context(snapc);
3440 blk_mq_end_request(rq, result);
3443 static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3444 const struct blk_mq_queue_data *bd)
3446 struct request *rq = bd->rq;
3447 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3449 queue_work(rbd_wq, work);
3450 return BLK_MQ_RQ_QUEUE_OK;
3454 * a queue callback. Makes sure that we don't create a bio that spans across
3455 * multiple osd objects. One exception would be single-page bios,
3456 * which we handle later in bio_chain_clone_range()
3458 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3459 struct bio_vec *bvec)
3461 struct rbd_device *rbd_dev = q->queuedata;
3462 sector_t sector_offset;
3463 sector_t sectors_per_obj;
3464 sector_t obj_sector_offset;
3468 * Find how far into its rbd object the partition-relative
3469 * bio start sector falls, once converted to a device-relative offset.
3472 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3473 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3474 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3477 * Compute the number of bytes from that offset to the end
3478 * of the object. Account for what's already used by the bio.
3480 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3481 if (ret > bmd->bi_size)
3482 ret -= bmd->bi_size;
3487 * Don't send back more than was asked for. And if the bio
3488 * was empty, let the whole thing through because: "Note
3489 * that a block device *must* allow a single page to be
3490 * added to an empty bio."
3492 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3493 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3494 ret = (int) bvec->bv_len;
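/*
 * Worked arithmetic (hypothetical numbers): with obj_order 22
 * (4 MiB objects), sectors_per_obj = 1 << (22 - 9) = 8192. A bio
 * whose device-relative start is sector 8000 sits 8000 sectors into
 * its object, so at most (8192 - 8000) << 9 = 98304 bytes can be
 * merged before the object boundary caps it.
 */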
3499 static void rbd_free_disk(struct rbd_device *rbd_dev)
3501 struct gendisk *disk = rbd_dev->disk;
3506 rbd_dev->disk = NULL;
3507 if (disk->flags & GENHD_FL_UP) {
3510 blk_cleanup_queue(disk->queue);
3511 blk_mq_free_tag_set(&rbd_dev->tag_set);
3516 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3517 const char *object_name,
3518 u64 offset, u64 length, void *buf)
3521 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3522 struct rbd_obj_request *obj_request;
3523 struct page **pages = NULL;
3528 page_count = (u32) calc_pages_for(offset, length);
3529 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3531 return PTR_ERR(pages);
3534 obj_request = rbd_obj_request_create(object_name, offset, length,
3539 obj_request->pages = pages;
3540 obj_request->page_count = page_count;
3542 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3544 if (!obj_request->osd_req)
3547 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3548 offset, length, 0, 0);
3549 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3551 obj_request->length,
3552 obj_request->offset & ~PAGE_MASK,
3554 rbd_osd_req_format_read(obj_request);
3556 ret = rbd_obj_request_submit(osdc, obj_request);
3559 ret = rbd_obj_request_wait(obj_request);
3563 ret = obj_request->result;
3567 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3568 size = (size_t) obj_request->xferred;
3569 ceph_copy_from_page_vector(pages, buf, 0, size);
3570 rbd_assert(size <= (size_t)INT_MAX);
3574 rbd_obj_request_put(obj_request);
3576 ceph_release_page_vector(pages, page_count);
3582 * Read the complete header for the given rbd device. On successful
3583 * return, the rbd_dev->header field will contain up-to-date
3584 * information about the image.
3586 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3588 struct rbd_image_header_ondisk *ondisk = NULL;
3595 * The complete header will include an array of its 64-bit
3596 * snapshot ids, followed by the names of those snapshots as
3597 * a contiguous block of NUL-terminated strings. Note that
3598 * the number of snapshots could change by the time we read
3599 * it in, in which case we re-read it.
3606 size = sizeof (*ondisk);
3607 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3609 ondisk = kmalloc(size, GFP_KERNEL);
3613 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3617 if ((size_t)ret < size) {
3619 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3623 if (!rbd_dev_ondisk_valid(ondisk)) {
3625 rbd_warn(rbd_dev, "invalid header");
3629 names_size = le64_to_cpu(ondisk->snap_names_len);
3630 want_count = snap_count;
3631 snap_count = le32_to_cpu(ondisk->snap_count);
3632 } while (snap_count != want_count);
3634 ret = rbd_header_from_disk(rbd_dev, ondisk);
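/*
 * Shape of the buffer parsed above, as implied by the sizing
 * arithmetic (rbd_header_from_disk() holds the authoritative
 * layout):
 *
 *	struct rbd_image_header_ondisk    fixed-size header
 *	struct rbd_image_snap_ondisk[n]   one entry per snapshot
 *	char names[snap_names_len]        NUL-terminated name block
 */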
3642 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3643 * has disappeared from the (just updated) snapshot context.
3645 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3649 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3652 snap_id = rbd_dev->spec->snap_id;
3653 if (snap_id == CEPH_NOSNAP)
3656 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3657 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3660 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3666 * Don't hold the lock while doing disk operations,
3667 * or lock ordering will conflict with the bdev mutex via:
3668 * rbd_add() -> blkdev_get() -> rbd_open()
3670 spin_lock_irq(&rbd_dev->lock);
3671 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3672 spin_unlock_irq(&rbd_dev->lock);
3674 * If the device is being removed, rbd_dev->disk has
3675 * been destroyed, so don't try to update its size
3678 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3679 dout("setting size to %llu sectors", (unsigned long long)size);
3680 set_capacity(rbd_dev->disk, size);
3681 revalidate_disk(rbd_dev->disk);
3685 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3690 down_write(&rbd_dev->header_rwsem);
3691 mapping_size = rbd_dev->mapping.size;
3693 ret = rbd_dev_header_info(rbd_dev);
3698 * If there is a parent, see if it has disappeared due to the
3699 * mapped image getting flattened.
3701 if (rbd_dev->parent) {
3702 ret = rbd_dev_v2_parent_info(rbd_dev);
3707 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
3708 rbd_dev->mapping.size = rbd_dev->header.image_size;
3710 /* validate mapped snapshot's EXISTS flag */
3711 rbd_exists_validate(rbd_dev);
3715 up_write(&rbd_dev->header_rwsem);
3716 if (!ret && mapping_size != rbd_dev->mapping.size)
3717 rbd_dev_update_size(rbd_dev);
3722 static int rbd_init_request(void *data, struct request *rq,
3723 unsigned int hctx_idx, unsigned int request_idx,
3724 unsigned int numa_node)
3726 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3728 INIT_WORK(work, rbd_queue_workfn);
3732 static struct blk_mq_ops rbd_mq_ops = {
3733 .queue_rq = rbd_queue_rq,
3734 .map_queue = blk_mq_map_queue,
3735 .init_request = rbd_init_request,
3738 static int rbd_init_disk(struct rbd_device *rbd_dev)
3740 struct gendisk *disk;
3741 struct request_queue *q;
3745 /* create gendisk info */
3746 disk = alloc_disk(single_major ?
3747 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3748 RBD_MINORS_PER_MAJOR);
3752 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3754 disk->major = rbd_dev->major;
3755 disk->first_minor = rbd_dev->minor;
3757 disk->flags |= GENHD_FL_EXT_DEVT;
3758 disk->fops = &rbd_bd_ops;
3759 disk->private_data = rbd_dev;
3761 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3762 rbd_dev->tag_set.ops = &rbd_mq_ops;
3763 rbd_dev->tag_set.queue_depth = BLKDEV_MAX_RQ;
3764 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
3765 rbd_dev->tag_set.flags =
3766 BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
3767 rbd_dev->tag_set.nr_hw_queues = 1;
3768 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3770 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3774 q = blk_mq_init_queue(&rbd_dev->tag_set);
3780 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
3781 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
3783 /* set io sizes to object size */
3784 segment_size = rbd_obj_bytes(&rbd_dev->header);
3785 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3786 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
3787 blk_queue_max_segment_size(q, segment_size);
3788 blk_queue_io_min(q, segment_size);
3789 blk_queue_io_opt(q, segment_size);
3791 /* enable the discard support */
3792 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3793 q->limits.discard_granularity = segment_size;
3794 q->limits.discard_alignment = segment_size;
3795 q->limits.max_discard_sectors = segment_size / SECTOR_SIZE;
3796 q->limits.discard_zeroes_data = 1;
3798 blk_queue_merge_bvec(q, rbd_merge_bvec);
3801 q->queuedata = rbd_dev;
3803 rbd_dev->disk = disk;
3807 blk_mq_free_tag_set(&rbd_dev->tag_set);
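/*
 * Illustrative arithmetic for the queue limits set above: with a
 * 4 MiB object size, max_hw_sectors works out to 4 MiB / 512 = 8192
 * and the discard granularity to a full object, so no single
 * request straddles an object boundary.
 */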
3817 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3819 return container_of(dev, struct rbd_device, dev);
3822 static ssize_t rbd_size_show(struct device *dev,
3823 struct device_attribute *attr, char *buf)
3825 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3827 return sprintf(buf, "%llu\n",
3828 (unsigned long long)rbd_dev->mapping.size);
3832 * Note this shows the features for whatever's mapped, which is not
3833 * necessarily the base image.
3835 static ssize_t rbd_features_show(struct device *dev,
3836 struct device_attribute *attr, char *buf)
3838 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3840 return sprintf(buf, "0x%016llx\n",
3841 (unsigned long long)rbd_dev->mapping.features);
3844 static ssize_t rbd_major_show(struct device *dev,
3845 struct device_attribute *attr, char *buf)
3847 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3850 return sprintf(buf, "%d\n", rbd_dev->major);
3852 return sprintf(buf, "(none)\n");
3855 static ssize_t rbd_minor_show(struct device *dev,
3856 struct device_attribute *attr, char *buf)
3858 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3860 return sprintf(buf, "%d\n", rbd_dev->minor);
3863 static ssize_t rbd_client_id_show(struct device *dev,
3864 struct device_attribute *attr, char *buf)
3866 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3868 return sprintf(buf, "client%lld\n",
3869 ceph_client_id(rbd_dev->rbd_client->client));
3872 static ssize_t rbd_pool_show(struct device *dev,
3873 struct device_attribute *attr, char *buf)
3875 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3877 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3880 static ssize_t rbd_pool_id_show(struct device *dev,
3881 struct device_attribute *attr, char *buf)
3883 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3885 return sprintf(buf, "%llu\n",
3886 (unsigned long long) rbd_dev->spec->pool_id);
3889 static ssize_t rbd_name_show(struct device *dev,
3890 struct device_attribute *attr, char *buf)
3892 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3894 if (rbd_dev->spec->image_name)
3895 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3897 return sprintf(buf, "(unknown)\n");
3900 static ssize_t rbd_image_id_show(struct device *dev,
3901 struct device_attribute *attr, char *buf)
3903 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3905 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3909 * Shows the name of the currently-mapped snapshot (or
3910 * RBD_SNAP_HEAD_NAME for the base image).
3912 static ssize_t rbd_snap_show(struct device *dev,
3913 struct device_attribute *attr,
3916 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3918 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3922 * For a v2 image, shows the chain of parent images, separated by empty
3923 * lines. For v1 images or if there is no parent, shows "(no parent image)".
3926 static ssize_t rbd_parent_show(struct device *dev,
3927 struct device_attribute *attr,
3930 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3933 if (!rbd_dev->parent)
3934 return sprintf(buf, "(no parent image)\n");
3936 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
3937 struct rbd_spec *spec = rbd_dev->parent_spec;
3939 count += sprintf(&buf[count], "%s"
3940 "pool_id %llu\npool_name %s\n"
3941 "image_id %s\nimage_name %s\n"
3942 "snap_id %llu\nsnap_name %s\n"
3944 !count ? "" : "\n", /* first? */
3945 spec->pool_id, spec->pool_name,
3946 spec->image_id, spec->image_name ?: "(unknown)",
3947 spec->snap_id, spec->snap_name,
3948 rbd_dev->parent_overlap);
3954 static ssize_t rbd_image_refresh(struct device *dev,
3955 struct device_attribute *attr,
3959 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3962 ret = rbd_dev_refresh(rbd_dev);
3969 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3970 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3971 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3972 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3973 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3974 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3975 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3976 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3977 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3978 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3979 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3980 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
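/*
 * These attributes appear in sysfs under the rbd bus, one directory
 * per mapped device; for example, writing anything to the
 * write-only "refresh" attribute invokes rbd_image_refresh() above.
 */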
3982 static struct attribute *rbd_attrs[] = {
3983 &dev_attr_size.attr,
3984 &dev_attr_features.attr,
3985 &dev_attr_major.attr,
3986 &dev_attr_minor.attr,
3987 &dev_attr_client_id.attr,
3988 &dev_attr_pool.attr,
3989 &dev_attr_pool_id.attr,
3990 &dev_attr_name.attr,
3991 &dev_attr_image_id.attr,
3992 &dev_attr_current_snap.attr,
3993 &dev_attr_parent.attr,
3994 &dev_attr_refresh.attr,
3998 static struct attribute_group rbd_attr_group = {
4002 static const struct attribute_group *rbd_attr_groups[] = {
4007 static void rbd_sysfs_dev_release(struct device *dev)
4011 static struct device_type rbd_device_type = {
4013 .groups = rbd_attr_groups,
4014 .release = rbd_sysfs_dev_release,
4017 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4019 kref_get(&spec->kref);
4024 static void rbd_spec_free(struct kref *kref);
4025 static void rbd_spec_put(struct rbd_spec *spec)
4028 kref_put(&spec->kref, rbd_spec_free);
4031 static struct rbd_spec *rbd_spec_alloc(void)
4033 struct rbd_spec *spec;
4035 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4039 spec->pool_id = CEPH_NOPOOL;
4040 spec->snap_id = CEPH_NOSNAP;
4041 kref_init(&spec->kref);
4046 static void rbd_spec_free(struct kref *kref)
4048 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4050 kfree(spec->pool_name);
4051 kfree(spec->image_id);
4052 kfree(spec->image_name);
4053 kfree(spec->snap_name);
4057 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4058 struct rbd_spec *spec)
4060 struct rbd_device *rbd_dev;
4062 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
4066 spin_lock_init(&rbd_dev->lock);
4068 atomic_set(&rbd_dev->parent_ref, 0);
4069 INIT_LIST_HEAD(&rbd_dev->node);
4070 init_rwsem(&rbd_dev->header_rwsem);
4072 rbd_dev->spec = spec;
4073 rbd_dev->rbd_client = rbdc;
4075 /* Initialize the layout used for all rbd requests */
4077 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4078 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
4079 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4080 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
4085 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4087 rbd_put_client(rbd_dev->rbd_client);
4088 rbd_spec_put(rbd_dev->spec);
4093 * Get the size and object order for an image snapshot, or if
4094 * snap_id is CEPH_NOSNAP, get this information for the base image.
4097 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4098 u8 *order, u64 *snap_size)
4100 __le64 snapid = cpu_to_le64(snap_id);
struct {
u8 order;
__le64 size;
} __attribute__ ((packed)) size_buf = { 0 };
4107 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4109 &snapid, sizeof (snapid),
4110 &size_buf, sizeof (size_buf));
4111 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4114 if (ret < sizeof (size_buf))
4118 *order = size_buf.order;
4119 dout(" order %u", (unsigned int)*order);
4121 *snap_size = le64_to_cpu(size_buf.size);
4123 dout(" snap_id 0x%016llx snap_size = %llu\n",
4124 (unsigned long long)snap_id,
4125 (unsigned long long)*snap_size);
4130 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4132 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4133 &rbd_dev->header.obj_order,
4134 &rbd_dev->header.image_size);
4137 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4143 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4147 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4148 "rbd", "get_object_prefix", NULL, 0,
4149 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4150 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4155 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4156 p + ret, NULL, GFP_NOIO);
4159 if (IS_ERR(rbd_dev->header.object_prefix)) {
4160 ret = PTR_ERR(rbd_dev->header.object_prefix);
4161 rbd_dev->header.object_prefix = NULL;
4163 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4171 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4174 __le64 snapid = cpu_to_le64(snap_id);
struct {
__le64 features;
__le64 incompat;
} __attribute__ ((packed)) features_buf = { 0 };
4182 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4183 "rbd", "get_features",
4184 &snapid, sizeof (snapid),
4185 &features_buf, sizeof (features_buf));
4186 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4189 if (ret < sizeof (features_buf))
4192 incompat = le64_to_cpu(features_buf.incompat);
4193 if (incompat & ~RBD_FEATURES_SUPPORTED)
4196 *snap_features = le64_to_cpu(features_buf.features);
4198 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4199 (unsigned long long)snap_id,
4200 (unsigned long long)*snap_features,
4201 (unsigned long long)le64_to_cpu(features_buf.incompat));
4206 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4208 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4209 &rbd_dev->header.features);
4212 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4214 struct rbd_spec *parent_spec;
4216 void *reply_buf = NULL;
4226 parent_spec = rbd_spec_alloc();
4230 size = sizeof (__le64) + /* pool_id */
4231 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4232 sizeof (__le64) + /* snap_id */
4233 sizeof (__le64); /* overlap */
4234 reply_buf = kmalloc(size, GFP_KERNEL);
4240 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
4241 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4242 "rbd", "get_parent",
4243 &snapid, sizeof (snapid),
4245 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4250 end = reply_buf + ret;
4252 ceph_decode_64_safe(&p, end, pool_id, out_err);
4253 if (pool_id == CEPH_NOPOOL) {
4255 * Either the parent never existed, or we have
4256 * record of it but the image got flattened so it no
4257 * longer has a parent. When the parent of a
4258 * layered image disappears we immediately set the
4259 * overlap to 0. The effect of this is that all new
4260 * requests will be treated as if the image had no parent.
4263 if (rbd_dev->parent_overlap) {
4264 rbd_dev->parent_overlap = 0;
4265 rbd_dev_parent_put(rbd_dev);
4266 pr_info("%s: clone image has been flattened\n",
4267 rbd_dev->disk->disk_name);
4270 goto out; /* No parent? No problem. */
4273 /* The ceph file layout needs to fit pool id in 32 bits */
4276 if (pool_id > (u64)U32_MAX) {
4277 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
4278 (unsigned long long)pool_id, U32_MAX);
4282 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4283 if (IS_ERR(image_id)) {
4284 ret = PTR_ERR(image_id);
4287 ceph_decode_64_safe(&p, end, snap_id, out_err);
4288 ceph_decode_64_safe(&p, end, overlap, out_err);
4291 * The parent won't change (except when the clone is
4292 * flattened, which is already handled). So we only need to
4293 * record the parent spec if we have not already done so.
4295 if (!rbd_dev->parent_spec) {
4296 parent_spec->pool_id = pool_id;
4297 parent_spec->image_id = image_id;
4298 parent_spec->snap_id = snap_id;
4299 rbd_dev->parent_spec = parent_spec;
4300 parent_spec = NULL; /* rbd_dev now owns this */
4306 * We always update the parent overlap. If it's zero we issue
4307 * a warning, as we will proceed as if there was no parent.
4311 /* refresh, careful to warn just once */
4312 if (rbd_dev->parent_overlap)
4314 "clone now standalone (overlap became 0)");
4317 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
4320 rbd_dev->parent_overlap = overlap;
4326 rbd_spec_put(parent_spec);
4331 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
struct {
__le64 stripe_unit;
__le64 stripe_count;
4336 } __attribute__ ((packed)) striping_info_buf = { 0 };
4337 size_t size = sizeof (striping_info_buf);
4344 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4345 "rbd", "get_stripe_unit_count", NULL, 0,
4346 (char *)&striping_info_buf, size);
4347 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4354 * We don't actually support the "fancy striping" feature
4355 * (STRIPINGV2) yet, but if the striping sizes are the
4356 * defaults the behavior is the same as before. So find
4357 * out, and only fail if the image has non-default values.
4360 obj_size = (u64)1 << rbd_dev->header.obj_order;
4361 p = &striping_info_buf;
4362 stripe_unit = ceph_decode_64(&p);
4363 if (stripe_unit != obj_size) {
4364 rbd_warn(rbd_dev, "unsupported stripe unit "
4365 "(got %llu want %llu)",
4366 stripe_unit, obj_size);
4369 stripe_count = ceph_decode_64(&p);
4370 if (stripe_count != 1) {
4371 rbd_warn(rbd_dev, "unsupported stripe count "
4372 "(got %llu want 1)", stripe_count);
4375 rbd_dev->header.stripe_unit = stripe_unit;
4376 rbd_dev->header.stripe_count = stripe_count;
4381 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4383 size_t image_id_size;
4388 void *reply_buf = NULL;
4390 char *image_name = NULL;
4393 rbd_assert(!rbd_dev->spec->image_name);
4395 len = strlen(rbd_dev->spec->image_id);
4396 image_id_size = sizeof (__le32) + len;
4397 image_id = kmalloc(image_id_size, GFP_KERNEL);
4402 end = image_id + image_id_size;
4403 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4405 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4406 reply_buf = kmalloc(size, GFP_KERNEL);
4410 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4411 "rbd", "dir_get_name",
4412 image_id, image_id_size,
4417 end = reply_buf + ret;
4419 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4420 if (IS_ERR(image_name))
4423 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4431 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4433 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4434 const char *snap_name;
4437 /* Skip over names until we find the one we are looking for */
4439 snap_name = rbd_dev->header.snap_names;
4440 while (which < snapc->num_snaps) {
4441 if (!strcmp(name, snap_name))
4442 return snapc->snaps[which];
4443 snap_name += strlen(snap_name) + 1;
4449 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4451 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4456 for (which = 0; !found && which < snapc->num_snaps; which++) {
4457 const char *snap_name;
4459 snap_id = snapc->snaps[which];
4460 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4461 if (IS_ERR(snap_name)) {
4462 /* ignore no-longer existing snapshots */
4463 if (PTR_ERR(snap_name) == -ENOENT)
4468 found = !strcmp(name, snap_name);
4471 return found ? snap_id : CEPH_NOSNAP;
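/*
 * Note the asymmetry between the two lookups above: the v1 variant
 * walks two parallel in-memory structures (snapc->snaps[] alongside
 * the packed, NUL-separated header.snap_names block, advancing by
 * strlen() + 1 per id), while the v2 variant must fetch each
 * candidate name from the OSD via rbd_dev_v2_snap_name().
 */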
4475 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4476 * no snapshot by that name is found, or if an error occurs.
4478 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4480 if (rbd_dev->image_format == 1)
4481 return rbd_v1_snap_id_by_name(rbd_dev, name);
4483 return rbd_v2_snap_id_by_name(rbd_dev, name);
4487 * An image being mapped will have everything but the snap id.
4489 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4491 struct rbd_spec *spec = rbd_dev->spec;
4493 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4494 rbd_assert(spec->image_id && spec->image_name);
4495 rbd_assert(spec->snap_name);
4497 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4500 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4501 if (snap_id == CEPH_NOSNAP)
4504 spec->snap_id = snap_id;
4506 spec->snap_id = CEPH_NOSNAP;
4513 * A parent image will have all ids but none of the names.
4515 * All names in an rbd spec are dynamically allocated. It's OK if we
4516 * can't figure out the name for an image id.
4518 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
4520 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4521 struct rbd_spec *spec = rbd_dev->spec;
4522 const char *pool_name;
4523 const char *image_name;
4524 const char *snap_name;
4527 rbd_assert(spec->pool_id != CEPH_NOPOOL);
4528 rbd_assert(spec->image_id);
4529 rbd_assert(spec->snap_id != CEPH_NOSNAP);
4531 /* Get the pool name; we have to make our own copy of this */
4533 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4535 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4538 pool_name = kstrdup(pool_name, GFP_KERNEL);
4542 /* Fetch the image name; tolerate failure here */
4544 image_name = rbd_dev_image_name(rbd_dev);
4546 rbd_warn(rbd_dev, "unable to get image name");
4548 /* Fetch the snapshot name */
4550 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4551 if (IS_ERR(snap_name)) {
4552 ret = PTR_ERR(snap_name);
4556 spec->pool_name = pool_name;
4557 spec->image_name = image_name;
4558 spec->snap_name = snap_name;
4568 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4577 struct ceph_snap_context *snapc;
4581 * We'll need room for the seq value (maximum snapshot id),
4582 * snapshot count, and an array of that many snapshot ids.
4583 * For now we have a fixed upper limit on the number we're
4584 * prepared to receive.
4586 size = sizeof (__le64) + sizeof (__le32) +
4587 RBD_MAX_SNAP_COUNT * sizeof (__le64);
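/*
 * Illustrative arithmetic: that is 8 + 4 + 510 * 8 = 4092 bytes
 * with RBD_MAX_SNAP_COUNT snapshots, so the largest possible reply
 * buffer is a single 4092-byte allocation.
 */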
4588 reply_buf = kzalloc(size, GFP_KERNEL);
4592 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4593 "rbd", "get_snapcontext", NULL, 0,
4595 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4600 end = reply_buf + ret;
4602 ceph_decode_64_safe(&p, end, seq, out);
4603 ceph_decode_32_safe(&p, end, snap_count, out);
4606 * Make sure the reported number of snapshot ids wouldn't go
4607 * beyond the end of our buffer. But before checking that,
4608 * make sure the computed size of the snapshot context we
4609 * allocate is representable in a size_t.
4611 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4616 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4620 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4626 for (i = 0; i < snap_count; i++)
4627 snapc->snaps[i] = ceph_decode_64(&p);
4629 ceph_put_snap_context(rbd_dev->header.snapc);
4630 rbd_dev->header.snapc = snapc;
4632 dout(" snap context seq = %llu, snap_count = %u\n",
4633 (unsigned long long)seq, (unsigned int)snap_count);
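/*
 * Illustrative sketch, not part of the driver: the bounded-decode
 * pattern used by rbd_dev_v2_snap_context() above, in miniature.
 * The ceph_decode_*_safe() macros jump to the supplied label when
 * fewer bytes remain between p and end than the value requires, so
 * a truncated or malformed reply can never be read past the end of
 * the buffer.  The function name is made up for this example.
 */
static int __maybe_unused rbd_example_decode(void *buf, size_t len,
                                             u64 *seq, u32 *count)
{
        void *p = buf;
        void *end = buf + len;

        ceph_decode_64_safe(&p, end, *seq, bad);        /* 8-byte seq */
        ceph_decode_32_safe(&p, end, *count, bad);      /* 4-byte count */

        return 0;
bad:
        return -ERANGE; /* reply was shorter than its header claimed */
}
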
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id)
{
        size_t size;
        void *reply_buf;
        __le64 snapid;
        int ret;
        void *p;
        void *end;
        char *snap_name;

        size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
        reply_buf = kmalloc(size, GFP_KERNEL);
        if (!reply_buf)
                return ERR_PTR(-ENOMEM);

        snapid = cpu_to_le64(snap_id);
        ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_snapshot_name",
                                &snapid, sizeof (snapid),
                                reply_buf, size);
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0) {
                snap_name = ERR_PTR(ret);
                goto out;
        }

        p = reply_buf;
        end = reply_buf + ret;
        snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
        if (IS_ERR(snap_name))
                goto out;

        dout("  snap_id 0x%016llx snap_name = %s\n",
                (unsigned long long)snap_id, snap_name);
out:
        kfree(reply_buf);

        return snap_name;
}

static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
        bool first_time = rbd_dev->header.object_prefix == NULL;
        int ret;

        ret = rbd_dev_v2_image_size(rbd_dev);
        if (ret)
                return ret;

        if (first_time) {
                ret = rbd_dev_v2_header_onetime(rbd_dev);
                if (ret)
                        return ret;
        }

        ret = rbd_dev_v2_snap_context(rbd_dev);
        dout("rbd_dev_v2_snap_context returned %d\n", ret);

        return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

        if (rbd_dev->image_format == 1)
                return rbd_dev_v1_header_info(rbd_dev);

        return rbd_dev_v2_header_info(rbd_dev);
}

static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
        struct device *dev;
        int ret;

        dev = &rbd_dev->dev;
        dev->bus = &rbd_bus_type;
        dev->type = &rbd_device_type;
        dev->parent = &rbd_root_dev;
        dev->release = rbd_dev_device_release;
        dev_set_name(dev, "%d", rbd_dev->dev_id);
        ret = device_register(dev);

        return ret;
}

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
        device_unregister(&rbd_dev->dev);
}

/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.
 */
static int rbd_dev_id_get(struct rbd_device *rbd_dev)
{
        int new_dev_id;

        new_dev_id = ida_simple_get(&rbd_dev_id_ida,
                                    0, minor_to_rbd_dev_id(1 << MINORBITS),
                                    GFP_KERNEL);
        if (new_dev_id < 0)
                return new_dev_id;

        rbd_dev->dev_id = new_dev_id;

        spin_lock(&rbd_dev_list_lock);
        list_add_tail(&rbd_dev->node, &rbd_dev_list);
        spin_unlock(&rbd_dev_list_lock);

        dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);

        return 0;
}

/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
        spin_lock(&rbd_dev_list_lock);
        list_del_init(&rbd_dev->node);
        spin_unlock(&rbd_dev_list_lock);

        ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);

        dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
}

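/*
 * Illustrative sketch, not part of the driver: in single-major mode
 * each device id owns a block of minors (one per partition), so the
 * id/minor conversions are plain shifts by
 * RBD_SINGLE_MAJOR_PART_SHIFT.  rbd_dev_id_to_minor() and
 * minor_to_rbd_dev_id() are this file's own helpers, defined
 * earlier; this example merely demonstrates that they round-trip.
 */
static void __maybe_unused rbd_example_id_minor_round_trip(int dev_id)
{
        int minor = rbd_dev_id_to_minor(dev_id);

        /* the first minor of a device maps back to its dev id */
        rbd_assert(minor_to_rbd_dev_id(minor) == dev_id);
        dout("dev id %d <-> first minor %d\n", dev_id, minor);
}
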
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
        /*
         * These are the characters that produce nonzero for
         * isspace() in the "C" and "POSIX" locales.
         */
        const char *spaces = " \f\n\r\t\v";

        *buf += strspn(*buf, spaces);   /* Find start of token */

        return strcspn(*buf, spaces);   /* Return token length */
}

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
        char *dup;
        size_t len;

        len = next_token(buf);
        dup = kmemdup(*buf, len + 1, GFP_KERNEL);
        if (!dup)
                return NULL;
        *(dup + len) = '\0';
        *buf += len;

        if (lenp)
                *lenp = len;

        return dup;
}

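/*
 * Illustrative sketch, not part of the driver: how the two helpers
 * above cooperate when walking an option buffer.  next_token()
 * skips leading whitespace and reports the token length without
 * consuming it; dup_token() copies the token and advances *buf past
 * it, so repeated calls peel off one token at a time.  The function
 * name is made up for this example.
 */
static void __maybe_unused rbd_example_tokenize(const char *buf)
{
        char *tok;
        size_t len;

        while ((tok = dup_token(&buf, &len)) != NULL && len) {
                dout("token (len %zu): %s\n", len, tok);
                kfree(tok);
        }
        kfree(tok);     /* dup_token duplicates even a zero-length token */
}
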
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *      Address of an rbd options pointer.  Fully initialized by
 *      this function; caller must release with kfree().
 *  spec
 *      Address of an rbd image specification pointer.  Fully
 *      initialized by this function based on parsed options.
 *      Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
                                struct ceph_options **ceph_opts,
                                struct rbd_options **opts,
                                struct rbd_spec **rbd_spec)
{
        size_t len;
        char *options;
        const char *mon_addrs;
        char *snap_name;
        size_t mon_addrs_size;
        struct rbd_spec *spec = NULL;
        struct rbd_options *rbd_opts = NULL;
        struct ceph_options *copts;
        int ret;

        /* The first four tokens are required */

        len = next_token(&buf);
        if (!len) {
                rbd_warn(NULL, "no monitor address(es) provided");
                return -EINVAL;
        }
        mon_addrs = buf;
        mon_addrs_size = len + 1;
        buf += len;

        ret = -EINVAL;
        options = dup_token(&buf, NULL);
        if (!options)
                return -ENOMEM;
        if (!*options) {
                rbd_warn(NULL, "no options provided");
                goto out_err;
        }

        spec = rbd_spec_alloc();
        if (!spec)
                goto out_mem;

        spec->pool_name = dup_token(&buf, NULL);
        if (!spec->pool_name)
                goto out_mem;
        if (!*spec->pool_name) {
                rbd_warn(NULL, "no pool name provided");
                goto out_err;
        }

        spec->image_name = dup_token(&buf, NULL);
        if (!spec->image_name)
                goto out_mem;
        if (!*spec->image_name) {
                rbd_warn(NULL, "no image name provided");
                goto out_err;
        }

        /*
         * Snapshot name is optional; default is to use "-"
         * (indicating the head/no snapshot).
         */
        len = next_token(&buf);
        if (!len) {
                buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
                len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
        } else if (len > RBD_MAX_SNAP_NAME_LEN) {
                ret = -ENAMETOOLONG;
                goto out_err;
        }
        snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
        if (!snap_name)
                goto out_mem;
        *(snap_name + len) = '\0';
        spec->snap_name = snap_name;

        /* Initialize all rbd options to the defaults */

        rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
        if (!rbd_opts)
                goto out_mem;

        rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

        copts = ceph_parse_options(options, mon_addrs,
                                        mon_addrs + mon_addrs_size - 1,
                                        parse_rbd_opts_token, rbd_opts);
        if (IS_ERR(copts)) {
                ret = PTR_ERR(copts);
                goto out_err;
        }
        kfree(options);

        *ceph_opts = copts;
        *opts = rbd_opts;
        *rbd_spec = spec;

        return 0;
out_mem:
        ret = -ENOMEM;
out_err:
        kfree(rbd_opts);
        rbd_spec_put(spec);
        kfree(options);

        return ret;
}

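/*
 * Illustrative sketch, not part of the driver: what
 * rbd_add_parse_args() extracts from a typical buffer written to
 * /sys/bus/rbd/add.  The monitor addresses, option string, pool,
 * image and snapshot names below are made-up examples, as is the
 * function name.
 */
static void __maybe_unused rbd_example_parse(void)
{
        static const char buf[] =
                "1.2.3.4:6789,1.2.3.5:6789 name=admin rbd myimage mysnap";
        struct ceph_options *ceph_opts;
        struct rbd_options *rbd_opts;
        struct rbd_spec *spec;

        if (rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec))
                return;

        /*
         * Here spec->pool_name is "rbd", spec->image_name is
         * "myimage" and spec->snap_name is "mysnap".  Omitting the
         * final token would leave snap_name as RBD_SNAP_HEAD_NAME
         * ("-"), i.e. a writable mapping of the image head.
         */
        rbd_spec_put(spec);
        kfree(rbd_opts);
        ceph_destroy_options(ceph_opts);
}
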
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
        struct ceph_options *opts = rbdc->client->options;
        u64 newest_epoch;
        int tries = 0;
        int ret;

again:
        ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
        if (ret == -ENOENT && tries++ < 1) {
                ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
                                               &newest_epoch);
                if (ret < 0)
                        return ret;

                if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
                        ceph_monc_request_next_osdmap(&rbdc->client->monc);
                        (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
                                                     newest_epoch,
                                                     opts->mount_timeout);
                        goto again;
                } else {
                        /* the osdmap we have is new enough */
                        return -ENOENT;
                }
        }

        return ret;
}

/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
        int ret;
        size_t size;
        char *object_name;
        void *response;
        char *image_id;

        /*
         * When probing a parent image, the image id is already
         * known (and the image name likely is not).  There's no
         * need to fetch the image id again in this case.  We
         * do still need to set the image format though.
         */
        if (rbd_dev->spec->image_id) {
                rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

                return 0;
        }

        /*
         * First, see if the format 2 image id file exists, and if
         * so, get the image's persistent id from it.
         */
        size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
        object_name = kmalloc(size, GFP_NOIO);
        if (!object_name)
                return -ENOMEM;
        sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
        dout("rbd id object name is %s\n", object_name);

        /* Response will be an encoded string, which includes a length */

        size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
        response = kzalloc(size, GFP_NOIO);
        if (!response) {
                ret = -ENOMEM;
                goto out;
        }

        /* If it doesn't exist we'll assume it's a format 1 image */

        ret = rbd_obj_method_sync(rbd_dev, object_name,
                                "rbd", "get_id", NULL, 0,
                                response, RBD_IMAGE_ID_LEN_MAX);
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret == -ENOENT) {
                image_id = kstrdup("", GFP_KERNEL);
                ret = image_id ? 0 : -ENOMEM;
                if (!ret)
                        rbd_dev->image_format = 1;
        } else if (ret >= 0) {
                void *p = response;

                image_id = ceph_extract_encoded_string(&p, p + ret,
                                                NULL, GFP_NOIO);
                ret = PTR_ERR_OR_ZERO(image_id);
                if (!ret)
                        rbd_dev->image_format = 2;
        }

        if (!ret) {
                rbd_dev->spec->image_id = image_id;
                dout("image_id is %s\n", image_id);
        }
out:
        kfree(response);
        kfree(object_name);

        return ret;
}

/*
 * Undo whatever state changes are made by v1 or v2 header info
 * calls.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
        struct rbd_image_header *header;

        rbd_dev_parent_put(rbd_dev);

        /* Free dynamic fields from the header, then zero it out */

        header = &rbd_dev->header;
        ceph_put_snap_context(header->snapc);
        kfree(header->snap_sizes);
        kfree(header->snap_names);
        kfree(header->object_prefix);
        memset(header, 0, sizeof (*header));
}

static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
        int ret;

        ret = rbd_dev_v2_object_prefix(rbd_dev);
        if (ret)
                goto out_err;

        /*
         * Get and check the features for the image.  Currently the
         * features are assumed to never change.
         */
        ret = rbd_dev_v2_features(rbd_dev);
        if (ret)
                goto out_err;

        /* If the image supports fancy striping, get its parameters */

        if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
                ret = rbd_dev_v2_striping_info(rbd_dev);
                if (ret < 0)
                        goto out_err;
        }
        /* No support for crypto and compression type format 2 images */

        return 0;
out_err:
        rbd_dev->header.features = 0;
        kfree(rbd_dev->header.object_prefix);
        rbd_dev->header.object_prefix = NULL;

        return ret;
}

static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
        struct rbd_device *parent = NULL;
        struct rbd_spec *parent_spec;
        struct rbd_client *rbdc;
        int ret;

        if (!rbd_dev->parent_spec)
                return 0;
        /*
         * We need to pass a reference to the client and the parent
         * spec when creating the parent rbd_dev.  Images related by
         * parent/child relationships always share both.
         */
        parent_spec = rbd_spec_get(rbd_dev->parent_spec);
        rbdc = __rbd_get_client(rbd_dev->rbd_client);

        ret = -ENOMEM;
        parent = rbd_dev_create(rbdc, parent_spec);
        if (!parent)
                goto out_err;

        ret = rbd_dev_image_probe(parent, false);
        if (ret < 0)
                goto out_err;
        rbd_dev->parent = parent;
        atomic_set(&rbd_dev->parent_ref, 1);

        return 0;
out_err:
        if (parent) {
                rbd_dev_unparent(rbd_dev);
                kfree(rbd_dev->header_name);
                rbd_dev_destroy(parent);
        } else {
                rbd_put_client(rbdc);
                rbd_spec_put(parent_spec);
        }

        return ret;
}

static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
        int ret;

        /* Get an id and fill in device name. */

        ret = rbd_dev_id_get(rbd_dev);
        if (ret)
                return ret;

        BUILD_BUG_ON(DEV_NAME_LEN
                        < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
        sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

        /* Record our major and minor device numbers. */

        if (!single_major) {
                ret = register_blkdev(0, rbd_dev->name);
                if (ret < 0)
                        goto err_out_id;
                rbd_dev->major = ret;
                rbd_dev->minor = 0;
        } else {
                rbd_dev->major = rbd_major;
                rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
        }

        /* Set up the blkdev mapping. */

        ret = rbd_init_disk(rbd_dev);
        if (ret)
                goto err_out_blkdev;

        ret = rbd_dev_mapping_set(rbd_dev);
        if (ret)
                goto err_out_disk;

        set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
        set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

        ret = rbd_bus_add_dev(rbd_dev);
        if (ret)
                goto err_out_mapping;

        /* Everything's ready.  Announce the disk to the world. */

        set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
        add_disk(rbd_dev->disk);

        pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
                (unsigned long long) rbd_dev->mapping.size);

        return ret;

err_out_mapping:
        rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
        rbd_free_disk(rbd_dev);
err_out_blkdev:
        if (!single_major)
                unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
        rbd_dev_id_put(rbd_dev);
        rbd_dev_mapping_clear(rbd_dev);

        return ret;
}

static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
        struct rbd_spec *spec = rbd_dev->spec;
        size_t size;

        /* Record the header object name for this rbd image. */

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

        if (rbd_dev->image_format == 1)
                size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
        else
                size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

        rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
        if (!rbd_dev->header_name)
                return -ENOMEM;

        if (rbd_dev->image_format == 1)
                sprintf(rbd_dev->header_name, "%s%s",
                        spec->image_name, RBD_SUFFIX);
        else
                sprintf(rbd_dev->header_name, "%s%s",
                        RBD_HEADER_PREFIX, spec->image_id);
        return 0;
}

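/*
 * Illustrative sketch, not part of the driver: the names
 * rbd_dev_header_name() produces.  A format 1 image named "myimage"
 * gets the header object "myimage" + RBD_SUFFIX; a format 2 image
 * with id "abc123" gets RBD_HEADER_PREFIX + "abc123" (the example
 * image name and id are made up).  The same result could be had
 * with kasprintf(), which sizes the buffer itself:
 */
static char * __maybe_unused
rbd_example_header_name(struct rbd_device *rbd_dev)
{
        if (rbd_dev->image_format == 1)
                return kasprintf(GFP_KERNEL, "%s%s",
                                 rbd_dev->spec->image_name, RBD_SUFFIX);

        return kasprintf(GFP_KERNEL, "%s%s",
                         RBD_HEADER_PREFIX, rbd_dev->spec->image_id);
}
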
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
        rbd_dev_unprobe(rbd_dev);
        kfree(rbd_dev->header_name);
        rbd_dev->header_name = NULL;
        rbd_dev->image_format = 0;
        kfree(rbd_dev->spec->image_id);
        rbd_dev->spec->image_id = NULL;

        rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
        int ret;

        /*
         * Get the id from the image id object.  Unless there's an
         * error, rbd_dev->spec->image_id will be filled in with
         * a dynamically-allocated string, and rbd_dev->image_format
         * will be set to either 1 or 2.
         */
        ret = rbd_dev_image_id(rbd_dev);
        if (ret)
                return ret;

        ret = rbd_dev_header_name(rbd_dev);
        if (ret)
                goto err_out_format;

        if (mapping) {
                ret = rbd_dev_header_watch_sync(rbd_dev);
                if (ret) {
                        if (ret == -ENOENT)
                                pr_info("image %s/%s does not exist\n",
                                        rbd_dev->spec->pool_name,
                                        rbd_dev->spec->image_name);
                        goto out_header_name;
                }
        }

        ret = rbd_dev_header_info(rbd_dev);
        if (ret)
                goto err_out_watch;

        /*
         * If this image is the one being mapped, we have pool name and
         * id, image name and id, and snap name - need to fill snap id.
         * Otherwise this is a parent image, identified by pool, image
         * and snap ids - need to fill in names for those ids.
         */
        if (mapping)
                ret = rbd_spec_fill_snap_id(rbd_dev);
        else
                ret = rbd_spec_fill_names(rbd_dev);
        if (ret) {
                if (ret == -ENOENT)
                        pr_info("snap %s/%s@%s does not exist\n",
                                rbd_dev->spec->pool_name,
                                rbd_dev->spec->image_name,
                                rbd_dev->spec->snap_name);
                goto err_out_probe;
        }

        if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
                ret = rbd_dev_v2_parent_info(rbd_dev);
                if (ret)
                        goto err_out_probe;

                /*
                 * Need to warn users if this image is the one being
                 * mapped and has a parent.
                 */
                if (mapping && rbd_dev->parent_spec)
                        rbd_warn(rbd_dev,
                                 "WARNING: kernel layering is EXPERIMENTAL!");
        }

        ret = rbd_dev_probe_parent(rbd_dev);
        if (ret)
                goto err_out_probe;

        dout("discovered format %u image, header name is %s\n",
                rbd_dev->image_format, rbd_dev->header_name);
        return 0;

err_out_probe:
        rbd_dev_unprobe(rbd_dev);
err_out_watch:
        if (mapping)
                rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
        kfree(rbd_dev->header_name);
        rbd_dev->header_name = NULL;
err_out_format:
        rbd_dev->image_format = 0;
        kfree(rbd_dev->spec->image_id);
        rbd_dev->spec->image_id = NULL;

        return ret;
}

static ssize_t do_rbd_add(struct bus_type *bus,
                          const char *buf,
                          size_t count)
{
        struct rbd_device *rbd_dev = NULL;
        struct ceph_options *ceph_opts = NULL;
        struct rbd_options *rbd_opts = NULL;
        struct rbd_spec *spec = NULL;
        struct rbd_client *rbdc;
        bool read_only;
        int rc = -ENOMEM;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        /* parse add command */
        rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
        if (rc < 0)
                goto err_out_module;
        read_only = rbd_opts->read_only;
        kfree(rbd_opts);
        rbd_opts = NULL;        /* done with this */

        rbdc = rbd_get_client(ceph_opts);
        if (IS_ERR(rbdc)) {
                rc = PTR_ERR(rbdc);
                goto err_out_args;
        }

        /* pick the pool */
        rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
        if (rc < 0) {
                if (rc == -ENOENT)
                        pr_info("pool %s does not exist\n", spec->pool_name);
                goto err_out_client;
        }
        spec->pool_id = (u64)rc;

        /* The ceph file layout needs to fit pool id in 32 bits */

        if (spec->pool_id > (u64)U32_MAX) {
                rbd_warn(NULL, "pool id too large (%llu > %u)",
                                (unsigned long long)spec->pool_id, U32_MAX);
                rc = -EIO;
                goto err_out_client;
        }

        rbd_dev = rbd_dev_create(rbdc, spec);
        if (!rbd_dev)
                goto err_out_client;
        rbdc = NULL;            /* rbd_dev now owns this */
        spec = NULL;            /* rbd_dev now owns this */

        rc = rbd_dev_image_probe(rbd_dev, true);
        if (rc < 0)
                goto err_out_rbd_dev;

        /* If we are mapping a snapshot it must be marked read-only */

        if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
                read_only = true;
        rbd_dev->mapping.read_only = read_only;

        rc = rbd_dev_device_setup(rbd_dev);
        if (rc) {
                /*
                 * rbd_dev_header_unwatch_sync() can't be moved into
                 * rbd_dev_image_release() without refactoring, see
                 * commit 1f3ef78861ac.
                 */
                rbd_dev_header_unwatch_sync(rbd_dev);
                rbd_dev_image_release(rbd_dev);
                goto err_out_module;
        }

        return count;

err_out_rbd_dev:
        rbd_dev_destroy(rbd_dev);
err_out_client:
        rbd_put_client(rbdc);
err_out_args:
        rbd_spec_put(spec);
err_out_module:
        module_put(THIS_MODULE);

        dout("Error adding device %s\n", buf);

        return (ssize_t)rc;
}

static ssize_t rbd_add(struct bus_type *bus,
                       const char *buf,
                       size_t count)
{
        if (single_major)
                return -EINVAL;

        return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
                                    const char *buf,
                                    size_t count)
{
        return do_rbd_add(bus, buf, count);
}

static void rbd_dev_device_release(struct device *dev)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

        rbd_free_disk(rbd_dev);
        clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
        rbd_dev_mapping_clear(rbd_dev);
        if (!single_major)
                unregister_blkdev(rbd_dev->major, rbd_dev->name);
        rbd_dev_id_put(rbd_dev);
        rbd_dev_mapping_clear(rbd_dev);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
        while (rbd_dev->parent) {
                struct rbd_device *first = rbd_dev;
                struct rbd_device *second = first->parent;
                struct rbd_device *third;

                /*
                 * Follow to the parent with no grandparent and
                 * remove it.
                 */
                while (second && (third = second->parent)) {
                        first = second;
                        second = third;
                }
                rbd_assert(second);
                rbd_dev_image_release(second);
                first->parent = NULL;
                first->parent_overlap = 0;
                rbd_assert(first->parent_spec);
                rbd_spec_put(first->parent_spec);
                first->parent_spec = NULL;
        }
}

static ssize_t do_rbd_remove(struct bus_type *bus,
                             const char *buf,
                             size_t count)
{
        struct rbd_device *rbd_dev = NULL;
        struct list_head *tmp;
        int dev_id;
        unsigned long ul;
        bool already = false;
        int ret;

        ret = kstrtoul(buf, 10, &ul);
        if (ret)
                return ret;

        /* convert to int; abort if we lost anything in the conversion */
        dev_id = (int)ul;
        if (dev_id != ul)
                return -EINVAL;

        ret = -ENOENT;
        spin_lock(&rbd_dev_list_lock);
        list_for_each(tmp, &rbd_dev_list) {
                rbd_dev = list_entry(tmp, struct rbd_device, node);
                if (rbd_dev->dev_id == dev_id) {
                        ret = 0;
                        break;
                }
        }
        if (!ret) {
                spin_lock_irq(&rbd_dev->lock);
                if (rbd_dev->open_count)
                        ret = -EBUSY;
                else
                        already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
                                                        &rbd_dev->flags);
                spin_unlock_irq(&rbd_dev->lock);
        }
        spin_unlock(&rbd_dev_list_lock);
        if (ret < 0 || already)
                return ret;

        rbd_dev_header_unwatch_sync(rbd_dev);
        /*
         * flush remaining watch callbacks - these must be complete
         * before the osd_client is shutdown
         */
        dout("%s: flushing notifies", __func__);
        ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

        /*
         * Don't free anything from rbd_dev->disk until after all
         * notifies are completely processed. Otherwise
         * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
         * in a potential use after free of rbd_dev->disk or rbd_dev.
         */
        rbd_bus_del_dev(rbd_dev);
        rbd_dev_image_release(rbd_dev);
        module_put(THIS_MODULE);

        return count;
}

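/*
 * Illustrative sketch, not part of the driver: the removal guard
 * used above in miniature.  Removal is refused while the device is
 * open, and because RBD_DEV_FLAG_REMOVING is claimed with
 * test_and_set_bit() under rbd_dev->lock, two concurrent removals
 * cannot both proceed.  The function name is made up for this
 * example.
 */
static bool __maybe_unused rbd_example_mark_removing(struct rbd_device *rbd_dev)
{
        bool busy, already;

        spin_lock_irq(&rbd_dev->lock);
        busy = rbd_dev->open_count > 0;
        already = busy ? false :
                test_and_set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
        spin_unlock_irq(&rbd_dev->lock);

        return !busy && !already;       /* true if we own the removal */
}
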
static ssize_t rbd_remove(struct bus_type *bus,
                          const char *buf,
                          size_t count)
{
        if (single_major)
                return -EINVAL;

        return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
                                       const char *buf,
                                       size_t count)
{
        return do_rbd_remove(bus, buf, count);
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
        int ret;

        ret = device_register(&rbd_root_dev);
        if (ret < 0)
                return ret;

        ret = bus_register(&rbd_bus_type);
        if (ret < 0)
                device_unregister(&rbd_root_dev);

        return ret;
}

static void rbd_sysfs_cleanup(void)
{
        bus_unregister(&rbd_bus_type);
        device_unregister(&rbd_root_dev);
}

static int rbd_slab_init(void)
{
        rbd_assert(!rbd_img_request_cache);
        rbd_img_request_cache = kmem_cache_create("rbd_img_request",
                                        sizeof (struct rbd_img_request),
                                        __alignof__(struct rbd_img_request),
                                        0, NULL);
        if (!rbd_img_request_cache)
                return -ENOMEM;

        rbd_assert(!rbd_obj_request_cache);
        rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
                                        sizeof (struct rbd_obj_request),
                                        __alignof__(struct rbd_obj_request),
                                        0, NULL);
        if (!rbd_obj_request_cache)
                goto out_err;

        rbd_assert(!rbd_segment_name_cache);
        rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
                                        CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
        if (rbd_segment_name_cache)
                return 0;
out_err:
        if (rbd_obj_request_cache) {
                kmem_cache_destroy(rbd_obj_request_cache);
                rbd_obj_request_cache = NULL;
        }

        kmem_cache_destroy(rbd_img_request_cache);
        rbd_img_request_cache = NULL;

        return -ENOMEM;
}

static void rbd_slab_exit(void)
{
        rbd_assert(rbd_segment_name_cache);
        kmem_cache_destroy(rbd_segment_name_cache);
        rbd_segment_name_cache = NULL;

        rbd_assert(rbd_obj_request_cache);
        kmem_cache_destroy(rbd_obj_request_cache);
        rbd_obj_request_cache = NULL;

        rbd_assert(rbd_img_request_cache);
        kmem_cache_destroy(rbd_img_request_cache);
        rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
        int rc;

        if (!libceph_compatible(NULL)) {
                rbd_warn(NULL, "libceph incompatibility (quitting)");
                return -EINVAL;
        }

        rc = rbd_slab_init();
        if (rc)
                return rc;

        /*
         * The number of active work items is limited by the number of
         * rbd devices * queue depth, so leave @max_active at default.
         */
        rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
        if (!rbd_wq) {
                rc = -ENOMEM;
                goto err_out_slab;
        }

        if (single_major) {
                rbd_major = register_blkdev(0, RBD_DRV_NAME);
                if (rbd_major < 0) {
                        rc = rbd_major;
                        goto err_out_wq;
                }
        }

        rc = rbd_sysfs_init();
        if (rc)
                goto err_out_blkdev;

        if (single_major)
                pr_info("loaded (major %d)\n", rbd_major);
        else
                pr_info("loaded\n");

        return 0;

err_out_blkdev:
        if (single_major)
                unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
        destroy_workqueue(rbd_wq);
err_out_slab:
        rbd_slab_exit();
        return rc;
}

static void __exit rbd_exit(void)
{
        ida_destroy(&rbd_dev_id_ida);
        rbd_sysfs_cleanup();
        if (single_major)
                unregister_blkdev(rbd_major, RBD_DRV_NAME);
        destroy_workqueue(rbd_wq);
        rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");