/*  Xenbus code for blkif backend
    Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
    Copyright (C) 2005 XenSource Ltd

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*/

#include <stdarg.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"
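
/*
 * Per-device backend state.  A pointer to this structure is stored as the
 * xenbus device's driver data; backend_watch watches the "physical-device"
 * node that the hotplug scripts populate.
 */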
struct backend_info {
	struct xenbus_device	*dev;
	struct xen_blkif	*blkif;
	struct xenbus_watch	backend_watch;
	unsigned		major;
	unsigned		minor;
	char			*mode;
};

static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char **,
			    unsigned int);

struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}
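
/*
 * Derive the backend kthread's name from the "dev" node written by the
 * toolstack, e.g. "blkback.<domid>.xvda" for a /dev/xvda backing device
 * (device name given for illustration only).
 */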
static int blkback_name(struct xen_blkif *blkif, char *buf)
{
	char *devpath, *devname;
	struct xenbus_device *dev = blkif->be->dev;

	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
	if (IS_ERR(devpath))
		return PTR_ERR(devpath);

	devname = strstr(devpath, "/dev/");
	if (devname != NULL)
		devname += strlen("/dev/");
	else
		devname = devpath;

	snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
	kfree(devpath);

	return 0;
}
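
/*
 * Try to advance the device to Connected: bail out unless both the event
 * channel (irq) and the backing block device are in place, flush any stale
 * page-cache data for the backing device, then start the per-device kthread
 * that services ring requests.
 */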
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
	int err;
	char name[TASK_COMM_LEN];

	/* Not ready to connect? */
	if (!blkif->irq || !blkif->vbd.bdev)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);

	blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, "%s", name);
	if (IS_ERR(blkif->xenblkd)) {
		err = PTR_ERR(blkif->xenblkd);
		blkif->xenblkd = NULL;
		xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
	}
}
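
/*
 * Allocate and initialise a blkif, including the pool of XEN_BLKIF_REQS
 * pending_req structures (with their indirect-segment arrays) that the
 * request path draws from, so no allocation is needed while servicing I/O.
 */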
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
	struct xen_blkif *blkif;
	struct pending_req *req, *n;
	int i, j;

	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	blkif->domid = domid;
	spin_lock_init(&blkif->blk_ring_lock);
	atomic_set(&blkif->refcnt, 1);
	init_waitqueue_head(&blkif->wq);
	init_completion(&blkif->drain_complete);
	atomic_set(&blkif->drain, 0);
	blkif->st_print = jiffies;
	init_waitqueue_head(&blkif->waiting_to_free);
	blkif->persistent_gnts.rb_node = NULL;
	spin_lock_init(&blkif->free_pages_lock);
	INIT_LIST_HEAD(&blkif->free_pages);
	INIT_LIST_HEAD(&blkif->persistent_purge_list);
	blkif->free_pages_num = 0;
	atomic_set(&blkif->persistent_gnt_in_use, 0);
	atomic_set(&blkif->inflight, 0);

	INIT_LIST_HEAD(&blkif->pending_free);

	for (i = 0; i < XEN_BLKIF_REQS; i++) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			goto fail;
		list_add_tail(&req->free_list,
			      &blkif->pending_free);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			req->segments[j] = kzalloc(sizeof(*req->segments[0]),
						   GFP_KERNEL);
			if (!req->segments[j])
				goto fail;
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
							 GFP_KERNEL);
			if (!req->indirect_pages[j])
				goto fail;
		}
	}
	spin_lock_init(&blkif->pending_free_lock);
	init_waitqueue_head(&blkif->pending_free_wq);
	init_waitqueue_head(&blkif->shutdown_wq);

	return blkif;

fail:
	list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
		list_del(&req->free_list);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			if (!req->segments[j])
				break;
			kfree(req->segments[j]);
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			if (!req->indirect_pages[j])
				break;
			kfree(req->indirect_pages[j]);
		}
		kfree(req);
	}

	kmem_cache_free(xen_blkif_cachep, blkif);

	return ERR_PTR(-ENOMEM);
}
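
/*
 * Map the frontend's shared ring page into this domain and bind its event
 * channel.  The ring is interpreted according to the negotiated ABI, since
 * a 32-bit frontend lays out requests differently from a 64-bit one.
 */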
static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
			 unsigned int evtchn)
{
	int err;

	/* Already connected through? */
	if (blkif->irq)
		return 0;

	err = xenbus_map_ring_valloc(blkif->be->dev, shared_page, &blkif->blk_ring);
	if (err < 0)
		return err;

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring;
		sring = (struct blkif_sring *)blkif->blk_ring;
		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32;
		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring;
		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64;
		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring;
		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
		break;
	}
	default:
		BUG();
	}

	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
						    xen_blkif_be_int, 0,
						    "blkif-backend", blkif);
	if (err < 0) {
		xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
		blkif->blk_rings.common.sring = NULL;
		return err;
	}
	blkif->irq = err;

	return 0;
}
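
/*
 * Tear down the connection to the frontend: stop the I/O kthread, wait for
 * in-flight requests to drain (the dec/wait/inc dance below temporarily
 * drops our own reference so the count can reach zero), then release the
 * irq and the mapped ring.
 */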
static void xen_blkif_disconnect(struct xen_blkif *blkif)
{
	if (blkif->xenblkd) {
		kthread_stop(blkif->xenblkd);
		wake_up(&blkif->shutdown_wq);
		blkif->xenblkd = NULL;
	}

	atomic_dec(&blkif->refcnt);
	wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
	atomic_inc(&blkif->refcnt);

	if (blkif->irq) {
		unbind_from_irqhandler(blkif->irq, blkif);
		blkif->irq = 0;
	}

	if (blkif->blk_rings.common.sring) {
		xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
		blkif->blk_rings.common.sring = NULL;
	}
}

static void xen_blkif_free(struct xen_blkif *blkif)
{
	struct pending_req *req, *n;
	int i = 0, j;

	if (!atomic_dec_and_test(&blkif->refcnt))
		BUG();

	/* Remove all persistent grants and the cache of ballooned pages. */
	xen_blkbk_free_caches(blkif);

	/* Make sure everything is drained before shutting down */
	BUG_ON(blkif->persistent_gnt_c != 0);
	BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
	BUG_ON(blkif->free_pages_num != 0);
	BUG_ON(!list_empty(&blkif->persistent_purge_list));
	BUG_ON(!list_empty(&blkif->free_pages));
	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));

	/* Check that there is no request in use */
	list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
		list_del(&req->free_list);

		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
			kfree(req->segments[j]);

		for (j = 0; j < MAX_INDIRECT_PAGES; j++)
			kfree(req->indirect_pages[j]);

		kfree(req);
		i++;
	}

	WARN_ON(i != XEN_BLKIF_REQS);

	kmem_cache_free(xen_blkif_cachep, blkif);
}

int __init xen_blkif_interface_init(void)
{
	xen_blkif_cachep = kmem_cache_create("blkif_cache",
					     sizeof(struct xen_blkif),
					     0, 0, NULL);
	if (!xen_blkif_cachep)
		return -ENOMEM;

	return 0;
}

/*
 * sysfs interface for VBD I/O requests
 */

#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
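
/*
 * Each invocation below defines a read-only "show" routine and a
 * DEVICE_ATTR, exposing one per-interface I/O counter under the device's
 * "statistics" sysfs group.
 */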
VBD_SHOW(oo_req,  "%llu\n", be->blkif->st_oo_req);
VBD_SHOW(rd_req,  "%llu\n", be->blkif->st_rd_req);
VBD_SHOW(wr_req,  "%llu\n", be->blkif->st_wr_req);
VBD_SHOW(f_req,   "%llu\n", be->blkif->st_f_req);
VBD_SHOW(ds_req,  "%llu\n", be->blkif->st_ds_req);
VBD_SHOW(rd_sect, "%llu\n", be->blkif->st_rd_sect);
VBD_SHOW(wr_sect, "%llu\n", be->blkif->st_wr_sect);

static struct attribute *xen_vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_ds_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};

static struct attribute_group xen_vbdstat_group = {
	.name = "statistics",
	.attrs = xen_vbdstat_attrs,
};

VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);

static int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
	int error;

	error = device_create_file(&dev->dev, &dev_attr_physical_device);
	if (error)
		goto fail1;

	error = device_create_file(&dev->dev, &dev_attr_mode);
	if (error)
		goto fail2;

	error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
	if (error)
		goto fail3;

	return 0;

fail3:	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
	return error;
}

static void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
	device_remove_file(&dev->dev, &dev_attr_mode);
	device_remove_file(&dev->dev, &dev_attr_physical_device);
}

static void xen_vbd_free(struct xen_vbd *vbd)
{
	if (vbd->bdev)
		blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
	vbd->bdev = NULL;
}
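
/*
 * Open the physical device named by major:minor and fill in the vbd,
 * recording its size, CD-ROM/removable flags, and whether the underlying
 * queue supports flush and secure discard.
 */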
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct block_device *bdev;
	struct request_queue *q;

	vbd = &blkif->vbd;
	vbd->handle   = handle;
	vbd->readonly = readonly;
	vbd->type     = 0;

	vbd->pdevice  = MKDEV(major, minor);

	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
				 FMODE_READ : FMODE_WRITE, NULL);

	if (IS_ERR(bdev)) {
		DPRINTK("xen_vbd_create: device %08x could not be opened.\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev = bdev;
	if (vbd->bdev->bd_disk == NULL) {
		DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	q = bdev_get_queue(bdev);
	if (q && q->flush_flags)
		vbd->flush_support = true;

	if (q && blk_queue_secdiscard(q))
		vbd->discard_secure = true;

	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
		handle, blkif->domid);
	return 0;
}

static int xen_blkbk_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	DPRINTK("");

	if (be->major || be->minor)
		xenvbd_sysfs_delif(dev);

	if (be->backend_watch.node) {
		unregister_xenbus_watch(&be->backend_watch);
		kfree(be->backend_watch.node);
		be->backend_watch.node = NULL;
	}

	if (be->blkif) {
		xen_blkif_disconnect(be->blkif);
		xen_vbd_free(&be->blkif->vbd);
		xen_blkif_free(be->blkif);
		be->blkif = NULL;
	}

	kfree(be->mode);
	kfree(be);
	dev_set_drvdata(&dev->dev, NULL);
	return 0;
}
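
/*
 * The helpers below advertise optional features (flush, discard, barrier)
 * by writing nodes into the backend's xenstore directory as part of the
 * connect transaction; a failed write is reported with dev_warn but is not
 * treated as fatal to the connection.
 */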
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);

	return err;
}

static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	int err;
	int state = 0;
	struct block_device *bdev = be->blkif->vbd.bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	if (blk_queue_discard(q)) {
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-granularity", "%u",
				    q->limits.discard_granularity);
		if (err) {
			dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
			return;
		}
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-alignment", "%u",
				    q->limits.discard_alignment);
		if (err) {
			dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
			return;
		}
		state = 1;
		/* Optional. */
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-secure", "%d",
				    blkif->vbd.discard_secure);
		if (err) {
			dev_warn(&dev->dev, "writing discard-secure (%d)", err);
			return;
		}
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-barrier (%d)", err);

	return err;
}

/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers.  Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);
	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	/* setup back pointer */
	be->blkif->be = be;

	err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	DPRINTK("failed");
	xen_blkbk_remove(dev);
	return err;
}

/*
 * Callback received when the hotplug scripts have placed the physical-device
 * node.  Read it and the mode node, and create a vbd.  If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char **vec, unsigned int len)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	unsigned long handle;
	char *device_type;

	DPRINTK("");

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/*
		 * Since this watch will fire once immediately after it is
		 * registered, we expect this.  Ignore it, and wait for the
		 * hotplug scripts.
		 */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	if (be->major | be->minor) {
		if (be->major != major || be->minor != minor)
			pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
				be->major, be->minor, major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	/* Front end dir is a number, which is used as the handle. */
	err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
	if (err)
		return;

	be->major = major;
	be->minor = minor;

	err = xen_vbd_create(be->blkif, handle, major, minor,
			     !strchr(be->mode, 'w'), cdrom);

	if (err)
		xenbus_dev_fatal(dev, err, "creating vbd structure");
	else {
		err = xenvbd_sysfs_addif(dev);
		if (err) {
			xen_vbd_free(&be->blkif->vbd);
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
		}
	}

	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		be->major = 0;
		be->minor = 0;
	} else {
		/* We're potentially connected now */
		xen_update_blkif_status(be->blkif);
	}
}

/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	DPRINTK("%s", xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			pr_info(DRV_PFX "%s: prepare for reconnect\n",
				dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close succession and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * xen_blkif_disconnect() is idempotent.
		 */
		xen_blkif_disconnect(be->blkif);

		err = connect_ring(be);
		if (err)
			break;
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		/* fall through if not online */
	case XenbusStateUnknown:
		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

/* ** Connection ** */

/*
 * Write the physical details regarding the block device to the store, and
 * switch to Connected state.
 */
static void connect(struct backend_info *be)
{
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = be->dev;

	DPRINTK("%s", dev->otherend);

	/* Supply the information about the device the frontend needs */
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return;
	}

	/* If we can't advertise it is OK. */
	xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);

	xen_blkbk_discard(xbt, be);

	xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);

	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1);
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-max-indirect-segments", "%u",
			    MAX_INDIRECT_SEGMENTS);
	if (err)
		dev_warn(&dev->dev, "writing %s/feature-max-indirect-segments (%d)",
			 dev->nodename, err);

	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(&be->blkif->vbd));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sectors",
				 dev->nodename);
		goto abort;
	}

	/* FIXME: use a typename instead */
	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
			    be->blkif->vbd.type |
			    (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/info",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
			    (unsigned long)
			    bdev_logical_block_size(be->blkif->vbd.bdev));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
			    bdev_physical_block_size(be->blkif->vbd.bdev));
	if (err)
		xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
				 dev->nodename);

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		xenbus_dev_fatal(dev, err, "ending transaction");

	err = xenbus_switch_state(dev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
				 dev->nodename);

	return;
 abort:
	xenbus_transaction_end(xbt, 1);
}

static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	unsigned long ring_ref;
	unsigned int evtchn;
	unsigned int pers_grants;
	char protocol[64] = "";
	int err;

	DPRINTK("%s", dev->otherend);

	err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
			    &ring_ref, "event-channel", "%u", &evtchn, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref and event-channel",
				 dev->otherend);
		return err;
	}

	be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
			    "%63s", protocol, NULL);
	if (err)
		strcpy(protocol, "unspecified, assuming native");
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
	else {
		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
		return -1;
	}
	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "feature-persistent", "%u",
			    &pers_grants, NULL);
	if (err)
		pers_grants = 0;

	be->blkif->vbd.feature_gnt_persistent = pers_grants;
	be->blkif->vbd.overflow_max_grants = 0;

	pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s) %s\n",
		ring_ref, evtchn, be->blkif->blk_protocol, protocol,
		pers_grants ? "persistent grants" : "");

	/* Map the shared frame, irq etc. */
	err = xen_blkif_map(be->blkif, ring_ref, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
				 ring_ref, evtchn);
		return err;
	}

	return 0;
}
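
/*
 * For reference, connect_ring() reads nodes like these from the frontend's
 * xenstore directory (values illustrative):
 *
 *	ring-ref		= "8"
 *	event-channel		= "12"
 *	protocol		= "x86_64-abi"
 *	feature-persistent	= "1"
 */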

/* ** Driver Registration ** */

static const struct xenbus_device_id xen_blkbk_ids[] = {
	{ "vbd" },
	{ "" }
};

static DEFINE_XENBUS_DRIVER(xen_blkbk, ,
	.probe = xen_blkbk_probe,
	.remove = xen_blkbk_remove,
	.otherend_changed = frontend_changed
);

int xen_blkif_xenbus_init(void)
{
	return xenbus_register_backend(&xen_blkbk_driver);
}