1 /******************************************************************************
3 * Back-end of the driver for virtual block devices. This portion of the
4 * driver exports a 'unified' block-device interface that can be accessed
5 * by any operating system that implements a compatible front end. A
6 * reference front-end implementation can be found in:
7 * drivers/block/xen-blkfront.c
9 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
10 * Copyright (c) 2005, Christopher Clark
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License version 2
14 * as published by the Free Software Foundation; or, when distributed
15 * separately from the Linux kernel or incorporated into other
16 * software packages, subject to the following license:
18 * Permission is hereby granted, free of charge, to any person obtaining a copy
19 * of this source file (the "Software"), to deal in the Software without
20 * restriction, including without limitation the rights to use, copy, modify,
21 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22 * and to permit persons to whom the Software is furnished to do so, subject to
23 * the following conditions:
25 * The above copyright notice and this permission notice shall be included in
26 * all copies or substantial portions of the Software.
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
37 #define pr_fmt(fmt) "xen-blkback: " fmt
39 #include <linux/spinlock.h>
40 #include <linux/kthread.h>
41 #include <linux/list.h>
42 #include <linux/delay.h>
43 #include <linux/freezer.h>
44 #include <linux/bitmap.h>
46 #include <xen/events.h>
49 #include <asm/xen/hypervisor.h>
50 #include <asm/xen/hypercall.h>
51 #include <xen/balloon.h>
52 #include <xen/grant_table.h>
56 * Maximum number of unused free pages to keep in the internal buffer.
57 * Setting this to a value too low will reduce memory used in each backend,
58 * but can have a performance penalty.
60 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it can
61 * be set to a lower value, at the cost of degraded performance on some I/O-intensive workloads.
65 static int xen_blkif_max_buffer_pages = 1024;
66 module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
67 MODULE_PARM_DESC(max_buffer_pages,
68 "Maximum number of free pages to keep in each block backend buffer");
71 * Maximum number of grants to map persistently in blkback. For maximum
72 * performance this should be the total number of grants that can be used
73 * to fill the ring, but since this might become too high, especially with
74 * the use of indirect descriptors, we set it to a value that provides good
75 * performance without using too much memory.
77 * When the list of persistent grants is full we clean it up using an LRU algorithm.
81 static int xen_blkif_max_pgrants = 1056;
82 module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
83 MODULE_PARM_DESC(max_persistent_grants,
84 "Maximum number of grants to map persistently");
87 * The LRU mechanism to clean the list of persistent grants needs to
88 * be executed periodically. The time interval between consecutive executions
89 * of the purge mechanism is set in ms.
91 #define LRU_INTERVAL 100
94 * When the persistent grants list is full we will remove unused grants
95 * from the list. The percentage of grants to be removed at each LRU purge is defined below.
98 #define LRU_PERCENT_CLEAN 5
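/*
 * Worked example with the defaults above: (1056 / 100) * 5 = 50 grants are
 * targeted per purge pass, plus however many grants currently exceed
 * xen_blkif_max_pgrants (see the calculation in purge_persistent_gnt()).
 */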
100 /* Run-time switchable: /sys/module/blkback/parameters/ */
101 static unsigned int log_stats;
102 module_param(log_stats, uint, 0644);
104 #define BLKBACK_INVALID_HANDLE (~0)
106 /* Number of free pages to remove on each call to gnttab_free_pages */
107 #define NUM_BATCH_FREE_PAGES 10
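/*
 * Pop a page from the per-backend pool of free granted pages, or fall back
 * to gnttab_alloc_pages() when the pool is empty. The pool is protected by
 * free_pages_lock.
 */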
109 static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
113 spin_lock_irqsave(&blkif->free_pages_lock, flags);
114 if (list_empty(&blkif->free_pages)) {
115 BUG_ON(blkif->free_pages_num != 0);
116 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
117 return gnttab_alloc_pages(1, page);
119 BUG_ON(blkif->free_pages_num == 0);
120 page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
121 list_del(&page[0]->lru);
122 blkif->free_pages_num--;
123 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
128 static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
134 spin_lock_irqsave(&blkif->free_pages_lock, flags);
135 for (i = 0; i < num; i++)
136 list_add(&page[i]->lru, &blkif->free_pages);
137 blkif->free_pages_num += num;
138 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
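/*
 * Trim the free page pool down to 'num' pages, handing the excess back via
 * gnttab_free_pages() in batches of NUM_BATCH_FREE_PAGES so the pool lock
 * is not held across the free.
 */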
141 static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
143 /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
144 struct page *page[NUM_BATCH_FREE_PAGES];
145 unsigned int num_pages = 0;
148 spin_lock_irqsave(&blkif->free_pages_lock, flags);
149 while (blkif->free_pages_num > num) {
150 BUG_ON(list_empty(&blkif->free_pages));
151 page[num_pages] = list_first_entry(&blkif->free_pages,
153 list_del(&page[num_pages]->lru);
154 blkif->free_pages_num--;
155 if (++num_pages == NUM_BATCH_FREE_PAGES) {
156 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
157 gnttab_free_pages(num_pages, page);
158 spin_lock_irqsave(&blkif->free_pages_lock, flags);
162 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
164 gnttab_free_pages(num_pages, page);
167 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
169 static int do_block_io_op(struct xen_blkif *blkif);
170 static int dispatch_rw_block_io(struct xen_blkif *blkif,
171 struct blkif_request *req,
172 struct pending_req *pending_req);
173 static void make_response(struct xen_blkif *blkif, u64 id,
174 unsigned short op, int st);
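/*
 * Iterate over the rbtree of persistent grants while caching the next node,
 * so the current entry can be erased and freed from inside the loop body.
 */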
176 #define foreach_grant_safe(pos, n, rbtree, node) \
177 for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
178 (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
179 &(pos)->node != NULL; \
180 (pos) = container_of(n, typeof(*(pos)), node), \
181 (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
185 * We don't need locking around the persistent grant helpers
186 * because blkback uses a single thread for each backend, so we
187 * can be sure that these functions will never be called concurrently.
189 * The only exception is put_persistent_gnt, which can be called
190 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
191 * bit operations to modify the flags of a persistent grant and to count
192 * the number of used grants.
194 static int add_persistent_gnt(struct xen_blkif *blkif,
195 struct persistent_gnt *persistent_gnt)
197 struct rb_node **new = NULL, *parent = NULL;
198 struct persistent_gnt *this;
200 if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
201 if (!blkif->vbd.overflow_max_grants)
202 blkif->vbd.overflow_max_grants = 1;
205 /* Figure out where to put new node */
206 new = &blkif->persistent_gnts.rb_node;
208 this = container_of(*new, struct persistent_gnt, node);
211 if (persistent_gnt->gnt < this->gnt)
212 new = &((*new)->rb_left);
213 else if (persistent_gnt->gnt > this->gnt)
214 new = &((*new)->rb_right);
216 pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
221 bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
222 set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
223 /* Add new node and rebalance tree. */
224 rb_link_node(&(persistent_gnt->node), parent, new);
225 rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
226 blkif->persistent_gnt_c++;
227 atomic_inc(&blkif->persistent_gnt_in_use);
231 static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
234 struct persistent_gnt *data;
235 struct rb_node *node = NULL;
237 node = blkif->persistent_gnts.rb_node;
239 data = container_of(node, struct persistent_gnt, node);
241 if (gref < data->gnt)
242 node = node->rb_left;
243 else if (gref > data->gnt)
244 node = node->rb_right;
246 if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
247 pr_alert_ratelimited("requesting a grant already in use\n");
250 set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
251 atomic_inc(&blkif->persistent_gnt_in_use);
258 static void put_persistent_gnt(struct xen_blkif *blkif,
259 struct persistent_gnt *persistent_gnt)
261 if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
262 pr_alert_ratelimited("freeing a grant already unused\n");
263 set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
264 clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
265 atomic_dec(&blkif->persistent_gnt_in_use);
268 static void free_persistent_gnts_unmap_callback(int result,
269 struct gntab_unmap_queue_data *data)
271 struct completion *c = data->data;
273 /* BUG_ON used to reproduce existing behaviour,
274 but is this the best way to deal with this? */
279 static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
282 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
283 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
284 struct persistent_gnt *persistent_gnt;
286 int segs_to_unmap = 0;
287 struct gntab_unmap_queue_data unmap_data;
288 struct completion unmap_completion;
290 init_completion(&unmap_completion);
292 unmap_data.data = &unmap_completion;
293 unmap_data.done = &free_persistent_gnts_unmap_callback;
294 unmap_data.pages = pages;
295 unmap_data.unmap_ops = unmap;
296 unmap_data.kunmap_ops = NULL;
298 foreach_grant_safe(persistent_gnt, n, root, node) {
299 BUG_ON(persistent_gnt->handle ==
300 BLKBACK_INVALID_HANDLE);
301 gnttab_set_unmap_op(&unmap[segs_to_unmap],
302 (unsigned long) pfn_to_kaddr(page_to_pfn(
303 persistent_gnt->page)),
305 persistent_gnt->handle);
307 pages[segs_to_unmap] = persistent_gnt->page;
309 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
310 !rb_next(&persistent_gnt->node)) {
312 unmap_data.count = segs_to_unmap;
313 gnttab_unmap_refs_async(&unmap_data);
314 wait_for_completion(&unmap_completion);
316 put_free_pages(blkif, pages, segs_to_unmap);
320 rb_erase(&persistent_gnt->node, root);
321 kfree(persistent_gnt);
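/*
 * Work item callback: unmap the grants that purge_persistent_gnt() moved
 * onto persistent_purge_list, return their pages to the free pool and free
 * the persistent_gnt structures.
 */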
327 void xen_blkbk_unmap_purged_grants(struct work_struct *work)
329 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
330 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
331 struct persistent_gnt *persistent_gnt;
332 int ret, segs_to_unmap = 0;
333 struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
335 while (!list_empty(&blkif->persistent_purge_list)) {
336 persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
337 struct persistent_gnt,
339 list_del(&persistent_gnt->remove_node);
341 gnttab_set_unmap_op(&unmap[segs_to_unmap],
342 vaddr(persistent_gnt->page),
344 persistent_gnt->handle);
346 pages[segs_to_unmap] = persistent_gnt->page;
348 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
349 ret = gnttab_unmap_refs(unmap, NULL, pages,
352 put_free_pages(blkif, pages, segs_to_unmap);
355 kfree(persistent_gnt);
357 if (segs_to_unmap > 0) {
358 ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
360 put_free_pages(blkif, pages, segs_to_unmap);
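/*
 * LRU cleanup of the persistent grant tree: when the tree is full or has
 * overflowed, detach a batch of currently unused grants (roughly
 * LRU_PERCENT_CLEAN% of the limit, plus any overflow) onto
 * persistent_purge_list and defer the actual unmapping to
 * persistent_purge_work.
 */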
364 static void purge_persistent_gnt(struct xen_blkif *blkif)
366 struct persistent_gnt *persistent_gnt;
368 unsigned int num_clean, total;
369 bool scan_used = false, clean_used = false;
370 struct rb_root *root;
372 if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
373 (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
374 !blkif->vbd.overflow_max_grants)) {
378 if (work_pending(&blkif->persistent_purge_work)) {
379 pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
383 num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
384 num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
385 num_clean = min(blkif->persistent_gnt_c, num_clean);
386 if ((num_clean == 0) ||
387 (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
391 * At this point, we can be sure that there will be no calls
392 * to get_persistent_gnt (because we are executing this code from
393 * xen_blkif_schedule); there can only be calls to put_persistent_gnt,
394 * which means that the number of currently used grants will go down,
395 * but never up, so we will always be able to remove the requested number of grants.
401 pr_debug("Going to purge %u persistent grants\n", num_clean);
403 BUG_ON(!list_empty(&blkif->persistent_purge_list));
404 root = &blkif->persistent_gnts;
406 foreach_grant_safe(persistent_gnt, n, root, node) {
407 BUG_ON(persistent_gnt->handle ==
408 BLKBACK_INVALID_HANDLE);
411 clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
415 if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
418 (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
421 rb_erase(&persistent_gnt->node, root);
422 list_add(&persistent_gnt->remove_node,
423 &blkif->persistent_purge_list);
424 if (--num_clean == 0)
428 * If we get here it means we also need to start cleaning
429 * grants that were used since the last purge in order to cope
430 * with the requested number of cleaned grants.
432 if (!scan_used && !clean_used) {
433 pr_debug("Still missing %u purged frames\n", num_clean);
439 pr_debug("Finished scanning for grants to clean, removing used flag\n");
444 blkif->persistent_gnt_c -= (total - num_clean);
445 blkif->vbd.overflow_max_grants = 0;
447 /* We can defer this work */
448 schedule_work(&blkif->persistent_purge_work);
449 pr_debug("Purged %u/%u\n", (total - num_clean), total);
454 * Retrieve a free pending_req structure from the 'pending_free' list to be used.
456 static struct pending_req *alloc_req(struct xen_blkif *blkif)
458 struct pending_req *req = NULL;
461 spin_lock_irqsave(&blkif->pending_free_lock, flags);
462 if (!list_empty(&blkif->pending_free)) {
463 req = list_entry(blkif->pending_free.next, struct pending_req,
465 list_del(&req->free_list);
467 spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
472 * Return the 'pending_req' structure back to the free pool. We also
473 * wake up the thread if it was waiting for a free pending_req.
475 static void free_req(struct xen_blkif *blkif, struct pending_req *req)
480 spin_lock_irqsave(&blkif->pending_free_lock, flags);
481 was_empty = list_empty(&blkif->pending_free);
482 list_add(&req->free_list, &blkif->pending_free);
483 spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
485 wake_up(&blkif->pending_free_wq);
489 * Routines for managing virtual block devices (vbds).
491 static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
494 struct xen_vbd *vbd = &blkif->vbd;
497 if ((operation != READ) && vbd->readonly)
500 if (likely(req->nr_sects)) {
501 blkif_sector_t end = req->sector_number + req->nr_sects;
503 if (unlikely(end < req->sector_number))
505 if (unlikely(end > vbd_sz(vbd)))
509 req->dev = vbd->pdevice;
510 req->bdev = vbd->bdev;
517 static void xen_vbd_resize(struct xen_blkif *blkif)
519 struct xen_vbd *vbd = &blkif->vbd;
520 struct xenbus_transaction xbt;
522 struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
523 unsigned long long new_size = vbd_sz(vbd);
525 pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
526 blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
527 pr_info("VBD Resize: new size %llu\n", new_size);
528 vbd->size = new_size;
530 err = xenbus_transaction_start(&xbt);
532 pr_warn("Error starting transaction\n");
535 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
536 (unsigned long long)vbd_sz(vbd));
538 pr_warn("Error writing new size\n");
542 * Write the current state; we will use this to synchronize
543 * the front-end. If the current state is "connected" the
544 * front-end will get the new size information online.
546 err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
548 pr_warn("Error writing the state\n");
552 err = xenbus_transaction_end(xbt, 0);
556 pr_warn("Error ending transaction\n");
559 xenbus_transaction_end(xbt, 1);
563 * Notification from the guest OS.
565 static void blkif_notify_work(struct xen_blkif *blkif)
567 blkif->waiting_reqs = 1;
571 irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
573 blkif_notify_work(dev_id);
578 * SCHEDULER FUNCTIONS
581 static void print_stats(struct xen_blkif *blkif)
583 pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
584 " | ds %4llu | pg: %4u/%4d\n",
585 current->comm, blkif->st_oo_req,
586 blkif->st_rd_req, blkif->st_wr_req,
587 blkif->st_f_req, blkif->st_ds_req,
588 blkif->persistent_gnt_c,
589 xen_blkif_max_pgrants);
590 blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
591 blkif->st_rd_req = 0;
592 blkif->st_wr_req = 0;
593 blkif->st_oo_req = 0;
594 blkif->st_ds_req = 0;
597 int xen_blkif_schedule(void *arg)
599 struct xen_blkif *blkif = arg;
600 struct xen_vbd *vbd = &blkif->vbd;
601 unsigned long timeout;
604 xen_blkif_get(blkif);
606 while (!kthread_should_stop()) {
609 if (unlikely(vbd->size != vbd_sz(vbd)))
610 xen_vbd_resize(blkif);
612 timeout = msecs_to_jiffies(LRU_INTERVAL);
614 timeout = wait_event_interruptible_timeout(
616 blkif->waiting_reqs || kthread_should_stop(),
620 timeout = wait_event_interruptible_timeout(
621 blkif->pending_free_wq,
622 !list_empty(&blkif->pending_free) ||
623 kthread_should_stop(),
628 blkif->waiting_reqs = 0;
629 smp_mb(); /* clear flag *before* checking for work */
631 ret = do_block_io_op(blkif);
633 blkif->waiting_reqs = 1;
635 wait_event_interruptible(blkif->shutdown_wq,
636 kthread_should_stop());
639 if (blkif->vbd.feature_gnt_persistent &&
640 time_after(jiffies, blkif->next_lru)) {
641 purge_persistent_gnt(blkif);
642 blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
645 /* Shrink if we have more than xen_blkif_max_buffer_pages */
646 shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);
648 if (log_stats && time_after(jiffies, blkif->st_print))
652 /* Drain pending purge work */
653 flush_work(&blkif->persistent_purge_work);
658 blkif->xenblkd = NULL;
659 xen_blkif_put(blkif);
665 * Remove persistent grants and empty the pool of free pages
667 void xen_blkbk_free_caches(struct xen_blkif *blkif)
669 /* Free all persistent grant pages */
670 if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
671 free_persistent_gnts(blkif, &blkif->persistent_gnts,
672 blkif->persistent_gnt_c);
674 BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
675 blkif->persistent_gnt_c = 0;
677 /* Since we are shutting down remove all pages from the buffer */
678 shrink_free_pagepool(blkif, 0 /* All */);
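/*
 * Prepare the unmap operations for a batch of segment pages: persistent
 * grants are only released (put), while non-persistent mappings are queued
 * for a real unmap. Returns the number of unmap ops prepared.
 */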
681 static unsigned int xen_blkbk_unmap_prepare(
682 struct xen_blkif *blkif,
683 struct grant_page **pages,
685 struct gnttab_unmap_grant_ref *unmap_ops,
686 struct page **unmap_pages)
688 unsigned int i, invcount = 0;
690 for (i = 0; i < num; i++) {
691 if (pages[i]->persistent_gnt != NULL) {
692 put_persistent_gnt(blkif, pages[i]->persistent_gnt);
695 if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
697 unmap_pages[invcount] = pages[i]->page;
698 gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
699 GNTMAP_host_map, pages[i]->handle);
700 pages[i]->handle = BLKBACK_INVALID_HANDLE;
707 static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
709 struct pending_req* pending_req = (struct pending_req*) (data->data);
710 struct xen_blkif *blkif = pending_req->blkif;
712 /* BUG_ON used to reproduce existing behaviour,
713 but is this the best way to deal with this? */
716 put_free_pages(blkif, data->pages, data->count);
717 make_response(blkif, pending_req->id,
718 pending_req->operation, pending_req->status);
719 free_req(blkif, pending_req);
721 * Make sure the request is freed before releasing blkif,
722 * or there could be a race between free_req and the
723 * cleanup done in xen_blkif_free during shutdown.
725 * NB: The fact that we might try to wake up pending_free_wq
726 * before drain_complete (in case there's a drain going on)
727 * is not a problem with our current implementation,
728 * because we can be sure there's no thread waiting on
729 * pending_free_wq if there's a drain going on, but it has
730 * to be taken into account if the current model is changed.
732 if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
733 complete(&blkif->drain_complete);
735 xen_blkif_put(blkif);
738 static void xen_blkbk_unmap_and_respond(struct pending_req *req)
740 struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
741 struct xen_blkif *blkif = req->blkif;
742 struct grant_page **pages = req->segments;
743 unsigned int invcount;
745 invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages,
746 req->unmap, req->unmap_pages);
749 work->done = xen_blkbk_unmap_and_respond_callback;
750 work->unmap_ops = req->unmap;
751 work->kunmap_ops = NULL;
752 work->pages = req->unmap_pages;
753 work->count = invcount;
755 gnttab_unmap_refs_async(&req->gnttab_unmap_data);
760 * Unmap the grant references.
762 * This could accumulate ops up to the batch size to reduce the number
763 * of hypercalls, but since this is only used in error paths there's no real need.
766 static void xen_blkbk_unmap(struct xen_blkif *blkif,
767 struct grant_page *pages[],
770 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
771 struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
772 unsigned int invcount = 0;
776 unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
778 invcount = xen_blkbk_unmap_prepare(blkif, pages, batch,
781 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
783 put_free_pages(blkif, unmap_pages, invcount);
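/*
 * Map the guest pages backing a request. Grants already present in the
 * persistent tree are reused directly; the rest are mapped in batches of up
 * to BLKIF_MAX_SEGMENTS_PER_REQUEST and, when there is room, added to the
 * persistent grant tree.
 */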
790 static int xen_blkbk_map(struct xen_blkif *blkif,
791 struct grant_page *pages[],
794 struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
795 struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
796 struct persistent_gnt *persistent_gnt = NULL;
797 phys_addr_t addr = 0;
798 int i, seg_idx, new_map_idx;
801 int last_map = 0, map_until = 0;
802 int use_persistent_gnts;
804 use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
807 * Fill out preq.nr_sects with the proper number of sectors, and set up
808 * map[..] with the PFN of the page in our domain with the
809 * corresponding grant reference for each page.
812 for (i = map_until; i < num; i++) {
815 if (use_persistent_gnts)
816 persistent_gnt = get_persistent_gnt(
820 if (persistent_gnt) {
822 * We are using persistent grants and
823 * the grant is already mapped
825 pages[i]->page = persistent_gnt->page;
826 pages[i]->persistent_gnt = persistent_gnt;
828 if (get_free_page(blkif, &pages[i]->page))
830 addr = vaddr(pages[i]->page);
831 pages_to_gnt[segs_to_map] = pages[i]->page;
832 pages[i]->persistent_gnt = NULL;
833 flags = GNTMAP_host_map;
834 if (!use_persistent_gnts && ro)
835 flags |= GNTMAP_readonly;
836 gnttab_set_map_op(&map[segs_to_map++], addr,
837 flags, pages[i]->gref,
841 if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
846 ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
851 * Now swizzle the MFN in our domain with the MFN from the other domain
852 * so that when we access vaddr(pending_req,i) it has the contents of
853 * the page from the other domain.
855 for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
856 if (!pages[seg_idx]->persistent_gnt) {
857 /* This is a newly mapped grant */
858 BUG_ON(new_map_idx >= segs_to_map);
859 if (unlikely(map[new_map_idx].status != 0)) {
860 pr_debug("invalid buffer -- could not remap it\n");
861 put_free_pages(blkif, &pages[seg_idx]->page, 1);
862 pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
866 pages[seg_idx]->handle = map[new_map_idx].handle;
870 if (use_persistent_gnts &&
871 blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
873 * We are using persistent grants; the grant is
874 * not mapped, but we might have room for it.
876 persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
878 if (!persistent_gnt) {
880 * If we don't have enough memory to
881 * allocate the persistent_gnt struct,
882 * map this grant non-persistently.
886 persistent_gnt->gnt = map[new_map_idx].ref;
887 persistent_gnt->handle = map[new_map_idx].handle;
888 persistent_gnt->page = pages[seg_idx]->page;
889 if (add_persistent_gnt(blkif,
891 kfree(persistent_gnt);
892 persistent_gnt = NULL;
895 pages[seg_idx]->persistent_gnt = persistent_gnt;
896 pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
897 persistent_gnt->gnt, blkif->persistent_gnt_c,
898 xen_blkif_max_pgrants);
901 if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
902 blkif->vbd.overflow_max_grants = 1;
903 pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
904 blkif->domid, blkif->vbd.handle);
907 * We could not map this grant persistently, so use it as
908 * a non-persistent grant.
914 last_map = map_until;
915 if (map_until != num)
921 pr_alert("%s: out of memory\n", __func__);
922 put_free_pages(blkif, pages_to_gnt, segs_to_map);
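/*
 * Map all segment pages of a regular request. The pages are mapped
 * writable when the request is a BLKIF_OP_READ, since blkback has to copy
 * the disk data into them.
 */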
926 static int xen_blkbk_map_seg(struct pending_req *pending_req)
930 rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
931 pending_req->nr_pages,
932 (pending_req->operation != BLKIF_OP_READ));
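/*
 * For an indirect request: map the indirect descriptor pages and walk the
 * segment entries they contain, filling in each segment's gref, offset and
 * sector count and accumulating the total into preq->nr_sects.
 */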
937 static int xen_blkbk_parse_indirect(struct blkif_request *req,
938 struct pending_req *pending_req,
939 struct seg_buf seg[],
940 struct phys_req *preq)
942 struct grant_page **pages = pending_req->indirect_pages;
943 struct xen_blkif *blkif = pending_req->blkif;
944 int indirect_grefs, rc, n, nseg, i;
945 struct blkif_request_segment *segments = NULL;
947 nseg = pending_req->nr_pages;
948 indirect_grefs = INDIRECT_PAGES(nseg);
949 BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
951 for (i = 0; i < indirect_grefs; i++)
952 pages[i]->gref = req->u.indirect.indirect_grefs[i];
954 rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
958 for (n = 0, i = 0; n < nseg; n++) {
959 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
960 /* Map indirect segments */
962 kunmap_atomic(segments);
963 segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
965 i = n % SEGS_PER_INDIRECT_FRAME;
966 pending_req->segments[n]->gref = segments[i].gref;
967 seg[n].nsec = segments[i].last_sect -
968 segments[i].first_sect + 1;
969 seg[n].offset = (segments[i].first_sect << 9);
970 if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
971 (segments[i].last_sect < segments[i].first_sect)) {
975 preq->nr_sects += seg[n].nsec;
980 kunmap_atomic(segments);
981 xen_blkbk_unmap(blkif, pages, indirect_grefs);
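/*
 * Handle a discard request: translate it against the vbd, issue
 * blkdev_issue_discard() (secure if both frontend and backend support it)
 * and respond directly, since no data segments need to be mapped.
 */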
985 static int dispatch_discard_io(struct xen_blkif *blkif,
986 struct blkif_request *req)
989 int status = BLKIF_RSP_OKAY;
990 struct block_device *bdev = blkif->vbd.bdev;
991 unsigned long secure;
992 struct phys_req preq;
994 xen_blkif_get(blkif);
996 preq.sector_number = req->u.discard.sector_number;
997 preq.nr_sects = req->u.discard.nr_sectors;
999 err = xen_vbd_translate(&preq, blkif, WRITE);
1001 pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
1003 preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
1008 secure = (blkif->vbd.discard_secure &&
1009 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
1010 BLKDEV_DISCARD_SECURE : 0;
1012 err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
1013 req->u.discard.nr_sectors,
1014 GFP_KERNEL, secure);
1016 if (err == -EOPNOTSUPP) {
1017 pr_debug("discard op failed, not supported\n");
1018 status = BLKIF_RSP_EOPNOTSUPP;
1020 status = BLKIF_RSP_ERROR;
1022 make_response(blkif, req->u.discard.id, req->operation, status);
1023 xen_blkif_put(blkif);
1027 static int dispatch_other_io(struct xen_blkif *blkif,
1028 struct blkif_request *req,
1029 struct pending_req *pending_req)
1031 free_req(blkif, pending_req);
1032 make_response(blkif, req->u.other.id, req->operation,
1033 BLKIF_RSP_EOPNOTSUPP);
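/*
 * Wait for all in-flight requests on this backend to complete; used to
 * implement BLKIF_OP_WRITE_BARRIER, where earlier I/O must finish before
 * the flush is issued.
 */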
1037 static void xen_blk_drain_io(struct xen_blkif *blkif)
1039 atomic_set(&blkif->drain, 1);
1041 if (atomic_read(&blkif->inflight) == 0)
1043 wait_for_completion_interruptible_timeout(
1044 &blkif->drain_complete, HZ);
1046 if (!atomic_read(&blkif->drain))
1048 } while (!kthread_should_stop());
1049 atomic_set(&blkif->drain, 0);
1053 * Completion callback on the bios. Called as bio->bi_end_io().
1056 static void __end_block_io_op(struct pending_req *pending_req, int error)
1058 /* An error fails the entire request. */
1059 if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
1060 (error == -EOPNOTSUPP)) {
1061 pr_debug("flush diskcache op failed, not supported\n");
1062 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
1063 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1064 } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
1065 (error == -EOPNOTSUPP)) {
1066 pr_debug("write barrier op failed, not supported\n");
1067 xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
1068 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1070 pr_debug("Buffer not up-to-date at end of operation,"
1071 " error=%d\n", error);
1072 pending_req->status = BLKIF_RSP_ERROR;
1076 * If all of the bios have completed it is time to unmap
1077 * the grant references associated with 'request' and provide
1078 * the proper response on the ring.
1080 if (atomic_dec_and_test(&pending_req->pendcnt))
1081 xen_blkbk_unmap_and_respond(pending_req);
1087 static void end_block_io_op(struct bio *bio, int error)
1089 __end_block_io_op(bio->bi_private, error);
1096 * Function to copy a 'struct blkif_request' from the ring buffer
1097 * (which has the sectors we want, the number of them, grant references, etc.)
1098 * and translate it to the block API to hand it over to the proper block disk.
1101 __do_block_io_op(struct xen_blkif *blkif)
1103 union blkif_back_rings *blk_rings = &blkif->blk_rings;
1104 struct blkif_request req;
1105 struct pending_req *pending_req;
1109 rc = blk_rings->common.req_cons;
1110 rp = blk_rings->common.sring->req_prod;
1111 rmb(); /* Ensure we see queued requests up to 'rp'. */
1113 if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1114 rc = blk_rings->common.rsp_prod_pvt;
1115 pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1116 rp, rc, rp - rc, blkif->vbd.pdevice);
1121 if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
1124 if (kthread_should_stop()) {
1129 pending_req = alloc_req(blkif);
1130 if (NULL == pending_req) {
1136 switch (blkif->blk_protocol) {
1137 case BLKIF_PROTOCOL_NATIVE:
1138 memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
1140 case BLKIF_PROTOCOL_X86_32:
1141 blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
1143 case BLKIF_PROTOCOL_X86_64:
1144 blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
1149 blk_rings->common.req_cons = ++rc; /* before make_response() */
1151 /* Apply all sanity checks to /private copy/ of request. */
1154 switch (req.operation) {
1156 case BLKIF_OP_WRITE:
1157 case BLKIF_OP_WRITE_BARRIER:
1158 case BLKIF_OP_FLUSH_DISKCACHE:
1159 case BLKIF_OP_INDIRECT:
1160 if (dispatch_rw_block_io(blkif, &req, pending_req))
1163 case BLKIF_OP_DISCARD:
1164 free_req(blkif, pending_req);
1165 if (dispatch_discard_io(blkif, &req))
1169 if (dispatch_other_io(blkif, &req, pending_req))
1174 /* Yield point for this unbounded loop. */
1182 do_block_io_op(struct xen_blkif *blkif)
1184 union blkif_back_rings *blk_rings = &blkif->blk_rings;
1188 more_to_do = __do_block_io_op(blkif);
1192 RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
1193 } while (more_to_do);
1198 * Transmute the 'struct blkif_request' into a proper 'struct bio'
1199 * and call 'submit_bio' to pass it to the underlying storage.
1201 static int dispatch_rw_block_io(struct xen_blkif *blkif,
1202 struct blkif_request *req,
1203 struct pending_req *pending_req)
1205 struct phys_req preq;
1206 struct seg_buf *seg = pending_req->seg;
1208 struct bio *bio = NULL;
1209 struct bio **biolist = pending_req->biolist;
1212 struct blk_plug plug;
1214 struct grant_page **pages = pending_req->segments;
1215 unsigned short req_operation;
1217 req_operation = req->operation == BLKIF_OP_INDIRECT ?
1218 req->u.indirect.indirect_op : req->operation;
1219 if ((req->operation == BLKIF_OP_INDIRECT) &&
1220 (req_operation != BLKIF_OP_READ) &&
1221 (req_operation != BLKIF_OP_WRITE)) {
1222 pr_debug("Invalid indirect operation (%u)\n", req_operation);
1226 switch (req_operation) {
1231 case BLKIF_OP_WRITE:
1233 operation = WRITE_ODIRECT;
1235 case BLKIF_OP_WRITE_BARRIER:
1237 case BLKIF_OP_FLUSH_DISKCACHE:
1239 operation = WRITE_FLUSH;
1242 operation = 0; /* make gcc happy */
1247 /* Check that the number of segments is sane. */
1248 nseg = req->operation == BLKIF_OP_INDIRECT ?
1249 req->u.indirect.nr_segments : req->u.rw.nr_segments;
1251 if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
1252 unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1253 (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1254 unlikely((req->operation == BLKIF_OP_INDIRECT) &&
1255 (nseg > MAX_INDIRECT_SEGMENTS))) {
1256 pr_debug("Bad number of segments in request (%d)\n", nseg);
1257 /* Haven't submitted any bio's yet. */
1263 pending_req->blkif = blkif;
1264 pending_req->id = req->u.rw.id;
1265 pending_req->operation = req_operation;
1266 pending_req->status = BLKIF_RSP_OKAY;
1267 pending_req->nr_pages = nseg;
1269 if (req->operation != BLKIF_OP_INDIRECT) {
1270 preq.dev = req->u.rw.handle;
1271 preq.sector_number = req->u.rw.sector_number;
1272 for (i = 0; i < nseg; i++) {
1273 pages[i]->gref = req->u.rw.seg[i].gref;
1274 seg[i].nsec = req->u.rw.seg[i].last_sect -
1275 req->u.rw.seg[i].first_sect + 1;
1276 seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
1277 if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
1278 (req->u.rw.seg[i].last_sect <
1279 req->u.rw.seg[i].first_sect))
1281 preq.nr_sects += seg[i].nsec;
1284 preq.dev = req->u.indirect.handle;
1285 preq.sector_number = req->u.indirect.sector_number;
1286 if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
1290 if (xen_vbd_translate(&preq, blkif, operation) != 0) {
1291 pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
1292 operation == READ ? "read" : "write",
1294 preq.sector_number + preq.nr_sects,
1295 blkif->vbd.pdevice);
1300 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev is set there.
1303 for (i = 0; i < nseg; i++) {
1304 if (((int)preq.sector_number|(int)seg[i].nsec) &
1305 ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
1306 pr_debug("Misaligned I/O request from domain %d\n",
1312 /* Wait on all outstanding I/Os and once that has been completed
1313 * issue the WRITE_FLUSH.
1316 xen_blk_drain_io(pending_req->blkif);
1319 * If we have failed at this point, we need to undo the M2P override,
1320 * set gnttab_set_unmap_op on all of the grant references and perform
1321 * the hypercall to unmap the grants - that is all done in xen_blkbk_unmap.
1324 if (xen_blkbk_map_seg(pending_req))
1328 * The corresponding xen_blkif_put is done in __end_block_io_op, or
1329 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
1331 xen_blkif_get(blkif);
1332 atomic_inc(&blkif->inflight);
1334 for (i = 0; i < nseg; i++) {
1335 while ((bio == NULL) ||
1339 seg[i].offset) == 0)) {
1341 int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
1342 bio = bio_alloc(GFP_KERNEL, nr_iovecs);
1343 if (unlikely(bio == NULL))
1346 biolist[nbio++] = bio;
1347 bio->bi_bdev = preq.bdev;
1348 bio->bi_private = pending_req;
1349 bio->bi_end_io = end_block_io_op;
1350 bio->bi_iter.bi_sector = preq.sector_number;
1353 preq.sector_number += seg[i].nsec;
1356 /* This will be hit if the operation was a flush or discard. */
1358 BUG_ON(operation != WRITE_FLUSH);
1360 bio = bio_alloc(GFP_KERNEL, 0);
1361 if (unlikely(bio == NULL))
1364 biolist[nbio++] = bio;
1365 bio->bi_bdev = preq.bdev;
1366 bio->bi_private = pending_req;
1367 bio->bi_end_io = end_block_io_op;
1370 atomic_set(&pending_req->pendcnt, nbio);
1371 blk_start_plug(&plug);
1373 for (i = 0; i < nbio; i++)
1374 submit_bio(operation, biolist[i]);
1376 /* Let the I/Os go.. */
1377 blk_finish_plug(&plug);
1379 if (operation == READ)
1380 blkif->st_rd_sect += preq.nr_sects;
1381 else if (operation & WRITE)
1382 blkif->st_wr_sect += preq.nr_sects;
1387 xen_blkbk_unmap(blkif, pending_req->segments,
1388 pending_req->nr_pages);
1390 /* Haven't submitted any bio's yet. */
1391 make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1392 free_req(blkif, pending_req);
1393 msleep(1); /* back off a bit */
1397 for (i = 0; i < nbio; i++)
1398 bio_put(biolist[i]);
1399 atomic_set(&pending_req->pendcnt, 1);
1400 __end_block_io_op(pending_req, -EINVAL);
1401 msleep(1); /* back off a bit */
1408 * Put a response on the ring indicating how the operation fared.
1410 static void make_response(struct xen_blkif *blkif, u64 id,
1411 unsigned short op, int st)
1413 struct blkif_response resp;
1414 unsigned long flags;
1415 union blkif_back_rings *blk_rings = &blkif->blk_rings;
1419 resp.operation = op;
1422 spin_lock_irqsave(&blkif->blk_ring_lock, flags);
1423 /* Place on the response ring for the relevant domain. */
1424 switch (blkif->blk_protocol) {
1425 case BLKIF_PROTOCOL_NATIVE:
1426 memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
1427 &resp, sizeof(resp));
1429 case BLKIF_PROTOCOL_X86_32:
1430 memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
1431 &resp, sizeof(resp));
1433 case BLKIF_PROTOCOL_X86_64:
1434 memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
1435 &resp, sizeof(resp));
1440 blk_rings->common.rsp_prod_pvt++;
1441 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
1442 spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
1444 notify_remote_via_irq(blkif->irq);
1447 static int __init xen_blkif_init(void)
1454 rc = xen_blkif_interface_init();
1458 rc = xen_blkif_xenbus_init();
1466 module_init(xen_blkif_init);
1468 MODULE_LICENSE("Dual BSD/GPL");
1469 MODULE_ALIAS("xen-backend:vbd");