4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
28 #include <linux/slab.h>
29 #include <linux/drbd.h>
34 static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
36 /* Update disk stats at start of I/O request */
37 static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
39 const int rw = bio_data_dir(req->master_bio);
41 cpu = part_stat_lock();
42 part_round_stats(cpu, &device->vdisk->part0);
43 part_stat_inc(cpu, &device->vdisk->part0, ios[rw]);
44 part_stat_add(cpu, &device->vdisk->part0, sectors[rw], req->i.size >> 9);
45 (void) cpu; /* The macro invocations above want the cpu argument, I do not like
46 the compiler warning about cpu being assigned but never used... */
47 part_inc_in_flight(&device->vdisk->part0, rw);
51 /* Update disk stats when completing request upwards */
52 static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
54 int rw = bio_data_dir(req->master_bio);
55 unsigned long duration = jiffies - req->start_jif;
57 cpu = part_stat_lock();
58 part_stat_add(cpu, &device->vdisk->part0, ticks[rw], duration);
59 part_round_stats(cpu, &device->vdisk->part0);
60 part_dec_in_flight(&device->vdisk->part0, rw);
64 static struct drbd_request *drbd_req_new(struct drbd_device *device,
67 struct drbd_request *req;
69 req = mempool_alloc(drbd_request_mempool, GFP_NOIO | __GFP_ZERO);
73 drbd_req_make_private_bio(req, bio_src);
74 req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
76 req->master_bio = bio_src;
79 drbd_clear_interval(&req->i);
80 req->i.sector = bio_src->bi_iter.bi_sector;
81 req->i.size = bio_src->bi_iter.bi_size;
83 req->i.waiting = false;
85 INIT_LIST_HEAD(&req->tl_requests);
86 INIT_LIST_HEAD(&req->w.list);
87 INIT_LIST_HEAD(&req->req_pending_master_completion);
88 INIT_LIST_HEAD(&req->req_pending_local);
90 /* one reference to be put by __drbd_make_request */
91 atomic_set(&req->completion_ref, 1);
92 /* one kref as long as completion_ref > 0 */
93 kref_init(&req->kref);
97 static void drbd_remove_request_interval(struct rb_root *root,
98 struct drbd_request *req)
100 struct drbd_device *device = req->device;
101 struct drbd_interval *i = &req->i;
103 drbd_remove_interval(root, i);
105 /* Wake up any processes waiting for this request to complete. */
107 wake_up(&device->misc_wait);
110 void drbd_req_destroy(struct kref *kref)
112 struct drbd_request *req = container_of(kref, struct drbd_request, kref);
113 struct drbd_device *device = req->device;
114 const unsigned s = req->rq_state;
116 if ((req->master_bio && !(s & RQ_POSTPONED)) ||
117 atomic_read(&req->completion_ref) ||
118 (s & RQ_LOCAL_PENDING) ||
119 ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
120 drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
121 s, atomic_read(&req->completion_ref));
125 /* If called from mod_rq_state (expected normal case) or
126 * drbd_send_and_submit (the less likely normal path), this holds the
127 * req_lock, and req->tl_requests will typically be on ->transfer_log,
128 * though it may still be empty (never added to the transfer log).
130 * If called from do_retry(), we do NOT hold the req_lock, but we are
131 * still allowed to unconditionally list_del(&req->tl_requests),
132 * because it will be on a local on-stack list only. */
133 list_del_init(&req->tl_requests);
135 /* finally remove the request from the conflict detection
136 * respective block_id verification interval tree. */
137 if (!drbd_interval_empty(&req->i)) {
138 struct rb_root *root;
141 root = &device->write_requests;
143 root = &device->read_requests;
144 drbd_remove_request_interval(root, req);
145 } else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
146 drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
147 s, (unsigned long long)req->i.sector, req->i.size);
149 /* if it was a write, we may have to set the corresponding
150 * bit(s) out-of-sync first. If it had a local part, we need to
151 * release the reference to the activity log. */
153 /* Set out-of-sync unless both OK flags are set
154 * (local only or remote failed).
155 * Other places where we set out-of-sync:
156 * READ with local io-error */
158 /* There is a special case:
159 * we may notice late that IO was suspended,
160 * and postpone, or schedule for retry, a write,
161 * before it even was submitted or sent.
162 * In that case we do not want to touch the bitmap at all.
164 if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
165 if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
166 drbd_set_out_of_sync(device, req->i.sector, req->i.size);
168 if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
169 drbd_set_in_sync(device, req->i.sector, req->i.size);
172 /* one might be tempted to move the drbd_al_complete_io
173 * to the local io completion callback drbd_request_endio.
174 * but, if this was a mirror write, we may only
175 * drbd_al_complete_io after this is RQ_NET_DONE,
176 * otherwise the extent could be dropped from the al
177 * before it has actually been written on the peer.
178 * if we crash before our peer knows about the request,
179 * but after the extent has been dropped from the al,
180 * we would forget to resync the corresponding extent.
182 if (s & RQ_IN_ACT_LOG) {
183 if (get_ldev_if_state(device, D_FAILED)) {
184 drbd_al_complete_io(device, &req->i);
186 } else if (__ratelimit(&drbd_ratelimit_state)) {
187 drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
188 "but my Disk seems to have failed :(\n",
189 (unsigned long long) req->i.sector, req->i.size);
194 mempool_free(req, drbd_request_mempool);
197 static void wake_all_senders(struct drbd_connection *connection)
199 wake_up(&connection->sender_work.q_wait);
202 /* must hold resource->req_lock */
203 void start_new_tl_epoch(struct drbd_connection *connection)
205 /* no point closing an epoch, if it is empty, anyways. */
206 if (connection->current_tle_writes == 0)
209 connection->current_tle_writes = 0;
210 atomic_inc(&connection->current_tle_nr);
211 wake_all_senders(connection);
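/* Note: this only advances current_tle_nr and wakes the senders; the actual
 * P_BARRIER closing the previous epoch is emitted by the sender work once it
 * notices the changed epoch number (rough summary, see drbd_worker.c). */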
214 void complete_master_bio(struct drbd_device *device,
215 struct bio_and_error *m)
217 bio_endio(m->bio, m->error);
222 /* Helper for __req_mod().
223 * Set m->bio to the master bio, if it is fit to be completed,
224 * or leave it alone (it is initialized to NULL in __req_mod),
225 * if it has already been completed, or cannot be completed yet.
226 * If m->bio is set, the error status to be returned is placed in m->error.
229 void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
231 const unsigned s = req->rq_state;
232 struct drbd_device *device = req->device;
236 /* we must not complete the master bio, while it is
237 * still being processed by _drbd_send_zc_bio (drbd_send_dblock)
238 * not yet acknowledged by the peer
239 * not yet completed by the local io subsystem
240 * these flags may get cleared in any order by
243 * the bio_endio completion callbacks.
245 if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
246 (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
247 (s & RQ_COMPLETION_SUSP)) {
248 drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
252 if (!req->master_bio) {
253 drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
257 rw = bio_rw(req->master_bio);
260 * figure out whether to report success or failure.
262 * report success when at least one of the operations succeeded.
263 * or, to put the other way,
264 * only report failure, when both operations failed.
266 * what to do about the failures is handled elsewhere.
267 * what we need to do here is just: complete the master_bio.
269 * local completion error, if any, has been stored as ERR_PTR
270 * in private_bio within drbd_request_endio.
272 ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
273 error = PTR_ERR(req->private_bio);
275 /* Before we can signal completion to the upper layers,
276 * we may need to close the current transfer log epoch.
277 * We are within the request lock, so we can simply compare
278 * the request epoch number with the current transfer log
279 * epoch number. If they match, increase the current_tle_nr,
280 * and reset the transfer log epoch write_cnt.
283 req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
284 start_new_tl_epoch(first_peer_device(device)->connection);
286 /* Update disk stats */
287 _drbd_end_io_acct(device, req);
290 * have it be pushed back to the retry work queue,
291 * so it will re-enter __drbd_make_request(),
292 * and be re-assigned to a suitable local or remote path,
293 * or failed if we do not have access to good data anymore.
295 * Unless it was failed early by __drbd_make_request(),
296 * because no path was available, in which case
297 * it was not even added to the transfer_log.
299 * READA may fail, and will not be retried.
301 * WRITE should have used all available paths already.
303 if (!ok && rw == READ && !list_empty(&req->tl_requests))
304 req->rq_state |= RQ_POSTPONED;
306 if (!(req->rq_state & RQ_POSTPONED)) {
307 m->error = ok ? 0 : (error ?: -EIO);
308 m->bio = req->master_bio;
309 req->master_bio = NULL;
310 /* We leave it in the tree, to be able to verify later
311 * write-acks in protocol != C during resync.
312 * But we mark it as "complete", so it won't be counted as a
313 * conflict in a multi-primary setup. */
314 req->i.completed = true;
318 wake_up(&device->misc_wait);
320 /* Either we are about to complete to upper layers,
321 * or we will restart this request.
322 * In either case, the request object will be destroyed soon,
323 * so better remove it from all lists. */
324 list_del_init(&req->req_pending_master_completion);
327 /* still holds resource->req_lock */
328 static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
330 struct drbd_device *device = req->device;
331 D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
333 if (!atomic_sub_and_test(put, &req->completion_ref))
336 drbd_req_complete(req, m);
338 if (req->rq_state & RQ_POSTPONED) {
339 /* don't destroy the req object just yet,
340 * but queue it for retry */
341 drbd_restart_request(req);
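/* The helpers below maintain cached pointers into the transfer log, one set
 * per connection: req_next (next request the sender should pick up, i.e.
 * still RQ_NET_QUEUED), req_ack_pending (oldest request sent but still
 * waiting for an ack) and req_not_net_done (oldest request not yet
 * RQ_NET_DONE).  set_if_null_* establishes a pointer if it is unset;
 * advance_* moves it past the given request to the next one that still
 * matches, or clears it at the end of the transfer log.  They are consulted
 * e.g. by request_timer_fn() below. */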
348 static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
350 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
353 if (connection->req_next == NULL)
354 connection->req_next = req;
357 static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
359 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
362 if (connection->req_next != req)
364 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
365 const unsigned s = req->rq_state;
366 if (s & RQ_NET_QUEUED)
369 if (&req->tl_requests == &connection->transfer_log)
371 connection->req_next = req;
374 static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
376 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
379 if (connection->req_ack_pending == NULL)
380 connection->req_ack_pending = req;
383 static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
385 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
388 if (connection->req_ack_pending != req)
390 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
391 const unsigned s = req->rq_state;
392 if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
395 if (&req->tl_requests == &connection->transfer_log)
397 connection->req_ack_pending = req;
400 static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
402 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
405 if (connection->req_not_net_done == NULL)
406 connection->req_not_net_done = req;
409 static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
411 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
414 if (connection->req_not_net_done != req)
416 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
417 const unsigned s = req->rq_state;
418 if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
421 if (&req->tl_requests == &connection->transfer_log)
423 connection->req_not_net_done = req;
426 /* I'd like this to be the only place that manipulates
427 * req->completion_ref and req->kref. */
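/* mod_rq_state(req, m, clear, set): under req_lock, clear the "clear" bits
 * and set the "set" bits in req->rq_state, and translate the resulting
 * transitions into reference counting: completion_ref/kref references are
 * taken when pending/queued/suspended phases begin and dropped when they
 * end, which may complete the master bio (reported via *m) or destroy the
 * request. */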
428 static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
431 struct drbd_device *device = req->device;
432 struct drbd_peer_device *peer_device = first_peer_device(device);
433 unsigned s = req->rq_state;
437 if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
438 set |= RQ_COMPLETION_SUSP;
442 req->rq_state &= ~clear;
443 req->rq_state |= set;
446 if (req->rq_state == s)
449 /* intent: get references */
451 if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
452 atomic_inc(&req->completion_ref);
454 if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
455 inc_ap_pending(device);
456 atomic_inc(&req->completion_ref);
459 if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
460 atomic_inc(&req->completion_ref);
461 set_if_null_req_next(peer_device, req);
464 if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
465 kref_get(&req->kref); /* wait for the DONE */
467 if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
468 /* potentially already completed in the asender thread */
469 if (!(s & RQ_NET_DONE)) {
470 atomic_add(req->i.size >> 9, &device->ap_in_flight);
471 set_if_null_req_not_net_done(peer_device, req);
473 if (s & RQ_NET_PENDING)
474 set_if_null_req_ack_pending(peer_device, req);
477 if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
478 atomic_inc(&req->completion_ref);
480 /* progress: put references */
482 if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
485 if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
486 D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
487 /* local completion may still come in later,
488 * we need to keep the req object around. */
489 kref_get(&req->kref);
493 if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
494 if (req->rq_state & RQ_LOCAL_ABORTED)
498 list_del_init(&req->req_pending_local);
501 if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
502 dec_ap_pending(device);
504 req->acked_jif = jiffies;
505 advance_conn_req_ack_pending(peer_device, req);
508 if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
510 advance_conn_req_next(peer_device, req);
513 if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
515 atomic_sub(req->i.size >> 9, &device->ap_in_flight);
516 if (s & RQ_EXP_BARR_ACK)
518 req->net_done_jif = jiffies;
520 /* in ahead/behind mode, or just in case,
521 * before we finally destroy this request,
522 * the caching pointers must not reference it anymore */
523 advance_conn_req_next(peer_device, req);
524 advance_conn_req_ack_pending(peer_device, req);
525 advance_conn_req_not_net_done(peer_device, req);
528 /* potentially complete and destroy */
530 if (k_put || c_put) {
531 /* Completion does its own kref_put. If we are going to
532 * kref_sub below, we need req to still be around then. */
533 int at_least = k_put + !!c_put;
534 int refcount = atomic_read(&req->kref.refcount);
535 if (refcount < at_least)
537 "mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
538 s, req->rq_state, refcount, at_least);
541 /* If we made progress, retry conflicting peer requests, if any. */
543 wake_up(&device->misc_wait);
546 k_put += drbd_req_put_completion_ref(req, m, c_put);
548 kref_sub(&req->kref, k_put, drbd_req_destroy);
551 static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
553 char b[BDEVNAME_SIZE];
555 if (!__ratelimit(&drbd_ratelimit_state))
558 drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
559 (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
560 (unsigned long long)req->i.sector,
562 bdevname(device->ldev->backing_bdev, b));
565 /* Helper for HANDED_OVER_TO_NETWORK.
566 * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
567 * Is it also still "PENDING"?
568 * --> If so, clear PENDING and set NET_OK below.
569 * If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster
570 * (and we must not set RQ_NET_OK) */
571 static inline bool is_pending_write_protocol_A(struct drbd_request *req)
573 return (req->rq_state &
574 (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
575 == (RQ_WRITE|RQ_NET_PENDING);
578 /* obviously this could be coded as many single functions
579 * instead of one huge switch,
580 * or by putting the code directly in the respective locations
581 * (as it has been before).
583 * but having it this way
584 * enforces that it is all in this one place, where it is easier to audit,
585 * it makes it obvious that whatever "event" "happens" to a request should
586 * happen "atomically" within the req_lock,
587 * and it enforces that we have to think in a very structured manner
588 * about the "events" that may happen to a request during its life time ...
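/* A sketch of the usual calling pattern (compare the req_mod() inline
 * helper in drbd_req.h; a sketch, not copied verbatim):
 *
 *	struct bio_and_error m;
 *	int rv;
 *
 *	spin_lock_irq(&device->resource->req_lock);
 *	rv = __req_mod(req, what, &m);
 *	spin_unlock_irq(&device->resource->req_lock);
 *	if (m.bio)
 *		complete_master_bio(device, &m);
 */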
590 int __req_mod(struct drbd_request *req, enum drbd_req_event what,
591 struct bio_and_error *m)
593 struct drbd_device *const device = req->device;
594 struct drbd_peer_device *const peer_device = first_peer_device(device);
595 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
604 drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
607 /* does not happen...
608 * initialization done in drbd_req_new
613 case TO_BE_SENT: /* via network */
614 /* reached via __drbd_make_request
615 * and from w_read_retry_remote */
616 D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
618 nc = rcu_dereference(connection->net_conf);
619 p = nc->wire_protocol;
622 p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
623 p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
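/* protocol C: expect P_WRITE_ACK (stable on the peer's disk),
 * protocol B: expect P_RECV_ACK (reached the peer),
 * protocol A: no per-request ack at all. */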
624 mod_rq_state(req, m, 0, RQ_NET_PENDING);
627 case TO_BE_SUBMITTED: /* locally */
628 /* reached via __drbd_make_request */
629 D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
630 mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
634 if (req->rq_state & RQ_WRITE)
635 device->writ_cnt += req->i.size >> 9;
637 device->read_cnt += req->i.size >> 9;
639 mod_rq_state(req, m, RQ_LOCAL_PENDING,
640 RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
644 mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
647 case WRITE_COMPLETED_WITH_ERROR:
648 drbd_report_io_error(device, req);
649 __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
650 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
653 case READ_COMPLETED_WITH_ERROR:
654 drbd_set_out_of_sync(device, req->i.sector, req->i.size);
655 drbd_report_io_error(device, req);
656 __drbd_chk_io_error(device, DRBD_READ_ERROR);
658 case READ_AHEAD_COMPLETED_WITH_ERROR:
659 /* it is legal to fail READA, no __drbd_chk_io_error in that case. */
660 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
663 case DISCARD_COMPLETED_NOTSUPP:
664 case DISCARD_COMPLETED_WITH_ERROR:
665 /* I'd rather not detach from local disk just because it
666 * failed a REQ_DISCARD. */
667 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
670 case QUEUE_FOR_NET_READ:
671 /* READ or READA, and
673 * or target area marked as invalid,
674 * or just got an io-error. */
675 /* from __drbd_make_request
676 * or from bio_endio during read io-error recovery */
678 /* So we can verify the handle in the answer packet.
679 * Corresponding drbd_remove_request_interval is in
680 * drbd_req_complete() */
681 D_ASSERT(device, drbd_interval_empty(&req->i));
682 drbd_insert_interval(&device->read_requests, &req->i);
684 set_bit(UNPLUG_REMOTE, &device->flags);
686 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
687 D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
688 mod_rq_state(req, m, 0, RQ_NET_QUEUED);
689 req->w.cb = w_send_read_req;
690 drbd_queue_work(&connection->sender_work,
694 case QUEUE_FOR_NET_WRITE:
695 /* assert something? */
696 /* from __drbd_make_request only */
698 /* Corresponding drbd_remove_request_interval is in
699 * drbd_req_complete() */
700 D_ASSERT(device, drbd_interval_empty(&req->i));
701 drbd_insert_interval(&device->write_requests, &req->i);
704 * In case the req ended up on the transfer log before being
705 * queued on the worker, it could lead to this request being
706 * missed during cleanup after connection loss.
707 * So we have to do both operations here,
708 * within the same lock that protects the transfer log.
710 * _req_add_to_epoch(req); this has to be after the
711 * _maybe_start_new_epoch(req); which happened in
712 * __drbd_make_request, because we now may set the bit
713 * again ourselves to close the current epoch.
715 * Add req to the (now) current epoch (barrier). */
717 /* otherwise we may lose an unplug, which may cause some remote
718 * io-scheduler timeout to expire, increasing maximum latency,
719 * hurting performance. */
720 set_bit(UNPLUG_REMOTE, &device->flags);
722 /* queue work item to send data */
723 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
724 mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
725 req->w.cb = w_send_dblock;
726 drbd_queue_work(&connection->sender_work,
729 /* close the epoch, in case it outgrew the limit */
731 nc = rcu_dereference(connection->net_conf);
732 p = nc->max_epoch_size;
734 if (connection->current_tle_writes >= p)
735 start_new_tl_epoch(connection);
739 case QUEUE_FOR_SEND_OOS:
740 mod_rq_state(req, m, 0, RQ_NET_QUEUED);
741 req->w.cb = w_send_out_of_sync;
742 drbd_queue_work(&connection->sender_work,
746 case READ_RETRY_REMOTE_CANCELED:
749 /* real cleanup will be done from tl_clear. just update flags
750 * so it is no longer marked as on the worker queue */
751 mod_rq_state(req, m, RQ_NET_QUEUED, 0);
754 case HANDED_OVER_TO_NETWORK:
755 /* assert something? */
756 if (is_pending_write_protocol_A(req))
757 /* this is what is dangerous about protocol A:
758 * pretend it was successfully written on the peer. */
759 mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
760 RQ_NET_SENT|RQ_NET_OK);
762 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
763 /* It is still not yet RQ_NET_DONE until the
764 * corresponding epoch barrier got acked as well,
765 * so we know what to dirty on connection loss. */
768 case OOS_HANDED_TO_NETWORK:
769 /* Was not set PENDING, no longer QUEUED, so is now DONE
770 * as far as this connection is concerned. */
771 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
774 case CONNECTION_LOST_WHILE_PENDING:
775 /* transfer log cleanup after connection loss */
777 RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
781 case CONFLICT_RESOLVED:
782 /* for superseded conflicting writes of multiple primaries,
783 * there is no need to keep anything in the tl, potential
784 * node crashes are covered by the activity log.
786 * If this request had been marked as RQ_POSTPONED before,
787 * it will actually not be completed, but "restarted",
788 * resubmitted from the retry worker context. */
789 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
790 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
791 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
794 case WRITE_ACKED_BY_PEER_AND_SIS:
795 req->rq_state |= RQ_NET_SIS;
796 case WRITE_ACKED_BY_PEER:
797 /* Normal operation protocol C: successfully written on peer.
798 * During resync, even in protocol != C,
799 * we requested an explicit write ack anyways.
800 * Which means we cannot even assert anything here.
801 * Nothing more to do here.
802 * We want to keep the tl in place for all protocols, to cater
803 * for volatile write-back caches on lower level devices. */
805 case RECV_ACKED_BY_PEER:
806 D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
807 /* protocol B; pretends to be successfully written on peer.
808 * see also notes above in HANDED_OVER_TO_NETWORK about
811 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
815 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
816 /* If this node has already detected the write conflict, the
817 * worker will be waiting on misc_wait. Wake it up once this
818 * request has completed locally.
820 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
821 req->rq_state |= RQ_POSTPONED;
823 wake_up(&device->misc_wait);
824 /* Do not clear RQ_NET_PENDING. This request will make further
825 * progress via restart_conflicting_writes() or
826 * fail_postponed_requests(). Hopefully. */
830 mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
833 case FAIL_FROZEN_DISK_IO:
834 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
836 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
839 case RESTART_FROZEN_DISK_IO:
840 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
844 RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
848 if (bio_data_dir(req->master_bio) == WRITE)
851 get_ldev(device); /* always succeeds in this call path */
852 req->w.cb = w_restart_disk_io;
853 drbd_queue_work(&connection->sender_work,
858 /* Simply complete (local only) READs. */
859 if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
860 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
864 /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
865 before the connection loss (B&C only); only P_BARRIER_ACK
866 (or the local completion?) was missing when we suspended.
867 Throw them out of the TL here by pretending we got a BARRIER_ACK.
868 During connection handshake, we ensure that the peer was not rebooted. */
869 if (!(req->rq_state & RQ_NET_OK)) {
870 /* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
871 * in that case we must not set RQ_NET_PENDING. */
873 mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
875 /* w.cb expected to be w_send_dblock, or w_send_read_req */
876 drbd_queue_work(&connection->sender_work,
878 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
879 } /* else: FIXME can this happen? */
882 /* else, fall through to BARRIER_ACKED */
885 /* barrier ack for READ requests does not make sense */
886 if (!(req->rq_state & RQ_WRITE))
889 if (req->rq_state & RQ_NET_PENDING) {
890 /* barrier came in before all requests were acked.
891 * this is bad, because if the connection is lost now,
892 * we won't be able to clean them up... */
893 drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
895 /* Allowed to complete requests, even while suspended.
896 * As this is called for all requests within a matching epoch,
897 * we need to filter, and only set RQ_NET_DONE for those that
898 * have actually been on the wire. */
899 mod_rq_state(req, m, RQ_COMPLETION_SUSP,
900 (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
904 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
905 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
908 case QUEUE_AS_DRBD_BARRIER:
909 start_new_tl_epoch(connection);
910 mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
917 /* we may do a local read if:
918 * - we are consistent (of course),
919 * - or we are generally inconsistent,
920 * BUT we are still/already IN SYNC for this area.
921 * since size may be bigger than BM_BLOCK_SIZE,
922 * we may need to check several bits.
924 static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
926 unsigned long sbnr, ebnr;
927 sector_t esector, nr_sectors;
929 if (device->state.disk == D_UP_TO_DATE)
931 if (device->state.disk != D_INCONSISTENT)
933 esector = sector + (size >> 9) - 1;
934 nr_sectors = drbd_get_capacity(device->this_bdev);
935 D_ASSERT(device, sector < nr_sectors);
936 D_ASSERT(device, esector < nr_sectors);
938 sbnr = BM_SECT_TO_BIT(sector);
939 ebnr = BM_SECT_TO_BIT(esector);
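/* e.g. with the usual 4 KiB BM_BLOCK_SIZE, a 32 KiB request spans eight
 * bitmap bits; we may read locally only if none of them is out of sync. */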
941 return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
944 static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
945 enum drbd_read_balancing rbm)
947 struct backing_dev_info *bdi;
951 case RB_CONGESTED_REMOTE:
952 bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
953 return bdi_read_congested(bdi);
954 case RB_LEAST_PENDING:
955 return atomic_read(&device->local_cnt) >
956 atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
957 case RB_32K_STRIPING: /* stripe_shift = 15 */
958 case RB_64K_STRIPING:
959 case RB_128K_STRIPING:
960 case RB_256K_STRIPING:
961 case RB_512K_STRIPING:
962 case RB_1M_STRIPING: /* stripe_shift = 20 */
963 stripe_shift = (rbm - RB_32K_STRIPING + 15);
964 return (sector >> (stripe_shift - 9)) & 1;
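/* e.g. RB_32K_STRIPING: stripe_shift = 15, so 32 KiB (64-sector) stripes;
 * every other stripe is read from the peer. */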
966 return test_and_change_bit(READ_BALANCE_RR, &device->flags);
967 case RB_PREFER_REMOTE:
969 case RB_PREFER_LOCAL:
976 * complete_conflicting_writes - wait for any conflicting write requests
978 * The write_requests tree contains all active write requests which we
979 * currently know about. Wait for any requests to complete which conflict with
982 * Only way out: remove the conflicting intervals from the tree.
984 static void complete_conflicting_writes(struct drbd_request *req)
987 struct drbd_device *device = req->device;
988 struct drbd_interval *i;
989 sector_t sector = req->i.sector;
990 int size = req->i.size;
992 i = drbd_find_overlap(&device->write_requests, sector, size);
997 prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
998 i = drbd_find_overlap(&device->write_requests, sector, size);
1001 /* Indicate to wake up device->misc_wait on progress. */
1003 spin_unlock_irq(&device->resource->req_lock);
1005 spin_lock_irq(&device->resource->req_lock);
1007 finish_wait(&device->misc_wait, &wait);
1010 /* called within req_lock and rcu_read_lock() */
1011 static void maybe_pull_ahead(struct drbd_device *device)
1013 struct drbd_connection *connection = first_peer_device(device)->connection;
1014 struct net_conf *nc;
1015 bool congested = false;
1016 enum drbd_on_congestion on_congestion;
1019 nc = rcu_dereference(connection->net_conf);
1020 on_congestion = nc ? nc->on_congestion : OC_BLOCK;
1022 if (on_congestion == OC_BLOCK ||
1023 connection->agreed_pro_version < 96)
1026 if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
1027 return; /* nothing to do ... */
1029 /* If I don't even have good local storage, we can not reasonably try
1030 * to pull ahead of the peer. We also need the local reference to make
1031 * sure device->act_log is there.
1033 if (!get_ldev_if_state(device, D_UP_TO_DATE))
1036 if (nc->cong_fill &&
1037 atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
1038 drbd_info(device, "Congestion-fill threshold reached\n");
1042 if (device->act_log->used >= nc->cong_extents) {
1043 drbd_info(device, "Congestion-extents threshold reached\n");
1048 /* start a new epoch for non-mirrored writes */
1049 start_new_tl_epoch(first_peer_device(device)->connection);
1051 if (on_congestion == OC_PULL_AHEAD)
1052 _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
1053 else /*nc->on_congestion == OC_DISCONNECT */
1054 _drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
1059 /* If this returns false, and req->private_bio is still set,
1060 * this should be submitted locally.
1062 * If it returns false, but req->private_bio is not set,
1063 * we do not have access to good data :(
1065 * Otherwise, this destroys req->private_bio, if any,
1068 static bool do_remote_read(struct drbd_request *req)
1070 struct drbd_device *device = req->device;
1071 enum drbd_read_balancing rbm;
1073 if (req->private_bio) {
1074 if (!drbd_may_do_local_read(device,
1075 req->i.sector, req->i.size)) {
1076 bio_put(req->private_bio);
1077 req->private_bio = NULL;
1082 if (device->state.pdsk != D_UP_TO_DATE)
1085 if (req->private_bio == NULL)
1088 /* TODO: improve read balancing decisions, take into account drbd
1089 * protocol, pending requests etc. */
1092 rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
1095 if (rbm == RB_PREFER_LOCAL && req->private_bio)
1096 return false; /* submit locally */
1098 if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
1099 if (req->private_bio) {
1100 bio_put(req->private_bio);
1101 req->private_bio = NULL;
1110 /* returns number of connections (== 1, for drbd 8.4)
1111 * expected to actually write this data,
1112 * which does NOT include those that we are L_AHEAD for. */
1113 static int drbd_process_write_request(struct drbd_request *req)
1115 struct drbd_device *device = req->device;
1116 int remote, send_oos;
1118 remote = drbd_should_do_remote(device->state);
1119 send_oos = drbd_should_send_out_of_sync(device->state);
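/* "remote" means the peer gets the full data; "send_oos" means we are
 * (going) ahead of the peer and only mark the blocks as out of sync
 * (P_OUT_OF_SYNC), to be resynced later. */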
1121 /* Need to replicate writes. Unless it is an empty flush,
1122 * which is better mapped to a DRBD P_BARRIER packet,
1123 * also for drbd wire protocol compatibility reasons.
1124 * If this was a flush, just start a new epoch.
1125 * Unless the current epoch was empty anyways, or we are not currently
1126 * replicating, in which case there is no point. */
1127 if (unlikely(req->i.size == 0)) {
1128 /* The only size==0 bios we expect are empty flushes. */
1129 D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH);
1131 _req_mod(req, QUEUE_AS_DRBD_BARRIER);
1135 if (!remote && !send_oos)
1138 D_ASSERT(device, !(remote && send_oos));
1141 _req_mod(req, TO_BE_SENT);
1142 _req_mod(req, QUEUE_FOR_NET_WRITE);
1143 } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
1144 _req_mod(req, QUEUE_FOR_SEND_OOS);
1150 drbd_submit_req_private_bio(struct drbd_request *req)
1152 struct drbd_device *device = req->device;
1153 struct bio *bio = req->private_bio;
1154 const int rw = bio_rw(bio);
1156 bio->bi_bdev = device->ldev->backing_bdev;
1158 /* State may have changed since we grabbed our reference on the
1159 * ->ldev member. Double check, and short-circuit to endio.
1160 * In case the last activity log transaction failed to get on
1161 * stable storage, and this is a WRITE, we may not even submit
1163 if (get_ldev(device)) {
1164 req->pre_submit_jif = jiffies;
1165 if (drbd_insert_fault(device,
1166 rw == WRITE ? DRBD_FAULT_DT_WR
1167 : rw == READ ? DRBD_FAULT_DT_RD
1168 : DRBD_FAULT_DT_RA))
1169 bio_endio(bio, -EIO);
1171 generic_make_request(bio);
1174 bio_endio(bio, -EIO);
1177 static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
1179 spin_lock_irq(&device->resource->req_lock);
1180 list_add_tail(&req->tl_requests, &device->submit.writes);
1181 list_add_tail(&req->req_pending_master_completion,
1182 &device->pending_master_completion[1 /* WRITE */]);
1183 spin_unlock_irq(&device->resource->req_lock);
1184 queue_work(device->submit.wq, &device->submit.worker);
1185 /* do_submit() may sleep internally on al_wait, too */
1186 wake_up(&device->al_wait);
1189 /* returns the new drbd_request pointer, if the caller is expected to
1190 * drbd_send_and_submit() it (to save latency), or NULL if we queued the
1191 * request on the submitter thread.
1192 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
1194 static struct drbd_request *
1195 drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
1197 const int rw = bio_data_dir(bio);
1198 struct drbd_request *req;
1200 /* allocate outside of all locks; */
1201 req = drbd_req_new(device, bio);
1204 /* only pass the error to the upper layers.
1205 * if user cannot handle io errors, that's not our business. */
1206 drbd_err(device, "could not kmalloc() req\n");
1207 bio_endio(bio, -ENOMEM);
1208 return ERR_PTR(-ENOMEM);
1210 req->start_jif = start_jif;
1212 if (!get_ldev(device)) {
1213 bio_put(req->private_bio);
1214 req->private_bio = NULL;
1217 /* Update disk stats */
1218 _drbd_start_io_acct(device, req);
1220 if (rw == WRITE && req->private_bio && req->i.size
1221 && !test_bit(AL_SUSPENDED, &device->flags)) {
1222 if (!drbd_al_begin_io_fastpath(device, &req->i)) {
1223 atomic_inc(&device->ap_actlog_cnt);
1224 drbd_queue_write(device, req);
1227 req->rq_state |= RQ_IN_ACT_LOG;
1228 req->in_actlog_jif = jiffies;
1234 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
1236 struct drbd_resource *resource = device->resource;
1237 const int rw = bio_rw(req->master_bio);
1238 struct bio_and_error m = { NULL, };
1239 bool no_remote = false;
1240 bool submit_private_bio = false;
1242 spin_lock_irq(&resource->req_lock);
1244 /* This may temporarily give up the req_lock,
1245 * but will re-acquire it before it returns here.
1246 * Needs to be before the check on drbd_suspended() */
1247 complete_conflicting_writes(req);
1248 /* no more giving up req_lock from now on! */
1250 /* check for congestion, and potentially stop sending
1251 * full data updates, but start sending "dirty bits" only. */
1252 maybe_pull_ahead(device);
1256 if (drbd_suspended(device)) {
1257 /* push back and retry: */
1258 req->rq_state |= RQ_POSTPONED;
1259 if (req->private_bio) {
1260 bio_put(req->private_bio);
1261 req->private_bio = NULL;
1267 /* We fail READ/READA early, if we can not serve it.
1268 * We must do this before req is registered on any lists.
1269 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
1271 if (!do_remote_read(req) && !req->private_bio)
1275 /* which transfer log epoch does this belong to? */
1276 req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);
1278 /* no point in adding empty flushes to the transfer log,
1279 * they are mapped to drbd barriers already. */
1280 if (likely(req->i.size!=0)) {
1282 first_peer_device(device)->connection->current_tle_writes++;
1284 list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
1288 if (!drbd_process_write_request(req))
1291 /* We either have a private_bio, or we can read from remote.
1292 * Otherwise we had done the goto nodata above. */
1293 if (req->private_bio == NULL) {
1294 _req_mod(req, TO_BE_SENT);
1295 _req_mod(req, QUEUE_FOR_NET_READ);
1300 /* If it took the fast path in drbd_request_prepare, add it here.
1301 * The slow path has added it already. */
1302 if (list_empty(&req->req_pending_master_completion))
1303 list_add_tail(&req->req_pending_master_completion,
1304 &device->pending_master_completion[rw == WRITE]);
1305 if (req->private_bio) {
1306 /* needs to be marked within the same spinlock */
1307 list_add_tail(&req->req_pending_local,
1308 &device->pending_completion[rw == WRITE]);
1309 _req_mod(req, TO_BE_SUBMITTED);
1310 /* but we need to give up the spinlock to submit */
1311 submit_private_bio = true;
1312 } else if (no_remote) {
1314 if (__ratelimit(&drbd_ratelimit_state))
1315 drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
1316 (unsigned long long)req->i.sector, req->i.size >> 9);
1317 /* A write may have been queued for send_oos, however.
1318 * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
1322 if (drbd_req_put_completion_ref(req, &m, 1))
1323 kref_put(&req->kref, drbd_req_destroy);
1324 spin_unlock_irq(&resource->req_lock);
1326 /* Even though above is a kref_put(), this is safe.
1327 * As long as we still need to submit our private bio,
1328 * we hold a completion ref, and the request cannot disappear.
1329 * If however this request did not even have a private bio to submit
1330 * (e.g. remote read), req may already be invalid now.
1331 * That's why we cannot check on req->private_bio. */
1332 if (submit_private_bio)
1333 drbd_submit_req_private_bio(req);
1335 complete_master_bio(device, &m);
1338 void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
1340 struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
1341 if (IS_ERR_OR_NULL(req))
1343 drbd_send_and_submit(device, req);
1346 static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
1348 struct drbd_request *req, *tmp;
1349 list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
1350 const int rw = bio_data_dir(req->master_bio);
1352 if (rw == WRITE /* rw != WRITE should not even end up here! */
1353 && req->private_bio && req->i.size
1354 && !test_bit(AL_SUSPENDED, &device->flags)) {
1355 if (!drbd_al_begin_io_fastpath(device, &req->i))
1358 req->rq_state |= RQ_IN_ACT_LOG;
1359 req->in_actlog_jif = jiffies;
1360 atomic_dec(&device->ap_actlog_cnt);
1363 list_del_init(&req->tl_requests);
1364 drbd_send_and_submit(device, req);
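/* Try to reserve activity log extents for all requests on "incoming" without
 * blocking: requests whose extents could be reserved are moved to "pending"
 * (submitted after the next AL transaction commit), requests blocked e.g. by
 * resync activity go to "later", and once the pending transaction is full
 * (-ENOBUFS) the rest stays on "incoming".  Returns true if anything was
 * moved to "pending". */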
1368 static bool prepare_al_transaction_nonblock(struct drbd_device *device,
1369 struct list_head *incoming,
1370 struct list_head *pending,
1371 struct list_head *later)
1373 struct drbd_request *req, *tmp;
1377 spin_lock_irq(&device->al_lock);
1378 list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
1379 err = drbd_al_begin_io_nonblock(device, &req->i);
1380 if (err == -ENOBUFS)
1385 list_move_tail(&req->tl_requests, later);
1387 list_move_tail(&req->tl_requests, pending);
1389 spin_unlock_irq(&device->al_lock);
1391 wake_up(&device->al_wait);
1392 return !list_empty(pending);
1395 void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
1397 struct drbd_request *req, *tmp;
1399 list_for_each_entry_safe(req, tmp, pending, tl_requests) {
1400 req->rq_state |= RQ_IN_ACT_LOG;
1401 req->in_actlog_jif = jiffies;
1402 atomic_dec(&device->ap_actlog_cnt);
1403 list_del_init(&req->tl_requests);
1404 drbd_send_and_submit(device, req);
1408 void do_submit(struct work_struct *ws)
1410 struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
1411 LIST_HEAD(incoming); /* from drbd_make_request() */
1412 LIST_HEAD(pending); /* to be submitted after next AL-transaction commit */
1413 LIST_HEAD(busy); /* blocked by resync requests */
1415 /* grab new incoming requests */
1416 spin_lock_irq(&device->resource->req_lock);
1417 list_splice_tail_init(&device->submit.writes, &incoming);
1418 spin_unlock_irq(&device->resource->req_lock);
1423 /* move used-to-be-busy back to front of incoming */
1424 list_splice_init(&busy, &incoming);
1425 submit_fast_path(device, &incoming);
1426 if (list_empty(&incoming))
1430 prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);
1432 list_splice_init(&busy, &incoming);
1433 prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
1434 if (!list_empty(&pending))
1439 /* If all currently "hot" activity log extents are kept busy by
1440 * incoming requests, we still must not totally starve new
1441 * requests to "cold" extents.
1442 * Something left on &incoming means there had not been
1443 * enough update slots available, and the activity log
1444 * has been marked as "starving".
1446 * Try again now, without looking for new requests,
1447 * effectively blocking all new requests until we made
1448 * at least _some_ progress with what we currently have.
1450 if (!list_empty(&incoming))
1453 /* Nothing moved to pending, but nothing left
1454 * on incoming: all moved to busy!
1455 * Grab new and iterate. */
1456 spin_lock_irq(&device->resource->req_lock);
1457 list_splice_tail_init(&device->submit.writes, &incoming);
1458 spin_unlock_irq(&device->resource->req_lock);
1460 finish_wait(&device->al_wait, &wait);
1462 /* If the transaction was full, before all incoming requests
1463 * had been processed, skip ahead to commit, and iterate
1464 * without splicing in more incoming requests from upper layers.
1466 * Else, if all incoming have been processed,
1467 * they have become either "pending" (to be submitted after
1468 * next transaction commit) or "busy" (blocked by resync).
1470 * Maybe more was queued, while we prepared the transaction?
1471 * Try to stuff those into this transaction as well.
1472 * Be strictly non-blocking here,
1473 * we already have something to commit.
1475 * Commit if we don't make any more progress.
1478 while (list_empty(&incoming)) {
1479 LIST_HEAD(more_pending);
1480 LIST_HEAD(more_incoming);
1483 /* It is ok to look outside the lock,
1484 * it's only an optimization anyways */
1485 if (list_empty(&device->submit.writes))
1488 spin_lock_irq(&device->resource->req_lock);
1489 list_splice_tail_init(&device->submit.writes, &more_incoming);
1490 spin_unlock_irq(&device->resource->req_lock);
1492 if (list_empty(&more_incoming))
1495 made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy);
1497 list_splice_tail_init(&more_pending, &pending);
1498 list_splice_tail_init(&more_incoming, &incoming);
1503 drbd_al_begin_io_commit(device);
1504 send_and_submit_pending(device, &pending);
1508 void drbd_make_request(struct request_queue *q, struct bio *bio)
1510 struct drbd_device *device = (struct drbd_device *) q->queuedata;
1511 unsigned long start_jif;
1513 start_jif = jiffies;
1516 * what we "blindly" assume:
1518 D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
1521 __drbd_make_request(device, bio, start_jif);
1524 /* This is called by bio_add_page().
1526 * q->max_hw_sectors and other global limits are already enforced there.
1528 * We need to call down to our lower level device,
1529 * in case it has special restrictions.
1531 * We also may need to enforce configured max-bio-bvecs limits.
1533 * As long as the BIO is empty we have to allow at least one bvec,
1534 * regardless of size and offset, so no need to ask lower levels.
1536 int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
1538 struct drbd_device *device = (struct drbd_device *) q->queuedata;
1539 unsigned int bio_size = bvm->bi_size;
1540 int limit = DRBD_MAX_BIO_SIZE;
1543 if (bio_size && get_ldev(device)) {
1544 unsigned int max_hw_sectors = queue_max_hw_sectors(q);
1545 struct request_queue * const b =
1546 device->ldev->backing_bdev->bd_disk->queue;
1547 if (b->merge_bvec_fn) {
1548 backing_limit = b->merge_bvec_fn(b, bvm, bvec);
1549 limit = min(limit, backing_limit);
1552 if ((limit >> 9) > max_hw_sectors)
1553 limit = max_hw_sectors << 9;
1558 void request_timer_fn(unsigned long data)
1560 struct drbd_device *device = (struct drbd_device *) data;
1561 struct drbd_connection *connection = first_peer_device(device)->connection;
1562 struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
1563 struct net_conf *nc;
1564 unsigned long oldest_submit_jif;
1565 unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
1569 nc = rcu_dereference(connection->net_conf);
1570 if (nc && device->state.conn >= C_WF_REPORT_PARAMS)
1571 ent = nc->timeout * HZ/10 * nc->ko_count;
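/* nc->timeout is in tenths of a second; e.g. timeout=60, ko-count=7
 * gives an effective network timeout of 42 seconds. */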
1573 if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
1574 dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
1579 et = min_not_zero(dt, ent);
1582 return; /* Recurring timer stopped */
1587 spin_lock_irq(&device->resource->req_lock);
1588 req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
1589 req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);
1590 req_peer = connection->req_not_net_done;
1591 /* maybe the oldest request waiting for the peer is in fact still
1592 * blocking in tcp sendmsg */
1593 if (!req_peer && connection->req_next && connection->req_next->pre_send_jif)
1594 req_peer = connection->req_next;
1596 /* evaluate the oldest peer request only in one timer! */
1597 if (req_peer && req_peer->device != device)
1600 /* do we have something to evaluate? */
1601 if (req_peer == NULL && req_write == NULL && req_read == NULL)
1605 (req_write && req_read)
1606 ? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif)
1607 ? req_write->pre_submit_jif : req_read->pre_submit_jif )
1608 : req_write ? req_write->pre_submit_jif
1609 : req_read ? req_read->pre_submit_jif : now;
1611 /* The request is considered timed out, if
1612 * - we have some effective timeout from the configuration,
1613 * with above state restrictions applied,
1614 * - the oldest request is waiting for a response from the network
1615 * resp. the local disk,
1616 * - the oldest request is in fact older than the effective timeout,
1617 * - the connection was established (resp. disk was attached)
1618 * for longer than the timeout already.
1619 * Note that for 32bit jiffies and very stable connections/disks,
1620 * we may have a wrap-around, which is caught by
1621 * !time_in_range(now, last_..._jif, last_..._jif + timeout).
1623 * Side effect: once per 32bit wrap-around interval, which means every
1624 * ~198 days with 250 HZ, we have a window where the timeout would need
1625 * to expire twice (worst case) to become effective. Good enough.
1627 if (ent && req_peer &&
1628 time_after(now, req_peer->pre_send_jif + ent) &&
1629 !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) {
1630 drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n");
1631 _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
1633 if (dt && oldest_submit_jif != now &&
1634 time_after(now, oldest_submit_jif + dt) &&
1635 !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
1636 drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
1637 __drbd_chk_io_error(device, DRBD_FORCE_DETACH);
1640 /* Reschedule timer for the nearest not already expired timeout.
1641 * Fallback to now + min(effective network timeout, disk timeout). */
1642 ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
1643 ? req_peer->pre_send_jif + ent : now + et;
1644 dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt))
1645 ? oldest_submit_jif + dt : now + et;
1646 nt = time_before(ent, dt) ? ent : dt;
1648 spin_unlock_irq(&connection->resource->req_lock);
1649 mod_timer(&device->request_timer, nt);