mdev = b->w.mdev;
nob = b->next;
- if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
+ if (test_and_clear_bit(CREATE_BARRIER, &tconn->flags)) {
_tl_add_barrier(tconn, b);
if (nob)
tconn->oldest_tle = nob;
req = list_entry(le, struct drbd_request, tl_requests);
rv = _req_mod(req, what);
- n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
- n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
+ if (rv & MR_WRITE)
+ n_writes++;
+ if (rv & MR_READ)
+ n_reads++;
}
tmp = b->next;
if (b->w.cb == NULL) {
b->w.cb = w_send_barrier;
inc_ap_pending(b->w.mdev);
- set_bit(CREATE_BARRIER, &b->w.mdev->flags);
+ set_bit(CREATE_BARRIER, &tconn->flags);
}
drbd_queue_work(&tconn->data.work, &b->w);
*/
void tl_clear(struct drbd_tconn *tconn)
{
- struct drbd_conf *mdev;
struct list_head *le, *tle;
struct drbd_request *r;
- int vnr;
spin_lock_irq(&tconn->req_lock);
}
/* ensure bit indicating barrier is required is clear */
- rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- clear_bit(CREATE_BARRIER, &mdev->flags);
- rcu_read_unlock();
+ clear_bit(CREATE_BARRIER, &tconn->flags);
spin_unlock_irq(&tconn->req_lock);
}
while (b) {
list_for_each_safe(le, tle, &b->requests) {
req = list_entry(le, struct drbd_request, tl_requests);
+ if (!(req->rq_state & RQ_LOCAL_PENDING))
+ continue;
if (req->w.mdev == mdev)
_req_mod(req, ABORT_DISK_IO);
}
list_for_each_safe(le, tle, &tconn->barrier_acked_requests) {
req = list_entry(le, struct drbd_request, tl_requests);
+ if (!(req->rq_state & RQ_LOCAL_PENDING))
+ continue;
if (req->w.mdev == mdev)
_req_mod(req, ABORT_DISK_IO);
}
D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
- uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
+ uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ if (uuid && uuid != UUID_JUST_CREATED)
+ uuid = uuid + UUID_NEW_BM_OFFSET;
+ else
+ get_random_bytes(&uuid, sizeof(u64));
drbd_uuid_set(mdev, UI_BITMAP, uuid);
drbd_print_uuids(mdev, "updated sync UUID");
drbd_md_sync(mdev);
}
/**
- * drbd_send_state() - Sends the drbd state to the peer
+ * drbd_send_current_state() - Sends the drbd state to the peer
* @mdev: DRBD device.
*/
-int drbd_send_state(struct drbd_conf *mdev)
+int drbd_send_current_state(struct drbd_conf *mdev)
{
struct drbd_socket *sock;
struct p_state *p;
return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}
+/**
+ * drbd_send_state() - After a state change, sends the new state to the peer
+ * @mdev: DRBD device.
+ * @state: the state to send, not necessarily the current state.
+ *
+ * Each state change queues an "after_state_ch" work, which will eventually
+ * send the resulting new state to the peer. If more state changes happen
+ * between queuing and processing of the after_state_ch work, we still
+ * want to send each intermediary state in the order it occurred.
+ *
+ * Returns 0 on success, -EIO if no send buffer could be prepared;
+ * NOTE(review): otherwise the return value of drbd_send_command() is
+ * propagated as-is — confirm callers treat non-zero as failure.
+ */
+int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
+{
+ struct drbd_socket *sock;
+ struct p_state *p;
+
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->state = cpu_to_be32(state.i); /* Within the send mutex */
+ return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
+}
+
int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
{
struct drbd_socket *sock;
p->mask = cpu_to_be32(mask.i);
p->val = cpu_to_be32(val.i);
return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
-
}
int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
sock = &mdev->tconn->data;
p = drbd_prepare_command(mdev, sock);
- dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_tfm) ?
- crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+ dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
if (!p)
return -EIO;
sock = &mdev->tconn->data;
p = drbd_prepare_command(mdev, sock);
- dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_tfm) ?
- crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+ dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
if (!p)
return -EIO;
spin_lock_init(&mdev->al_lock);
spin_lock_init(&mdev->peer_seq_lock);
- spin_lock_init(&mdev->epoch_lock);
INIT_LIST_HEAD(&mdev->active_ee);
INIT_LIST_HEAD(&mdev->sync_ee);
init_waitqueue_head(&mdev->al_wait);
init_waitqueue_head(&mdev->seq_wait);
- mdev->write_ordering = WO_bdev_flush;
mdev->resync_wenr = LC_FREE;
mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
mdev->tconn->receiver.t_state);
- /* no need to lock it, I'm the only thread alive */
- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
mdev->al_writ_cnt =
mdev->bm_writ_cnt =
mdev->read_cnt =
kfree(mdev->p_uuid);
/* mdev->p_uuid = NULL; */
- kfree(mdev->current_epoch);
if (mdev->bitmap) /* should no longer be there. */
drbd_bm_cleanup(mdev);
__free_page(mdev->md_io_page);
if (!tl_init(tconn))
goto fail;
+ tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
+ if (!tconn->current_epoch)
+ goto fail;
+ INIT_LIST_HEAD(&tconn->current_epoch->list);
+ tconn->epochs = 1;
+ spin_lock_init(&tconn->epoch_lock);
+ tconn->write_ordering = WO_bdev_flush;
+
tconn->cstate = C_STANDALONE;
mutex_init(&tconn->cstate_mutex);
spin_lock_init(&tconn->req_lock);
return tconn;
fail:
+ kfree(tconn->current_epoch);
tl_cleanup(tconn);
free_cpumask_var(tconn->cpu_mask);
drbd_free_socket(&tconn->meta);
{
struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
+ if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
+ conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
+ kfree(tconn->current_epoch);
+
idr_destroy(&tconn->volumes);
free_cpumask_var(tconn->cpu_mask);
mdev->read_requests = RB_ROOT;
mdev->write_requests = RB_ROOT;
- mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
- if (!mdev->current_epoch)
- goto out_no_epoch;
-
- INIT_LIST_HEAD(&mdev->current_epoch->list);
- mdev->epochs = 1;
-
if (!idr_pre_get(&minors, GFP_KERNEL))
goto out_no_minor_idr;
if (idr_get_new_above(&minors, mdev, minor, &minor_got))
idr_remove(&minors, minor_got);
synchronize_rcu();
out_no_minor_idr:
- kfree(mdev->current_epoch);
-out_no_epoch:
drbd_bm_cleanup(mdev);
out_no_bitmap:
__free_page(mdev->md_io_page);
goto err;
}
if (magic != DRBD_MD_MAGIC_08) {
- if (magic == DRBD_MD_MAGIC_07)
+ if (magic == DRBD_MD_MAGIC_07)
dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
else
dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");