drbd: Move list of epochs from mdev to tconn
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 137935037664c63ad3c4bc1530c38eb421fa95f6..8b99f4e28ccc2cdc5ff03df07aad7853fda73eaa 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -320,7 +320,7 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
        mdev = b->w.mdev;
 
        nob = b->next;
-       if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
+       if (test_and_clear_bit(CREATE_BARRIER, &tconn->flags)) {
                _tl_add_barrier(tconn, b);
                if (nob)
                        tconn->oldest_tle = nob;
@@ -368,8 +368,10 @@ void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
                        req = list_entry(le, struct drbd_request, tl_requests);
                        rv = _req_mod(req, what);
 
-                       n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
-                       n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
+                       if (rv & MR_WRITE)
+                               n_writes++;
+                       if (rv & MR_READ)
+                               n_reads++;
                }
                tmp = b->next;
 
@@ -379,7 +381,7 @@ void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
                                if (b->w.cb == NULL) {
                                        b->w.cb = w_send_barrier;
                                        inc_ap_pending(b->w.mdev);
-                                       set_bit(CREATE_BARRIER, &b->w.mdev->flags);
+                                       set_bit(CREATE_BARRIER, &tconn->flags);
                                }
 
                                drbd_queue_work(&tconn->data.work, &b->w);
@@ -446,10 +448,8 @@ void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
  */
 void tl_clear(struct drbd_tconn *tconn)
 {
-       struct drbd_conf *mdev;
        struct list_head *le, *tle;
        struct drbd_request *r;
-       int vnr;
 
        spin_lock_irq(&tconn->req_lock);
 
@@ -468,10 +468,7 @@ void tl_clear(struct drbd_tconn *tconn)
        }
 
        /* ensure bit indicating barrier is required is clear */
-       rcu_read_lock();
-       idr_for_each_entry(&tconn->volumes, mdev, vnr)
-               clear_bit(CREATE_BARRIER, &mdev->flags);
-       rcu_read_unlock();
+       clear_bit(CREATE_BARRIER, &tconn->flags);
 
        spin_unlock_irq(&tconn->req_lock);
 }
@@ -499,6 +496,8 @@ void tl_abort_disk_io(struct drbd_conf *mdev)
        while (b) {
                list_for_each_safe(le, tle, &b->requests) {
                        req = list_entry(le, struct drbd_request, tl_requests);
+                       if (!(req->rq_state & RQ_LOCAL_PENDING))
+                               continue;
                        if (req->w.mdev == mdev)
                                _req_mod(req, ABORT_DISK_IO);
                }
@@ -507,6 +506,8 @@ void tl_abort_disk_io(struct drbd_conf *mdev)
 
        list_for_each_safe(le, tle, &tconn->barrier_acked_requests) {
                req = list_entry(le, struct drbd_request, tl_requests);
+               if (!(req->rq_state & RQ_LOCAL_PENDING))
+                       continue;
                if (req->w.mdev == mdev)
                        _req_mod(req, ABORT_DISK_IO);
        }
@@ -1081,7 +1082,11 @@ void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
 
        D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
 
-       uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
+       uuid = mdev->ldev->md.uuid[UI_BITMAP];
+       if (uuid && uuid != UUID_JUST_CREATED)
+               uuid = uuid + UUID_NEW_BM_OFFSET;
+       else
+               get_random_bytes(&uuid, sizeof(u64));
        drbd_uuid_set(mdev, UI_BITMAP, uuid);
        drbd_print_uuids(mdev, "updated sync UUID");
        drbd_md_sync(mdev);
@@ -1138,10 +1143,10 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
 }
 
 /**
- * drbd_send_state() - Sends the drbd state to the peer
+ * drbd_send_current_state() - Sends the drbd state to the peer
  * @mdev:      DRBD device.
  */
-int drbd_send_state(struct drbd_conf *mdev)
+int drbd_send_current_state(struct drbd_conf *mdev)
 {
        struct drbd_socket *sock;
        struct p_state *p;
@@ -1154,6 +1159,29 @@ int drbd_send_state(struct drbd_conf *mdev)
        return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
 }
 
+/**
+ * drbd_send_state() - After a state change, sends the new state to the peer
+ * @mdev:      DRBD device.
+ * @state:     the state to send, not necessarily the current state.
+ *
+ * Each state change queues an "after_state_ch" work, which will eventually
+ * send the resulting new state to the peer. If more state changes happen
+ * between queuing and processing of the after_state_ch work, we still
+ * want to send each intermediary state in the order it occurred.
+ */
+int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
+{
+       struct drbd_socket *sock;
+       struct p_state *p;
+
+       sock = &mdev->tconn->data;
+       p = drbd_prepare_command(mdev, sock);
+       if (!p)
+               return -EIO;
+       p->state = cpu_to_be32(state.i); /* Within the send mutex */
+       return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
+}
+
 int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
 {
        struct drbd_socket *sock;
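
The kerneldoc added above explains why drbd_send_state() now takes the state as an argument. Its callers live outside this file; the fragment below is only a sketch of the intended calling pattern, with after_state_ch() named after the comment above and its signature assumed rather than taken from this patch:

static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags)
{
	/* Sketch only, not part of this patch: send the state captured
	 * when this work was queued ("ns"), not whatever mdev->state may
	 * have become by the time the work runs. */
	if (os.i != ns.i)
		drbd_send_state(mdev, ns);
}
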
@@ -1166,7 +1194,6 @@ int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drb
        p->mask = cpu_to_be32(mask.i);
        p->val = cpu_to_be32(val.i);
        return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
-
 }
 
 int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
@@ -1775,8 +1802,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
 
        sock = &mdev->tconn->data;
        p = drbd_prepare_command(mdev, sock);
-       dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_tfm) ?
-               crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+       dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
 
        if (!p)
                return -EIO;
@@ -1849,8 +1875,7 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
        sock = &mdev->tconn->data;
        p = drbd_prepare_command(mdev, sock);
 
-       dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_tfm) ?
-               crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+       dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
 
        if (!p)
                return -EIO;
@@ -2057,7 +2082,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 
        spin_lock_init(&mdev->al_lock);
        spin_lock_init(&mdev->peer_seq_lock);
-       spin_lock_init(&mdev->epoch_lock);
 
        INIT_LIST_HEAD(&mdev->active_ee);
        INIT_LIST_HEAD(&mdev->sync_ee);
@@ -2105,7 +2129,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
        init_waitqueue_head(&mdev->al_wait);
        init_waitqueue_head(&mdev->seq_wait);
 
-       mdev->write_ordering = WO_bdev_flush;
        mdev->resync_wenr = LC_FREE;
        mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
        mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
@@ -2118,9 +2141,6 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
                dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
                                mdev->tconn->receiver.t_state);
 
-       /* no need to lock it, I'm the only thread alive */
-       if (atomic_read(&mdev->current_epoch->epoch_size) !=  0)
-               dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
        mdev->al_writ_cnt  =
        mdev->bm_writ_cnt  =
        mdev->read_cnt     =
@@ -2353,7 +2373,6 @@ void drbd_minor_destroy(struct kref *kref)
        kfree(mdev->p_uuid);
        /* mdev->p_uuid = NULL; */
 
-       kfree(mdev->current_epoch);
        if (mdev->bitmap) /* should no longer be there. */
                drbd_bm_cleanup(mdev);
        __free_page(mdev->md_io_page);
@@ -2600,6 +2619,14 @@ struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
        if (!tl_init(tconn))
                goto fail;
 
+       tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
+       if (!tconn->current_epoch)
+               goto fail;
+       INIT_LIST_HEAD(&tconn->current_epoch->list);
+       tconn->epochs = 1;
+       spin_lock_init(&tconn->epoch_lock);
+       tconn->write_ordering = WO_bdev_flush;
+
        tconn->cstate = C_STANDALONE;
        mutex_init(&tconn->cstate_mutex);
        spin_lock_init(&tconn->req_lock);
@@ -2623,6 +2650,7 @@ struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
        return tconn;
 
 fail:
+       kfree(tconn->current_epoch);
        tl_cleanup(tconn);
        free_cpumask_var(tconn->cpu_mask);
        drbd_free_socket(&tconn->meta);
@@ -2637,6 +2665,10 @@ void conn_destroy(struct kref *kref)
 {
        struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
 
+       if (atomic_read(&tconn->current_epoch->epoch_size) !=  0)
+               conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
+       kfree(tconn->current_epoch);
+
        idr_destroy(&tconn->volumes);
 
        free_cpumask_var(tconn->cpu_mask);
@@ -2718,13 +2750,6 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
        mdev->read_requests = RB_ROOT;
        mdev->write_requests = RB_ROOT;
 
-       mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
-       if (!mdev->current_epoch)
-               goto out_no_epoch;
-
-       INIT_LIST_HEAD(&mdev->current_epoch->list);
-       mdev->epochs = 1;
-
        if (!idr_pre_get(&minors, GFP_KERNEL))
                goto out_no_minor_idr;
        if (idr_get_new_above(&minors, mdev, minor, &minor_got))
@@ -2760,8 +2785,6 @@ out_idr_remove_minor:
        idr_remove(&minors, minor_got);
        synchronize_rcu();
 out_no_minor_idr:
-       kfree(mdev->current_epoch);
-out_no_epoch:
        drbd_bm_cleanup(mdev);
 out_no_bitmap:
        __free_page(mdev->md_io_page);
@@ -2995,7 +3018,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
                goto err;
        }
        if (magic != DRBD_MD_MAGIC_08) {
-               if (magic == DRBD_MD_MAGIC_07) 
+               if (magic == DRBD_MD_MAGIC_07)
                        dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
                else
                        dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");