Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 0122491d947cdc8f4324ec3ec70be7bc88ffce61..84f37f34f9aa663952a60e8a3440a81934c083b0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -679,7 +679,7 @@ static struct dentry *get_nonsnap_parent(struct dentry *dentry)
         * except to resplice to another snapdir, and either the old or new
         * result is a valid result.
         */
-       while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
+       while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
                dentry = dentry->d_parent;
        return dentry;
 }
@@ -716,20 +716,20 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
        } else if (req->r_dentry) {
                /* ignore race with rename; old or new d_parent is okay */
                struct dentry *parent = req->r_dentry->d_parent;
-               struct inode *dir = parent->d_inode;
+               struct inode *dir = d_inode(parent);
 
                if (dir->i_sb != mdsc->fsc->sb) {
                        /* not this fs! */
-                       inode = req->r_dentry->d_inode;
+                       inode = d_inode(req->r_dentry);
                } else if (ceph_snap(dir) != CEPH_NOSNAP) {
                        /* direct snapped/virtual snapdir requests
                         * based on parent dir inode */
                        struct dentry *dn = get_nonsnap_parent(parent);
-                       inode = dn->d_inode;
+                       inode = d_inode(dn);
                        dout("__choose_mds using nonsnap parent %p\n", inode);
                } else {
                        /* dentry target */
-                       inode = req->r_dentry->d_inode;
+                       inode = d_inode(req->r_dentry);
                        if (!inode || mode == USE_AUTH_MDS) {
                                /* dir + name */
                                inode = dir;
@@ -1021,6 +1021,33 @@ static void cleanup_cap_releases(struct ceph_mds_session *session)
        spin_unlock(&session->s_cap_lock);
 }
 
+static void cleanup_session_requests(struct ceph_mds_client *mdsc,
+                                    struct ceph_mds_session *session)
+{
+       struct ceph_mds_request *req;
+       struct rb_node *p;
+
+       dout("cleanup_session_requests mds%d\n", session->s_mds);
+       mutex_lock(&mdsc->mutex);
+       while (!list_empty(&session->s_unsafe)) {
+               req = list_first_entry(&session->s_unsafe,
+                                      struct ceph_mds_request, r_unsafe_item);
+               list_del_init(&req->r_unsafe_item);
+               pr_info(" dropping unsafe request %llu\n", req->r_tid);
+               __unregister_request(mdsc, req);
+       }
+       /* zero r_attempts, so kick_requests() will re-send requests */
+       p = rb_first(&mdsc->request_tree);
+       while (p) {
+               req = rb_entry(p, struct ceph_mds_request, r_node);
+               p = rb_next(p);
+               if (req->r_session &&
+                   req->r_session->s_mds == session->s_mds)
+                       req->r_attempts = 0;
+       }
+       mutex_unlock(&mdsc->mutex);
+}
+
 /*
  * Helper to safely iterate over all caps associated with a session, with
  * special care taken to handle a racing __ceph_remove_cap().
@@ -1098,7 +1125,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
             cap, ci, &ci->vfs_inode);
        spin_lock(&ci->i_ceph_lock);
        __ceph_remove_cap(cap, false);
-       if (!__ceph_is_any_real_caps(ci)) {
+       if (!ci->i_auth_cap) {
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(inode->i_sb)->mdsc;
 
@@ -1120,13 +1147,6 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                        mdsc->num_cap_flushing--;
                        drop = 1;
                }
-               if (drop && ci->i_wrbuffer_ref) {
-                       pr_info(" dropping dirty data for %p %lld\n",
-                               inode, ceph_ino(inode));
-                       ci->i_wrbuffer_ref = 0;
-                       ci->i_wrbuffer_ref_head = 0;
-                       drop++;
-               }
                spin_unlock(&mdsc->cap_dirty_lock);
        }
        spin_unlock(&ci->i_ceph_lock);
@@ -1712,7 +1732,7 @@ retry:
        seq = read_seqbegin(&rename_lock);
        rcu_read_lock();
        for (temp = dentry; !IS_ROOT(temp);) {
-               struct inode *inode = temp->d_inode;
+               struct inode *inode = d_inode(temp);
                if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
                        len++;  /* slash only */
                else if (stop_on_nosnap && inode &&
@@ -1736,7 +1756,7 @@ retry:
                struct inode *inode;
 
                spin_lock(&temp->d_lock);
-               inode = temp->d_inode;
+               inode = d_inode(temp);
                if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
                        dout("build_path path+%d: %p SNAPDIR\n",
                             pos, temp);
@@ -1770,7 +1790,7 @@ retry:
                goto retry;
        }
 
-       *base = ceph_ino(temp->d_inode);
+       *base = ceph_ino(d_inode(temp));
        *plen = len;
        dout("build_path on %p %d built %llx '%.*s'\n",
             dentry, d_count(dentry), *base, len, path);
@@ -1783,8 +1803,8 @@ static int build_dentry_path(struct dentry *dentry,
 {
        char *path;
 
-       if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) {
-               *pino = ceph_ino(dentry->d_parent->d_inode);
+       if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
+               *pino = ceph_ino(d_inode(dentry->d_parent));
                *ppath = dentry->d_name.name;
                *ppathlen = dentry->d_name.len;
                return 0;
@@ -1925,7 +1945,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
        releases = 0;
        if (req->r_inode_drop)
                releases += ceph_encode_inode_release(&p,
-                     req->r_inode ? req->r_inode : req->r_dentry->d_inode,
+                     req->r_inode ? req->r_inode : d_inode(req->r_dentry),
                      mds, req->r_inode_drop, req->r_inode_unless, 0);
        if (req->r_dentry_drop)
                releases += ceph_encode_dentry_release(&p, req->r_dentry,
@@ -1935,7 +1955,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
                       mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
        if (req->r_old_inode_drop)
                releases += ceph_encode_inode_release(&p,
-                     req->r_old_dentry->d_inode,
+                     d_inode(req->r_old_dentry),
                      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
 
        if (drop_cap_releases) {
@@ -2596,6 +2616,7 @@ static void handle_session(struct ceph_mds_session *session,
        case CEPH_SESSION_CLOSE:
                if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
                        pr_info("mds%d reconnect denied\n", session->s_mds);
+               cleanup_session_requests(mdsc, session);
                remove_session_caps(session);
                wake = 2; /* for good measure */
                wake_up_all(&mdsc->session_close_wq);
@@ -2871,7 +2892,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
        spin_unlock(&session->s_cap_lock);
 
        /* trim unused caps to reduce MDS's cache rejoin time */
-       shrink_dcache_parent(mdsc->fsc->sb->s_root);
+       if (mdsc->fsc->sb->s_root)
+               shrink_dcache_parent(mdsc->fsc->sb->s_root);
 
        ceph_con_close(&session->s_con);
        ceph_con_open(&session->s_con,
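
Note: the d_inode() accessor that the hunks above switch to is the VFS helper from
include/linux/dcache.h added in the same release cycle. A minimal sketch of its
definition, reproduced from memory rather than copied from the tree, so verify
against include/linux/dcache.h before relying on the exact form:

	/* Return the inode a (positive) dentry points at; using the helper
	 * instead of dereferencing dentry->d_inode directly keeps call sites
	 * uniform across the VFS. */
	static inline struct inode *d_inode(const struct dentry *dentry)
	{
		return dentry->d_inode;
	}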