author     Linus Torvalds <torvalds@linux-foundation.org>  2016-10-10 13:52:05 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-10-10 13:52:05 -0700
commit     8dfb790b15e779232d5d4e3f0102af2bea21ca55
tree       7208241fc93d39f769dcec0c227c8582f117dfce  /fs/ceph
parent     fed41f7d039bad02f94cad9059e4b14cd81d13f2
parent     64f77566e1c84990d6c448bb3960f899521c0b7d
download   linux-8dfb790b15e779232d5d4e3f0102af2bea21ca55.tar.bz2
Merge tag 'ceph-for-4.9-rc1' of git://github.com/ceph/ceph-client
Pull Ceph updates from Ilya Dryomov:
"The big ticket item here is support for rbd exclusive-lock feature,
with maintenance operations offloaded to userspace (Douglas Fuller,
Mike Christie and myself). Another block device bullet is a series
fixing up layering error paths (myself).

On the filesystem side, we've got patches that improve our handling of
buffered vs dio write races (Neil Brown) and a few assorted fixes from
Zheng. Also included a couple of random cleanups and a minor CRUSH
update"
* tag 'ceph-for-4.9-rc1' of git://github.com/ceph/ceph-client: (39 commits)
crush: remove redundant local variable
crush: don't normalize input of crush_ln iteratively
libceph: ceph_build_auth() doesn't need ceph_auth_build_hello()
libceph: use CEPH_AUTH_UNKNOWN in ceph_auth_build_hello()
ceph: fix description for rsize and rasize mount options
rbd: use kmalloc_array() in rbd_header_from_disk()
ceph: use list_move instead of list_del/list_add
ceph: handle CEPH_SESSION_REJECT message
ceph: avoid accessing / when mounting a subpath
ceph: fix mandatory flock check
ceph: remove warning when ceph_releasepage() is called on dirty page
ceph: ignore error from invalidate_inode_pages2_range() in direct write
ceph: fix error handling of start_read()
rbd: add rbd_obj_request_error() helper
rbd: img_data requests don't own their page array
rbd: don't call rbd_osd_req_format_read() for !img_data requests
rbd: rework rbd_img_obj_exists_submit() error paths
rbd: don't crash or leak on errors in rbd_img_obj_parent_read_full_callback()
rbd: move bumping img_request refcount into rbd_obj_request_submit()
rbd: mark the original request as done if stat request fails
...
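Of note for userspace, the mandatory flock fix in the list above changes ceph_flock() to answer LOCK_MAND requests with EOPNOTSUPP instead of testing the inode for mandatory locking. A minimal sketch of how that surfaces after this change, assuming a CephFS mount at the hypothetical path /mnt/cephfs (the fallback defines mirror the values in include/uapi/asm-generic/fcntl.h):

/*
 * Illustrative only, not part of the patch set: advisory flock keeps
 * working, while a LOCK_MAND request is now refused by the kernel.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/file.h>
#include <unistd.h>

#ifndef LOCK_MAND
#define LOCK_MAND 32	/* mandatory-share class, from <linux/fcntl.h> */
#define LOCK_READ 64
#endif

int main(void)
{
	int fd = open("/mnt/cephfs/testfile", O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* An ordinary advisory flock is unaffected by the fix. */
	if (flock(fd, LOCK_EX) == 0)
		puts("advisory LOCK_EX granted");
	flock(fd, LOCK_UN);

	/* A LOCK_MAND request should now fail with EOPNOTSUPP. */
	if (flock(fd, LOCK_MAND | LOCK_READ) < 0)
		printf("LOCK_MAND: %s\n", strerror(errno));

	close(fd);
	return 0;
}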
Diffstat (limited to 'fs/ceph')
-rw-r--r--   fs/ceph/addr.c       | 24
-rw-r--r--   fs/ceph/file.c       |  4
-rw-r--r--   fs/ceph/locks.c      |  4
-rw-r--r--   fs/ceph/mds_client.c | 30
-rw-r--r--   fs/ceph/mds_client.h |  1
-rw-r--r--   fs/ceph/strings.c    |  2
-rw-r--r--   fs/ceph/super.c      | 49
7 files changed, 61 insertions(+), 53 deletions(-)
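One pattern in the fs/ceph/file.c hunk below deserves a callout: a best-effort call to invalidate_inode_pages2_range() used to overwrite ret, the byte count that ceph_direct_read_write() returns, so the fix captures its result in a separate ret2 and only logs it. A standalone userspace sketch of the same pattern, with hypothetical names:

/*
 * Sketch only: a best-effort cleanup call must not clobber the value
 * the caller actually reports.
 */
#include <stdio.h>

static int best_effort_cleanup(void)
{
	return -1;	/* pretend the cache invalidation failed */
}

static long do_write(long nbytes)
{
	long ret = nbytes;	/* bytes written: the caller wants this */

	/* Buggy version: "ret = best_effort_cleanup();" would lose nbytes. */
	int ret2 = best_effort_cleanup();
	if (ret2 < 0)
		fprintf(stderr, "cleanup failed (%d), ignoring\n", ret2);

	return ret;	/* still the write result */
}

int main(void)
{
	printf("wrote %ld bytes\n", do_write(4096));
	return 0;
}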
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index d5b6f959a3c3..ef3ebd780aff 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -175,9 +175,8 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset,
 
 static int ceph_releasepage(struct page *page, gfp_t g)
 {
-	dout("%p releasepage %p idx %lu\n", page->mapping->host,
-	     page, page->index);
-	WARN_ON(PageDirty(page));
+	dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host,
+	     page, page->index, PageDirty(page) ? "" : "not ");
 
 	/* Can we release the page from the cache? */
 	if (!ceph_release_fscache_page(page, g))
@@ -298,14 +297,6 @@ unlock:
 	kfree(osd_data->pages);
 }
 
-static void ceph_unlock_page_vector(struct page **pages, int num_pages)
-{
-	int i;
-
-	for (i = 0; i < num_pages; i++)
-		unlock_page(pages[i]);
-}
-
 /*
  * start an async read(ahead) operation. return nr_pages we submitted
  * a read for on success, or negative error code.
@@ -370,6 +361,10 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 			dout("start_read %p add_to_page_cache failed %p\n",
 			     inode, page);
 			nr_pages = i;
+			if (nr_pages > 0) {
+				len = nr_pages << PAGE_SHIFT;
+				break;
+			}
 			goto out_pages;
 		}
 		pages[i] = page;
@@ -386,8 +381,11 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 	return nr_pages;
 
 out_pages:
-	ceph_unlock_page_vector(pages, nr_pages);
-	ceph_release_page_vector(pages, nr_pages);
+	for (i = 0; i < nr_pages; ++i) {
+		ceph_fscache_readpage_cancel(inode, pages[i]);
+		unlock_page(pages[i]);
+	}
+	ceph_put_page_vector(pages, nr_pages, false);
 out:
 	ceph_osdc_put_request(req);
 	return ret;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 0f5375d8e030..395c7fcb1cea 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -902,10 +902,10 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 		return ret;
 
 	if (write) {
-		ret = invalidate_inode_pages2_range(inode->i_mapping,
+		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
 					pos >> PAGE_SHIFT,
 					(pos + count) >> PAGE_SHIFT);
-		if (ret < 0)
+		if (ret2 < 0)
 			dout("invalidate_inode_pages2_range returned %d\n", ret);
 
 		flags = CEPH_OSD_FLAG_ORDERSNAP |
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index a2cb0c254060..6806dbeaee19 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -210,8 +210,8 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
 
 	if (!(fl->fl_flags & FL_FLOCK))
 		return -ENOLCK;
 	/* No mandatory locks */
-	if (__mandatory_lock(file->f_mapping->host) && fl->fl_type != F_UNLCK)
-		return -ENOLCK;
+	if (fl->fl_type & LOCK_MAND)
+		return -EOPNOTSUPP;
 
 	dout("ceph_flock, fl_file: %p", fl->fl_file);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index f72d4ae303b2..815acd1a56d4 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -370,6 +370,7 @@ const char *ceph_session_state_name(int s)
 	case CEPH_MDS_SESSION_CLOSING: return "closing";
 	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
 	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
+	case CEPH_MDS_SESSION_REJECTED: return "rejected";
 	default: return "???";
 	}
 }
@@ -1150,8 +1151,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
 		while (!list_empty(&ci->i_cap_flush_list)) {
 			cf = list_first_entry(&ci->i_cap_flush_list,
 					      struct ceph_cap_flush, i_list);
-			list_del(&cf->i_list);
-			list_add(&cf->i_list, &to_remove);
+			list_move(&cf->i_list, &to_remove);
 		}
 
 		spin_lock(&mdsc->cap_dirty_lock);
@@ -1378,7 +1378,7 @@ static int request_close_session(struct ceph_mds_client *mdsc,
 	if (!msg)
 		return -ENOMEM;
 	ceph_con_send(&session->s_con, msg);
-	return 0;
+	return 1;
 }
 
 /*
@@ -2131,6 +2131,10 @@ static int __do_request(struct ceph_mds_client *mdsc,
 		     ceph_session_state_name(session->s_state));
 		if (session->s_state != CEPH_MDS_SESSION_OPEN &&
 		    session->s_state != CEPH_MDS_SESSION_HUNG) {
+			if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
+				err = -EACCES;
+				goto out_session;
+			}
 			if (session->s_state == CEPH_MDS_SESSION_NEW ||
 			    session->s_state == CEPH_MDS_SESSION_CLOSING)
 				__open_session(mdsc, session);
@@ -2652,6 +2656,15 @@ static void handle_session(struct ceph_mds_session *session,
 		wake_up_session_caps(session, 0);
 		break;
 
+	case CEPH_SESSION_REJECT:
+		WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
+		pr_info("mds%d rejected session\n", session->s_mds);
+		session->s_state = CEPH_MDS_SESSION_REJECTED;
+		cleanup_session_requests(mdsc, session);
+		remove_session_caps(session);
+		wake = 2; /* for good measure */
+		break;
+
 	default:
 		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
 		WARN_ON(1);
@@ -3557,11 +3570,11 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
 /*
  * true if all sessions are closed, or we force unmount
  */
-static bool done_closing_sessions(struct ceph_mds_client *mdsc)
+static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
 {
 	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
 		return true;
-	return atomic_read(&mdsc->num_sessions) == 0;
+	return atomic_read(&mdsc->num_sessions) <= skipped;
 }
 
 /*
@@ -3572,6 +3585,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
 	struct ceph_options *opts = mdsc->fsc->client->options;
 	struct ceph_mds_session *session;
 	int i;
+	int skipped = 0;
 
 	dout("close_sessions\n");
 
@@ -3583,7 +3597,8 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
 			continue;
 		mutex_unlock(&mdsc->mutex);
 		mutex_lock(&session->s_mutex);
-		__close_session(mdsc, session);
+		if (__close_session(mdsc, session) <= 0)
+			skipped++;
 		mutex_unlock(&session->s_mutex);
 		ceph_put_mds_session(session);
 		mutex_lock(&mdsc->mutex);
@@ -3591,7 +3606,8 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
 	mutex_unlock(&mdsc->mutex);
 
 	dout("waiting for sessions to close\n");
-	wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
+	wait_event_timeout(mdsc->session_close_wq,
+			   done_closing_sessions(mdsc, skipped),
 			   ceph_timeout_jiffies(opts->mount_timeout));
 
 	/* tear down remaining sessions */
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 6b3679737d4a..3c6f77b7bb02 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -121,6 +121,7 @@ enum {
 	CEPH_MDS_SESSION_CLOSING = 5,
 	CEPH_MDS_SESSION_RESTARTING = 6,
 	CEPH_MDS_SESSION_RECONNECTING = 7,
+	CEPH_MDS_SESSION_REJECTED = 8,
 };
 
 struct ceph_mds_session {
diff --git a/fs/ceph/strings.c b/fs/ceph/strings.c
index 89e6bc321df3..913dea163d5c 100644
--- a/fs/ceph/strings.c
+++ b/fs/ceph/strings.c
@@ -43,6 +43,8 @@ const char *ceph_session_op_name(int op)
 	case CEPH_SESSION_RECALL_STATE: return "recall_state";
 	case CEPH_SESSION_FLUSHMSG: return "flushmsg";
 	case CEPH_SESSION_FLUSHMSG_ACK: return "flushmsg_ack";
+	case CEPH_SESSION_FORCE_RO: return "force_ro";
+	case CEPH_SESSION_REJECT: return "reject";
 	}
 	return "???";
 }
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index e247f6f0feb7..a29ffce98187 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -396,10 +396,12 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
 	 */
 	dev_name_end = strchr(dev_name, '/');
 	if (dev_name_end) {
-		fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
-		if (!fsopt->server_path) {
-			err = -ENOMEM;
-			goto out;
+		if (strlen(dev_name_end) > 1) {
+			fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
+			if (!fsopt->server_path) {
+				err = -ENOMEM;
+				goto out;
+			}
 		}
 	} else {
 		dev_name_end = dev_name + strlen(dev_name);
@@ -788,15 +790,10 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
 		struct inode *inode = req->r_target_inode;
 		req->r_target_inode = NULL;
 		dout("open_root_inode success\n");
-		if (ceph_ino(inode) == CEPH_INO_ROOT &&
-		    fsc->sb->s_root == NULL) {
-			root = d_make_root(inode);
-			if (!root) {
-				root = ERR_PTR(-ENOMEM);
-				goto out;
-			}
-		} else {
-			root = d_obtain_root(inode);
+		root = d_make_root(inode);
+		if (!root) {
+			root = ERR_PTR(-ENOMEM);
+			goto out;
 		}
 		ceph_init_dentry(root);
 		dout("open_root_inode success, root dentry is %p\n", root);
@@ -825,17 +822,24 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
 	mutex_lock(&fsc->client->mount_mutex);
 
 	if (!fsc->sb->s_root) {
+		const char *path;
 		err = __ceph_open_session(fsc->client, started);
 		if (err < 0)
 			goto out;
 
-		dout("mount opening root\n");
-		root = open_root_dentry(fsc, "", started);
+		if (!fsc->mount_options->server_path) {
+			path = "";
+			dout("mount opening path \\t\n");
+		} else {
+			path = fsc->mount_options->server_path + 1;
+			dout("mount opening path %s\n", path);
+		}
+		root = open_root_dentry(fsc, path, started);
 		if (IS_ERR(root)) {
 			err = PTR_ERR(root);
 			goto out;
 		}
-		fsc->sb->s_root = root;
+		fsc->sb->s_root = dget(root);
 		first = 1;
 
 		err = ceph_fs_debugfs_init(fsc);
@@ -843,19 +847,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
 			goto fail;
 		}
 
-		if (!fsc->mount_options->server_path) {
-			root = fsc->sb->s_root;
-			dget(root);
-		} else {
-			const char *path = fsc->mount_options->server_path + 1;
-			dout("mount opening path %s\n", path);
-			root = open_root_dentry(fsc, path, started);
-			if (IS_ERR(root)) {
-				err = PTR_ERR(root);
-				goto fail;
-			}
-		}
-
 		fsc->mount_state = CEPH_MOUNT_MOUNTED;
 		dout("mount success\n");
 		mutex_unlock(&fsc->client->mount_mutex);
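For context on the fs/ceph/super.c hunks above: the path component of the device string is now handed straight to open_root_dentry(), so mounting a subdirectory no longer instantiates the root of the filesystem first, and a bare trailing '/' leaves fsopt->server_path NULL. A sketch of the equivalent raw mount(2) call — the monitor address, key, and mount point are made up, and mount.ceph(8) normally assembles this for you:

/*
 * Illustrative only: exercise the subpath handling from userspace.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "<mon-ip>:<port>:<server-path>" -- parse_mount_options() splits
	 * the device string at the first '/'. */
	const char *dev = "192.168.0.1:6789:/some/subdir";
	const char *opts = "name=admin,secret=AQD...";	/* key elided */

	if (mount(dev, "/mnt/cephfs", "ceph", 0, opts) < 0) {
		perror("mount");
		return 1;
	}
	puts("mounted subdir without touching /");
	return 0;
}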