Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r--  fs/fuse/dev.c  654
1 file changed, 280 insertions, 374 deletions
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ea8237513dfa..dadd617d826c 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -40,107 +40,30 @@ static struct fuse_dev *fuse_get_dev(struct file *file)
return READ_ONCE(file->private_data);
}
-static void fuse_request_init(struct fuse_req *req, struct page **pages,
- struct fuse_page_desc *page_descs,
- unsigned npages)
+static void fuse_request_init(struct fuse_req *req)
{
INIT_LIST_HEAD(&req->list);
INIT_LIST_HEAD(&req->intr_entry);
init_waitqueue_head(&req->waitq);
refcount_set(&req->count, 1);
- req->pages = pages;
- req->page_descs = page_descs;
- req->max_pages = npages;
__set_bit(FR_PENDING, &req->flags);
}
-static struct page **fuse_req_pages_alloc(unsigned int npages, gfp_t flags,
- struct fuse_page_desc **desc)
-{
- struct page **pages;
-
- pages = kzalloc(npages * (sizeof(struct page *) +
- sizeof(struct fuse_page_desc)), flags);
- *desc = (void *) pages + npages * sizeof(struct page *);
-
- return pages;
-}
-
-static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
+static struct fuse_req *fuse_request_alloc(gfp_t flags)
{
struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
- if (req) {
- struct page **pages = NULL;
- struct fuse_page_desc *page_descs = NULL;
-
- WARN_ON(npages > FUSE_MAX_MAX_PAGES);
- if (npages > FUSE_REQ_INLINE_PAGES) {
- pages = fuse_req_pages_alloc(npages, flags,
- &page_descs);
- if (!pages) {
- kmem_cache_free(fuse_req_cachep, req);
- return NULL;
- }
- } else if (npages) {
- pages = req->inline_pages;
- page_descs = req->inline_page_descs;
- }
+ if (req)
+ fuse_request_init(req);
- fuse_request_init(req, pages, page_descs, npages);
- }
return req;
}
-struct fuse_req *fuse_request_alloc(unsigned npages)
-{
- return __fuse_request_alloc(npages, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(fuse_request_alloc);
-
-struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
-{
- return __fuse_request_alloc(npages, GFP_NOFS);
-}
-
-static void fuse_req_pages_free(struct fuse_req *req)
-{
- if (req->pages != req->inline_pages)
- kfree(req->pages);
-}
-
-bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
- gfp_t flags)
-{
- struct page **pages;
- struct fuse_page_desc *page_descs;
- unsigned int npages = min_t(unsigned int,
- max_t(unsigned int, req->max_pages * 2,
- FUSE_DEFAULT_MAX_PAGES_PER_REQ),
- fc->max_pages);
- WARN_ON(npages <= req->max_pages);
-
- pages = fuse_req_pages_alloc(npages, flags, &page_descs);
- if (!pages)
- return false;
-
- memcpy(pages, req->pages, sizeof(struct page *) * req->max_pages);
- memcpy(page_descs, req->page_descs,
- sizeof(struct fuse_page_desc) * req->max_pages);
- fuse_req_pages_free(req);
- req->pages = pages;
- req->page_descs = page_descs;
- req->max_pages = npages;
-
- return true;
-}
-
-void fuse_request_free(struct fuse_req *req)
+static void fuse_request_free(struct fuse_req *req)
{
- fuse_req_pages_free(req);
kmem_cache_free(fuse_req_cachep, req);
}
-void __fuse_get_request(struct fuse_req *req)
+static void __fuse_get_request(struct fuse_req *req)
{
refcount_inc(&req->count);
}
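
[editor's note] With the inline page array gone from fuse_req, per-request page storage now travels in the caller's argument block instead. A sketch of how a converted caller can size a single allocation for the header plus page pointers and descriptors, mirroring what fuse_retrieve() does further down in this patch (struct my_pages_op and the helper name are illustrative, not part of the patch):

struct my_pages_op {
	struct fuse_args_pages ap;	/* generic args + pages/descs pointers */
};

static struct my_pages_op *my_pages_op_alloc(unsigned int npages, gfp_t gfp)
{
	struct my_pages_op *op;

	/* one allocation: struct, then page pointers, then descriptors */
	op = kzalloc(sizeof(*op) + npages * (sizeof(op->ap.pages[0]) +
					     sizeof(op->ap.descs[0])), gfp);
	if (!op)
		return NULL;

	op->ap.pages = (void *)(op + 1);
	op->ap.descs = (void *)(op->ap.pages + npages);
	return op;
}
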
@@ -177,8 +100,9 @@ static void fuse_drop_waiting(struct fuse_conn *fc)
}
}
-static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
- bool for_background)
+static void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req);
+
+static struct fuse_req *fuse_get_req(struct fuse_conn *fc, bool for_background)
{
struct fuse_req *req;
int err;
@@ -201,7 +125,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
if (fc->conn_error)
goto out;
- req = fuse_request_alloc(npages);
+ req = fuse_request_alloc(GFP_KERNEL);
err = -ENOMEM;
if (!req) {
if (for_background)
@@ -229,101 +153,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
return ERR_PTR(err);
}
-struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
-{
- return __fuse_get_req(fc, npages, false);
-}
-EXPORT_SYMBOL_GPL(fuse_get_req);
-
-struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
- unsigned npages)
-{
- return __fuse_get_req(fc, npages, true);
-}
-EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
-
-/*
- * Return request in fuse_file->reserved_req. However that may
- * currently be in use. If that is the case, wait for it to become
- * available.
- */
-static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
- struct file *file)
-{
- struct fuse_req *req = NULL;
- struct fuse_inode *fi = get_fuse_inode(file_inode(file));
- struct fuse_file *ff = file->private_data;
-
- do {
- wait_event(fc->reserved_req_waitq, ff->reserved_req);
- spin_lock(&fi->lock);
- if (ff->reserved_req) {
- req = ff->reserved_req;
- ff->reserved_req = NULL;
- req->stolen_file = get_file(file);
- }
- spin_unlock(&fi->lock);
- } while (!req);
-
- return req;
-}
-
-/*
- * Put stolen request back into fuse_file->reserved_req
- */
-static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
-{
- struct file *file = req->stolen_file;
- struct fuse_inode *fi = get_fuse_inode(file_inode(file));
- struct fuse_file *ff = file->private_data;
-
- WARN_ON(req->max_pages);
- spin_lock(&fi->lock);
- memset(req, 0, sizeof(*req));
- fuse_request_init(req, NULL, NULL, 0);
- BUG_ON(ff->reserved_req);
- ff->reserved_req = req;
- wake_up_all(&fc->reserved_req_waitq);
- spin_unlock(&fi->lock);
- fput(file);
-}
-
-/*
- * Gets a request for a file operation, always succeeds
- *
- * This is used for sending the FLUSH request, which must get to
- * userspace, due to POSIX locks which may need to be unlocked.
- *
- * If allocation fails due to OOM, use the reserved request in
- * fuse_file.
- *
- * This is very unlikely to deadlock accidentally, since the
- * filesystem should not have its own file open. If deadlock is
- * intentional, it can still be broken by "aborting" the filesystem.
- */
-struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
- struct file *file)
-{
- struct fuse_req *req;
-
- atomic_inc(&fc->num_waiting);
- wait_event(fc->blocked_waitq, fc->initialized);
- /* Matches smp_wmb() in fuse_set_initialized() */
- smp_rmb();
- req = fuse_request_alloc(0);
- if (!req)
- req = get_reserved_req(fc, file);
-
- req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
- req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
- req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
-
- __set_bit(FR_WAITING, &req->flags);
- __clear_bit(FR_BACKGROUND, &req->flags);
- return req;
-}
-
-void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
if (refcount_dec_and_test(&req->count)) {
if (test_bit(FR_BACKGROUND, &req->flags)) {
@@ -342,15 +172,11 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
fuse_drop_waiting(fc);
}
- if (req->stolen_file)
- put_reserved_req(fc, req);
- else
- fuse_request_free(req);
+ fuse_request_free(req);
}
}
-EXPORT_SYMBOL_GPL(fuse_put_request);
-static unsigned len_args(unsigned numargs, struct fuse_arg *args)
+unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
{
unsigned nbytes = 0;
unsigned i;
@@ -360,25 +186,47 @@ static unsigned len_args(unsigned numargs, struct fuse_arg *args)
return nbytes;
}
+EXPORT_SYMBOL_GPL(fuse_len_args);
-static u64 fuse_get_unique(struct fuse_iqueue *fiq)
+u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
fiq->reqctr += FUSE_REQ_ID_STEP;
return fiq->reqctr;
}
+EXPORT_SYMBOL_GPL(fuse_get_unique);
static unsigned int fuse_req_hash(u64 unique)
{
return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}
-static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
+/**
+ * A new request is available, wake fiq->waitq
+ */
+static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
+__releases(fiq->lock)
+{
+ wake_up(&fiq->waitq);
+ kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ spin_unlock(&fiq->lock);
+}
+
+const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
+ .wake_forget_and_unlock = fuse_dev_wake_and_unlock,
+ .wake_interrupt_and_unlock = fuse_dev_wake_and_unlock,
+ .wake_pending_and_unlock = fuse_dev_wake_and_unlock,
+};
+EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
+
+static void queue_request_and_unlock(struct fuse_iqueue *fiq,
+ struct fuse_req *req)
+__releases(fiq->lock)
{
req->in.h.len = sizeof(struct fuse_in_header) +
- len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
+ fuse_len_args(req->args->in_numargs,
+ (struct fuse_arg *) req->args->in_args);
list_add_tail(&req->list, &fiq->pending);
- wake_up_locked(&fiq->waitq);
- kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ fiq->ops->wake_pending_and_unlock(fiq);
}
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
@@ -389,16 +237,15 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
forget->forget_one.nodeid = nodeid;
forget->forget_one.nlookup = nlookup;
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
if (fiq->connected) {
fiq->forget_list_tail->next = forget;
fiq->forget_list_tail = forget;
- wake_up_locked(&fiq->waitq);
- kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ fiq->ops->wake_forget_and_unlock(fiq);
} else {
kfree(forget);
+ spin_unlock(&fiq->lock);
}
- spin_unlock(&fiq->waitq.lock);
}
static void flush_bg_queue(struct fuse_conn *fc)
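
[editor's note] The fuse_dev_fiq_ops table above is the hook that lets an alternative transport supply its own wakeup path. A minimal sketch of what a driver-side ops table could look like, assuming the input queue carries a driver-private pointer (the fiq->priv field and all my_* names here are assumptions for illustration); each callback is responsible for dropping fiq->lock, exactly like fuse_dev_wake_and_unlock():

struct my_transport {
	struct task_struct *worker;		/* illustrative transport thread */
};

static void my_wake_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	struct my_transport *t = fiq->priv;	/* assumed driver-private field */

	spin_unlock(&fiq->lock);		/* the callback owns the unlock */
	wake_up_process(t->worker);		/* kick the transport thread */
}

static const struct fuse_iqueue_ops my_fiq_ops = {
	.wake_forget_and_unlock		= my_wake_and_unlock,
	.wake_interrupt_and_unlock	= my_wake_and_unlock,
	.wake_pending_and_unlock	= my_wake_and_unlock,
};
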
@@ -412,10 +259,9 @@ static void flush_bg_queue(struct fuse_conn *fc)
req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
list_del(&req->list);
fc->active_background++;
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
req->in.h.unique = fuse_get_unique(fiq);
- queue_request(fiq, req);
- spin_unlock(&fiq->waitq.lock);
+ queue_request_and_unlock(fiq, req);
}
}
@@ -427,9 +273,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
* the 'end' callback is called if given, else the reference to the
* request is released
*/
-static void request_end(struct fuse_conn *fc, struct fuse_req *req)
+void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
{
struct fuse_iqueue *fiq = &fc->iq;
+ bool async = req->args->end;
if (test_and_set_bit(FR_FINISHED, &req->flags))
goto put_request;
@@ -439,9 +286,9 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
* smp_mb() from queue_interrupt().
*/
if (!list_empty(&req->intr_entry)) {
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
list_del_init(&req->intr_entry);
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
}
WARN_ON(test_bit(FR_PENDING, &req->flags));
WARN_ON(test_bit(FR_SENT, &req->flags));
@@ -475,18 +322,19 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
wake_up(&req->waitq);
}
- if (req->end)
- req->end(fc, req);
+ if (async)
+ req->args->end(fc, req->args, req->out.h.error);
put_request:
fuse_put_request(fc, req);
}
+EXPORT_SYMBOL_GPL(fuse_request_end);
static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
/* Check whether we've already sent a request to interrupt this req */
if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return -EINVAL;
}
@@ -499,13 +347,13 @@ static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
smp_mb();
if (test_bit(FR_FINISHED, &req->flags)) {
list_del_init(&req->intr_entry);
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return 0;
}
- wake_up_locked(&fiq->waitq);
- kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ fiq->ops->wake_interrupt_and_unlock(fiq);
+ } else {
+ spin_unlock(&fiq->lock);
}
- spin_unlock(&fiq->waitq.lock);
return 0;
}
@@ -535,16 +383,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
if (!err)
return;
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
/* Request is not yet in userspace, bail out */
if (test_bit(FR_PENDING, &req->flags)) {
list_del(&req->list);
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
__fuse_put_request(req);
req->out.h.error = -EINTR;
return;
}
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
}
/*
@@ -559,101 +407,110 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
struct fuse_iqueue *fiq = &fc->iq;
BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
if (!fiq->connected) {
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
req->out.h.error = -ENOTCONN;
} else {
req->in.h.unique = fuse_get_unique(fiq);
- queue_request(fiq, req);
/* acquire extra reference, since request is still needed
- after request_end() */
+ after fuse_request_end() */
__fuse_get_request(req);
- spin_unlock(&fiq->waitq.lock);
+ queue_request_and_unlock(fiq, req);
request_wait_answer(fc, req);
- /* Pairs with smp_wmb() in request_end() */
+ /* Pairs with smp_wmb() in fuse_request_end() */
smp_rmb();
}
}
-void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
-{
- __set_bit(FR_ISREPLY, &req->flags);
- if (!test_bit(FR_WAITING, &req->flags)) {
- __set_bit(FR_WAITING, &req->flags);
- atomic_inc(&fc->num_waiting);
- }
- __fuse_request_send(fc, req);
-}
-EXPORT_SYMBOL_GPL(fuse_request_send);
-
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
- if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
- args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
+ if (fc->minor < 4 && args->opcode == FUSE_STATFS)
+ args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;
if (fc->minor < 9) {
- switch (args->in.h.opcode) {
+ switch (args->opcode) {
case FUSE_LOOKUP:
case FUSE_CREATE:
case FUSE_MKNOD:
case FUSE_MKDIR:
case FUSE_SYMLINK:
case FUSE_LINK:
- args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+ args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
break;
case FUSE_GETATTR:
case FUSE_SETATTR:
- args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+ args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
break;
}
}
if (fc->minor < 12) {
- switch (args->in.h.opcode) {
+ switch (args->opcode) {
case FUSE_CREATE:
- args->in.args[0].size = sizeof(struct fuse_open_in);
+ args->in_args[0].size = sizeof(struct fuse_open_in);
break;
case FUSE_MKNOD:
- args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
+ args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
break;
}
}
}
+static void fuse_force_creds(struct fuse_conn *fc, struct fuse_req *req)
+{
+ req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
+ req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
+ req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
+}
+
+static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
+{
+ req->in.h.opcode = args->opcode;
+ req->in.h.nodeid = args->nodeid;
+ req->args = args;
+}
+
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
struct fuse_req *req;
ssize_t ret;
- req = fuse_get_req(fc, 0);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ if (args->force) {
+ atomic_inc(&fc->num_waiting);
+ req = fuse_request_alloc(GFP_KERNEL | __GFP_NOFAIL);
+
+ if (!args->nocreds)
+ fuse_force_creds(fc, req);
+
+ __set_bit(FR_WAITING, &req->flags);
+ __set_bit(FR_FORCE, &req->flags);
+ } else {
+ WARN_ON(args->nocreds);
+ req = fuse_get_req(fc, false);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ }
/* Needs to be done after fuse_get_req() so that fc->minor is valid */
fuse_adjust_compat(fc, args);
+ fuse_args_to_req(req, args);
- req->in.h.opcode = args->in.h.opcode;
- req->in.h.nodeid = args->in.h.nodeid;
- req->in.numargs = args->in.numargs;
- memcpy(req->in.args, args->in.args,
- args->in.numargs * sizeof(struct fuse_in_arg));
- req->out.argvar = args->out.argvar;
- req->out.numargs = args->out.numargs;
- memcpy(req->out.args, args->out.args,
- args->out.numargs * sizeof(struct fuse_arg));
- fuse_request_send(fc, req);
+ if (!args->noreply)
+ __set_bit(FR_ISREPLY, &req->flags);
+ __fuse_request_send(fc, req);
ret = req->out.h.error;
- if (!ret && args->out.argvar) {
- BUG_ON(args->out.numargs != 1);
- ret = req->out.args[0].size;
+ if (!ret && args->out_argvar) {
+ BUG_ON(args->out_numargs == 0);
+ ret = args->out_args[args->out_numargs - 1].size;
}
fuse_put_request(fc, req);
return ret;
}
-bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req)
+static bool fuse_request_queue_background(struct fuse_conn *fc,
+ struct fuse_req *req)
{
bool queued = false;
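
[editor's note] After the conversion in the hunk above, a synchronous caller never touches struct fuse_req: it fills a stack-allocated struct fuse_args and lets fuse_simple_request() handle allocation, credentials, sending and teardown. A sketch of the resulting calling convention, modeled on the converted in-tree callers (my_do_getattr and its locals are illustrative):

static int my_do_getattr(struct fuse_conn *fc, u64 nodeid,
			 struct fuse_attr_out *outarg)
{
	struct fuse_getattr_in inarg = {};
	FUSE_ARGS(args);			/* zeroed struct fuse_args */

	args.opcode = FUSE_GETATTR;		/* formerly args->in.h.opcode */
	args.nodeid = nodeid;			/* formerly args->in.h.nodeid */
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(*outarg);
	args.out_args[0].value = outarg;

	/* negative errno on failure; with out_argvar set, the size of the
	 * variable last out argument on success */
	return fuse_simple_request(fc, &args);
}
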
@@ -681,56 +538,63 @@ bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req)
return queued;
}
-void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
+int fuse_simple_background(struct fuse_conn *fc, struct fuse_args *args,
+ gfp_t gfp_flags)
{
- WARN_ON(!req->end);
+ struct fuse_req *req;
+
+ if (args->force) {
+ WARN_ON(!args->nocreds);
+ req = fuse_request_alloc(gfp_flags);
+ if (!req)
+ return -ENOMEM;
+ __set_bit(FR_BACKGROUND, &req->flags);
+ } else {
+ WARN_ON(args->nocreds);
+ req = fuse_get_req(fc, true);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ }
+
+ fuse_args_to_req(req, args);
+
if (!fuse_request_queue_background(fc, req)) {
- req->out.h.error = -ENOTCONN;
- req->end(fc, req);
fuse_put_request(fc, req);
+ return -ENOTCONN;
}
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(fuse_request_send_background);
+EXPORT_SYMBOL_GPL(fuse_simple_background);
-static int fuse_request_send_notify_reply(struct fuse_conn *fc,
- struct fuse_req *req, u64 unique)
+static int fuse_simple_notify_reply(struct fuse_conn *fc,
+ struct fuse_args *args, u64 unique)
{
- int err = -ENODEV;
+ struct fuse_req *req;
struct fuse_iqueue *fiq = &fc->iq;
+ int err = 0;
+
+ req = fuse_get_req(fc, false);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
__clear_bit(FR_ISREPLY, &req->flags);
req->in.h.unique = unique;
- spin_lock(&fiq->waitq.lock);
+
+ fuse_args_to_req(req, args);
+
+ spin_lock(&fiq->lock);
if (fiq->connected) {
- queue_request(fiq, req);
- err = 0;
+ queue_request_and_unlock(fiq, req);
+ } else {
+ err = -ENODEV;
+ spin_unlock(&fiq->lock);
+ fuse_put_request(fc, req);
}
- spin_unlock(&fiq->waitq.lock);
return err;
}
-void fuse_force_forget(struct file *file, u64 nodeid)
-{
- struct inode *inode = file_inode(file);
- struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
- struct fuse_forget_in inarg;
-
- memset(&inarg, 0, sizeof(inarg));
- inarg.nlookup = 1;
- req = fuse_get_req_nofail_nopages(fc, file);
- req->in.h.opcode = FUSE_FORGET;
- req->in.h.nodeid = nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- __clear_bit(FR_ISREPLY, &req->flags);
- __fuse_request_send(fc, req);
- /* ignore errors */
- fuse_put_request(fc, req);
-}
-
/*
* Lock the request. Up to the next unlock_request() there mustn't be
* anything that could cause a page-fault. If the request was already
@@ -1084,14 +948,15 @@ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
{
unsigned i;
struct fuse_req *req = cs->req;
+ struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
+
- for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
+ for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
int err;
- unsigned offset = req->page_descs[i].offset;
- unsigned count = min(nbytes, req->page_descs[i].length);
+ unsigned int offset = ap->descs[i].offset;
+ unsigned int count = min(nbytes, ap->descs[i].length);
- err = fuse_copy_page(cs, &req->pages[i], offset, count,
- zeroing);
+ err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
if (err)
return err;
@@ -1149,12 +1014,12 @@ static int request_pending(struct fuse_iqueue *fiq)
* Unlike other requests this is assembled on demand, without a need
* to allocate a separate fuse_req structure.
*
- * Called with fiq->waitq.lock held, releases it
+ * Called with fiq->lock held, releases it
*/
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
size_t nbytes, struct fuse_req *req)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
{
struct fuse_in_header ih;
struct fuse_interrupt_in arg;
@@ -1169,7 +1034,7 @@ __releases(fiq->waitq.lock)
ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
arg.unique = req->in.h.unique;
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
if (nbytes < reqsize)
return -EINVAL;
@@ -1181,9 +1046,9 @@ __releases(fiq->waitq.lock)
return err ? err : reqsize;
}
-static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
- unsigned max,
- unsigned *countp)
+struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
+ unsigned int max,
+ unsigned int *countp)
{
struct fuse_forget_link *head = fiq->forget_list_head.next;
struct fuse_forget_link **newhead = &head;
@@ -1202,14 +1067,15 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
return head;
}
+EXPORT_SYMBOL(fuse_dequeue_forget);
static int fuse_read_single_forget(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
{
int err;
- struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
+ struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
struct fuse_forget_in arg = {
.nlookup = forget->forget_one.nlookup,
};
@@ -1220,7 +1086,7 @@ __releases(fiq->waitq.lock)
.len = sizeof(ih) + sizeof(arg),
};
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
kfree(forget);
if (nbytes < ih.len)
return -EINVAL;
@@ -1238,7 +1104,7 @@ __releases(fiq->waitq.lock)
static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs, size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
{
int err;
unsigned max_forgets;
@@ -1252,13 +1118,13 @@ __releases(fiq->waitq.lock)
};
if (nbytes < ih.len) {
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return -EINVAL;
}
max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
- head = dequeue_forget(fiq, max_forgets, &count);
- spin_unlock(&fiq->waitq.lock);
+ head = fuse_dequeue_forget(fiq, max_forgets, &count);
+ spin_unlock(&fiq->lock);
arg.count = count;
ih.len += count * sizeof(struct fuse_forget_one);
@@ -1288,7 +1154,7 @@ __releases(fiq->waitq.lock)
static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
{
if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
return fuse_read_single_forget(fiq, cs, nbytes);
@@ -1302,7 +1168,7 @@ __releases(fiq->waitq.lock)
* the pending list and copies request data to userspace buffer. If
* no reply is needed (FORGET) or request has been aborted or there
* was an error during the copying then it's finished by calling
- * request_end(). Otherwise add it to the processing list, and set
+ * fuse_request_end(). Otherwise add it to the processing list, and set
* the 'sent' flag.
*/
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
@@ -1313,21 +1179,42 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
struct fuse_iqueue *fiq = &fc->iq;
struct fuse_pqueue *fpq = &fud->pq;
struct fuse_req *req;
- struct fuse_in *in;
+ struct fuse_args *args;
unsigned reqsize;
unsigned int hash;
+ /*
+ * Require sane minimum read buffer - that has capacity for fixed part
+ * of any request header + negotiated max_write room for data.
+ *
+ * Historically libfuse reserves 4K for fixed header room, but e.g.
+ * GlusterFS reserves only 80 bytes
+ *
+ * = `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
+ *
+ * which is the absolute minimum any sane filesystem should be using
+ * for header room.
+ */
+ if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
+ sizeof(struct fuse_in_header) +
+ sizeof(struct fuse_write_in) +
+ fc->max_write))
+ return -EINVAL;
+
restart:
- spin_lock(&fiq->waitq.lock);
- err = -EAGAIN;
- if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
- !request_pending(fiq))
- goto err_unlock;
+ for (;;) {
+ spin_lock(&fiq->lock);
+ if (!fiq->connected || request_pending(fiq))
+ break;
+ spin_unlock(&fiq->lock);
- err = wait_event_interruptible_exclusive_locked(fiq->waitq,
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ err = wait_event_interruptible_exclusive(fiq->waitq,
!fiq->connected || request_pending(fiq));
- if (err)
- goto err_unlock;
+ if (err)
+ return err;
+ }
if (!fiq->connected) {
err = fc->aborted ? -ECONNABORTED : -ENODEV;
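
[editor's note] Concretely, assuming the standard ABI sizes (stated here as assumptions: a 40-byte fuse_in_header, a 40-byte fuse_write_in, FUSE_MIN_READ_BUFFER of 8192) and a negotiated max_write of 128 KiB, the new check works out as:

/*
 *   max(FUSE_MIN_READ_BUFFER, 40 + 40 + 131072) = max(8192, 131152)
 *                                               = 131152 bytes
 *
 * so a read(2) on /dev/fuse with a smaller buffer now fails with -EINVAL
 * up front, instead of triggering the per-request -EIO reply-and-restart
 * path below.
 */
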
@@ -1351,28 +1238,28 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
req = list_entry(fiq->pending.next, struct fuse_req, list);
clear_bit(FR_PENDING, &req->flags);
list_del_init(&req->list);
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
- in = &req->in;
- reqsize = in->h.len;
+ args = req->args;
+ reqsize = req->in.h.len;
/* If request is too large, reply with an error and restart the read */
if (nbytes < reqsize) {
req->out.h.error = -EIO;
/* SETXATTR is special, since it may contain too large data */
- if (in->h.opcode == FUSE_SETXATTR)
+ if (args->opcode == FUSE_SETXATTR)
req->out.h.error = -E2BIG;
- request_end(fc, req);
+ fuse_request_end(fc, req);
goto restart;
}
spin_lock(&fpq->lock);
list_add(&req->list, &fpq->io);
spin_unlock(&fpq->lock);
cs->req = req;
- err = fuse_copy_one(cs, &in->h, sizeof(in->h));
+ err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
if (!err)
- err = fuse_copy_args(cs, in->numargs, in->argpages,
- (struct fuse_arg *) in->args, 0);
+ err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
+ (struct fuse_arg *) args->in_args, 0);
fuse_copy_finish(cs);
spin_lock(&fpq->lock);
clear_bit(FR_LOCKED, &req->flags);
@@ -1405,11 +1292,11 @@ out_end:
if (!test_bit(FR_PRIVATE, &req->flags))
list_del_init(&req->list);
spin_unlock(&fpq->lock);
- request_end(fc, req);
+ fuse_request_end(fc, req);
return err;
err_unlock:
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return err;
}
@@ -1728,9 +1615,19 @@ out_finish:
return err;
}
-static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
+struct fuse_retrieve_args {
+ struct fuse_args_pages ap;
+ struct fuse_notify_retrieve_in inarg;
+};
+
+static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_args *args,
+ int error)
{
- release_pages(req->pages, req->num_pages);
+ struct fuse_retrieve_args *ra =
+ container_of(args, typeof(*ra), ap.args);
+
+ release_pages(ra->ap.pages, ra->ap.num_pages);
+ kfree(ra);
}
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
@@ -1738,13 +1635,16 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
{
int err;
struct address_space *mapping = inode->i_mapping;
- struct fuse_req *req;
pgoff_t index;
loff_t file_size;
unsigned int num;
unsigned int offset;
size_t total_len = 0;
unsigned int num_pages;
+ struct fuse_retrieve_args *ra;
+ size_t args_size = sizeof(*ra);
+ struct fuse_args_pages *ap;
+ struct fuse_args *args;
offset = outarg->offset & ~PAGE_MASK;
file_size = i_size_read(inode);
@@ -1758,19 +1658,26 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
num_pages = min(num_pages, fc->max_pages);
- req = fuse_get_req(fc, num_pages);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));
- req->in.h.opcode = FUSE_NOTIFY_REPLY;
- req->in.h.nodeid = outarg->nodeid;
- req->in.numargs = 2;
- req->in.argpages = 1;
- req->end = fuse_retrieve_end;
+ ra = kzalloc(args_size, GFP_KERNEL);
+ if (!ra)
+ return -ENOMEM;
+
+ ap = &ra->ap;
+ ap->pages = (void *) (ra + 1);
+ ap->descs = (void *) (ap->pages + num_pages);
+
+ args = &ap->args;
+ args->nodeid = outarg->nodeid;
+ args->opcode = FUSE_NOTIFY_REPLY;
+ args->in_numargs = 2;
+ args->in_pages = true;
+ args->end = fuse_retrieve_end;
index = outarg->offset >> PAGE_SHIFT;
- while (num && req->num_pages < num_pages) {
+ while (num && ap->num_pages < num_pages) {
struct page *page;
unsigned int this_num;
@@ -1779,27 +1686,25 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
break;
this_num = min_t(unsigned, num, PAGE_SIZE - offset);
- req->pages[req->num_pages] = page;
- req->page_descs[req->num_pages].offset = offset;
- req->page_descs[req->num_pages].length = this_num;
- req->num_pages++;
+ ap->pages[ap->num_pages] = page;
+ ap->descs[ap->num_pages].offset = offset;
+ ap->descs[ap->num_pages].length = this_num;
+ ap->num_pages++;
offset = 0;
num -= this_num;
total_len += this_num;
index++;
}
- req->misc.retrieve_in.offset = outarg->offset;
- req->misc.retrieve_in.size = total_len;
- req->in.args[0].size = sizeof(req->misc.retrieve_in);
- req->in.args[0].value = &req->misc.retrieve_in;
- req->in.args[1].size = total_len;
+ ra->inarg.offset = outarg->offset;
+ ra->inarg.size = total_len;
+ args->in_args[0].size = sizeof(ra->inarg);
+ args->in_args[0].value = &ra->inarg;
+ args->in_args[1].size = total_len;
- err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
- if (err) {
- fuse_retrieve_end(fc, req);
- fuse_put_request(fc, req);
- }
+ err = fuse_simple_notify_reply(fc, args, outarg->notify_unique);
+ if (err)
+ fuse_retrieve_end(fc, args, err);
return err;
}
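
[editor's note] fuse_retrieve_end() above shows the pattern that replaces the old req->misc storage: embed the generic argument block first in a private struct, then climb back to it in the ->end callback with container_of(). A minimal sketch of the same idiom for a hypothetical async caller (all my_* names are illustrative):

struct my_async_op {
	struct fuse_args_pages ap;	/* ap.args is what the core sees */
	int private_cookie;		/* illustrative per-request state */
};

static void my_op_end(struct fuse_conn *fc, struct fuse_args *args, int error)
{
	struct my_async_op *op = container_of(args, typeof(*op), ap.args);

	/* error is out.h.error; private state lives in *op, not in fuse_req */
	kfree(op);
}

Such an op would be queued with op->ap.args.end = my_op_end followed by fuse_simple_background(fc, &op->ap.args, GFP_KERNEL). Note that on a failed submission ->end is not invoked and the caller must clean up itself, just as fuse_retrieve() calls fuse_retrieve_end() by hand when fuse_simple_notify_reply() fails.
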
@@ -1885,27 +1790,25 @@ static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
return NULL;
}
-static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
+static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
unsigned nbytes)
{
unsigned reqsize = sizeof(struct fuse_out_header);
- if (out->h.error)
- return nbytes != reqsize ? -EINVAL : 0;
-
- reqsize += len_args(out->numargs, out->args);
+ reqsize += fuse_len_args(args->out_numargs, args->out_args);
- if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
+ if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
return -EINVAL;
else if (reqsize > nbytes) {
- struct fuse_arg *lastarg = &out->args[out->numargs-1];
+ struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
unsigned diffsize = reqsize - nbytes;
+
if (diffsize > lastarg->size)
return -EINVAL;
lastarg->size -= diffsize;
}
- return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
- out->page_zeroing);
+ return fuse_copy_args(cs, args->out_numargs, args->out_pages,
+ args->out_args, args->page_zeroing);
}
/*
@@ -1913,7 +1816,7 @@ static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
* the write buffer. The request is then searched on the processing
* list by the unique ID found in the header. If found, then remove
* it from the list and copy the rest of the buffer to the request.
- * The request is finished by calling request_end()
+ * The request is finished by calling fuse_request_end().
*/
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
struct fuse_copy_state *cs, size_t nbytes)
@@ -1984,10 +1887,13 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
set_bit(FR_LOCKED, &req->flags);
spin_unlock(&fpq->lock);
cs->req = req;
- if (!req->out.page_replace)
+ if (!req->args->page_replace)
cs->move_pages = 0;
- err = copy_out_args(cs, &req->out, nbytes);
+ if (oh.error)
+ err = nbytes != sizeof(oh) ? -EINVAL : 0;
+ else
+ err = copy_out_args(cs, req->args, nbytes);
fuse_copy_finish(cs);
spin_lock(&fpq->lock);
@@ -2000,7 +1906,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
list_del_init(&req->list);
spin_unlock(&fpq->lock);
- request_end(fc, req);
+ fuse_request_end(fc, req);
out:
return err ? err : nbytes;
@@ -2121,12 +2027,12 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
fiq = &fud->fc->iq;
poll_wait(file, &fiq->waitq, wait);
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
if (!fiq->connected)
mask = EPOLLERR;
else if (request_pending(fiq))
mask |= EPOLLIN | EPOLLRDNORM;
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return mask;
}
@@ -2140,7 +2046,7 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
req->out.h.error = -ECONNABORTED;
clear_bit(FR_SENT, &req->flags);
list_del_init(&req->list);
- request_end(fc, req);
+ fuse_request_end(fc, req);
}
}
@@ -2221,15 +2127,15 @@ void fuse_abort_conn(struct fuse_conn *fc)
flush_bg_queue(fc);
spin_unlock(&fc->bg_lock);
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
fiq->connected = 0;
list_for_each_entry(req, &fiq->pending, list)
clear_bit(FR_PENDING, &req->flags);
list_splice_tail_init(&fiq->pending, &to_end);
while (forget_pending(fiq))
- kfree(dequeue_forget(fiq, 1, NULL));
- wake_up_all_locked(&fiq->waitq);
- spin_unlock(&fiq->waitq.lock);
+ kfree(fuse_dequeue_forget(fiq, 1, NULL));
+ wake_up_all(&fiq->waitq);
+ spin_unlock(&fiq->lock);
kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
end_polls(fc);
wake_up_all(&fc->blocked_waitq);
@@ -2296,7 +2202,7 @@ static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
if (new->private_data)
return -EINVAL;
- fud = fuse_dev_alloc(fc);
+ fud = fuse_dev_alloc_install(fc);
if (!fud)
return -ENOMEM;