author | Steven Whitehouse <swhiteho@redhat.com> | 2006-03-31 15:34:58 -0500
committer | Steven Whitehouse <swhiteho@redhat.com> | 2006-03-31 15:34:58 -0500
commit | 86579dd06deecfa6ac88d5e84e4d63c397cd6f6d (patch)
tree | b4475d3ccde53015ad84a06e4e55e64591171b75 /fs/lockd
parent | 7ea9ea832212c4a755650f7c7cc1ff0b63292a41 (diff)
parent | a0f067802576d4eb4c65d40b8ee7d6ea3c81dd61 (diff)
download | linux-86579dd06deecfa6ac88d5e84e4d63c397cd6f6d.tar.bz2
Merge branch 'master'
Diffstat (limited to 'fs/lockd')
-rw-r--r-- | fs/lockd/clntlock.c | 112
-rw-r--r-- | fs/lockd/clntproc.c | 317
-rw-r--r-- | fs/lockd/host.c | 31
-rw-r--r-- | fs/lockd/mon.c | 17
-rw-r--r-- | fs/lockd/svc.c | 19
-rw-r--r-- | fs/lockd/svc4proc.c | 157
-rw-r--r-- | fs/lockd/svclock.c | 349
-rw-r--r-- | fs/lockd/svcproc.c | 151
-rw-r--r-- | fs/lockd/svcshare.c | 4
-rw-r--r-- | fs/lockd/svcsubs.c | 24
-rw-r--r-- | fs/lockd/xdr.c | 19
-rw-r--r-- | fs/lockd/xdr4.c | 21
12 files changed, 538 insertions(+), 683 deletions(-)
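Beyond the mechanical semaphore-to-mutex conversions, the largest theme in the lockd diff below is moving `struct nlm_block` (and the `nlm_rqst` it now points to) from ad-hoc freeing onto kref-based reference counting, so a block pinned by a pending GRANTED callback is only freed once the last user drops its reference. As a standalone illustration of that pattern only — simplified, hypothetical names, not code taken from the patch — a minimal kref lifecycle looks like this:

```c
#include <linux/kref.h>
#include <linux/slab.h>

/* Simplified stand-in for struct nlm_block: only the refcount matters here. */
struct demo_block {
	struct kref	b_count;
	/* ... lock, host and file state would live here ... */
};

static struct demo_block *demo_alloc_block(void)
{
	struct demo_block *block = kzalloc(sizeof(*block), GFP_KERNEL);

	if (block != NULL)
		kref_init(&block->b_count);	/* refcount starts at 1 */
	return block;
}

/* Called by kref_put() once the last reference is gone. */
static void demo_free_block(struct kref *kref)
{
	struct demo_block *block = container_of(kref, struct demo_block, b_count);

	/* ... unlink from lists, release host/file references here ... */
	kfree(block);
}

static void demo_release_block(struct demo_block *block)
{
	if (block != NULL)
		kref_put(&block->b_count, demo_free_block);
}

/* Whoever hands the block to async work takes an extra reference first;
 * the completion path drops it again via demo_release_block(). */
static void demo_start_async(struct demo_block *block)
{
	kref_get(&block->b_count);
	/* ... queue the async RPC callback ... */
}
```

In the patch itself, `nlmsvc_release_block()`/`nlmsvc_free_block()` play these roles: `nlmsvc_grant_blocked()` takes a `kref_get()` before `nlm_async_call()`, and the RPC layer's `.rpc_release` hook (`nlmsvc_grant_release`) drops that reference when the callback completes.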
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index da6354baa0b8..bce744468708 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c @@ -44,32 +44,25 @@ static LIST_HEAD(nlm_blocked); /* * Queue up a lock for blocking so that the GRANTED request can see it */ -int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl) +struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl) { struct nlm_wait *block; - BUG_ON(req->a_block != NULL); block = kmalloc(sizeof(*block), GFP_KERNEL); - if (block == NULL) - return -ENOMEM; - block->b_host = host; - block->b_lock = fl; - init_waitqueue_head(&block->b_wait); - block->b_status = NLM_LCK_BLOCKED; - - list_add(&block->b_list, &nlm_blocked); - req->a_block = block; - - return 0; + if (block != NULL) { + block->b_host = host; + block->b_lock = fl; + init_waitqueue_head(&block->b_wait); + block->b_status = NLM_LCK_BLOCKED; + list_add(&block->b_list, &nlm_blocked); + } + return block; } -void nlmclnt_finish_block(struct nlm_rqst *req) +void nlmclnt_finish_block(struct nlm_wait *block) { - struct nlm_wait *block = req->a_block; - if (block == NULL) return; - req->a_block = NULL; list_del(&block->b_list); kfree(block); } @@ -77,15 +70,14 @@ void nlmclnt_finish_block(struct nlm_rqst *req) /* * Block on a lock */ -long nlmclnt_block(struct nlm_rqst *req, long timeout) +int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout) { - struct nlm_wait *block = req->a_block; long ret; /* A borken server might ask us to block even if we didn't * request it. Just say no! */ - if (!req->a_args.block) + if (block == NULL) return -EAGAIN; /* Go to sleep waiting for GRANT callback. Some servers seem @@ -99,13 +91,10 @@ long nlmclnt_block(struct nlm_rqst *req, long timeout) ret = wait_event_interruptible_timeout(block->b_wait, block->b_status != NLM_LCK_BLOCKED, timeout); - - if (block->b_status != NLM_LCK_BLOCKED) { - req->a_res.status = block->b_status; - block->b_status = NLM_LCK_BLOCKED; - } - - return ret; + if (ret < 0) + return -ERESTARTSYS; + req->a_res.status = block->b_status; + return 0; } /* @@ -125,7 +114,15 @@ u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock) list_for_each_entry(block, &nlm_blocked, b_list) { struct file_lock *fl_blocked = block->b_lock; - if (!nlm_compare_locks(fl_blocked, fl)) + if (fl_blocked->fl_start != fl->fl_start) + continue; + if (fl_blocked->fl_end != fl->fl_end) + continue; + /* + * Careful! The NLM server will return the 32-bit "pid" that + * we put on the wire: in this case the lockowner "pid". + */ + if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid) continue; if (!nlm_cmp_addr(&block->b_host->h_addr, addr)) continue; @@ -147,34 +144,6 @@ u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock) */ /* - * Mark the locks for reclaiming. - * FIXME: In 2.5 we don't want to iterate through any global file_lock_list. - * Maintain NLM lock reclaiming lists in the nlm_host instead. 
- */ -static -void nlmclnt_mark_reclaim(struct nlm_host *host) -{ - struct file_lock *fl; - struct inode *inode; - struct list_head *tmp; - - list_for_each(tmp, &file_lock_list) { - fl = list_entry(tmp, struct file_lock, fl_link); - - inode = fl->fl_file->f_dentry->d_inode; - if (inode->i_sb->s_magic != NFS_SUPER_MAGIC) - continue; - if (fl->fl_u.nfs_fl.owner == NULL) - continue; - if (fl->fl_u.nfs_fl.owner->host != host) - continue; - if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED)) - continue; - fl->fl_u.nfs_fl.flags |= NFS_LCK_RECLAIM; - } -} - -/* * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number, * that we mark locks for reclaiming, and that we bump the pseudo NSM state. */ @@ -186,7 +155,12 @@ void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate) host->h_state++; host->h_nextrebind = 0; nlm_rebind_host(host); - nlmclnt_mark_reclaim(host); + + /* + * Mark the locks for reclaiming. + */ + list_splice_init(&host->h_granted, &host->h_reclaim); + dprintk("NLM: reclaiming locks for host %s", host->h_name); } @@ -215,9 +189,7 @@ reclaimer(void *ptr) { struct nlm_host *host = (struct nlm_host *) ptr; struct nlm_wait *block; - struct list_head *tmp; - struct file_lock *fl; - struct inode *inode; + struct file_lock *fl, *next; daemonize("%s-reclaim", host->h_name); allow_signal(SIGKILL); @@ -229,23 +201,13 @@ reclaimer(void *ptr) /* First, reclaim all locks that have been marked. */ restart: - list_for_each(tmp, &file_lock_list) { - fl = list_entry(tmp, struct file_lock, fl_link); + list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) { + list_del_init(&fl->fl_u.nfs_fl.list); - inode = fl->fl_file->f_dentry->d_inode; - if (inode->i_sb->s_magic != NFS_SUPER_MAGIC) - continue; - if (fl->fl_u.nfs_fl.owner == NULL) - continue; - if (fl->fl_u.nfs_fl.owner->host != host) - continue; - if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_RECLAIM)) - continue; - - fl->fl_u.nfs_fl.flags &= ~NFS_LCK_RECLAIM; - nlmclnt_reclaim(host, fl); if (signalled()) - break; + continue; + if (nlmclnt_reclaim(host, fl) == 0) + list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted); goto restart; } diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 970b6a6aa337..f96e38155b5c 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -132,59 +132,18 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh)); lock->caller = system_utsname.nodename; lock->oh.data = req->a_owner; - lock->oh.len = sprintf(req->a_owner, "%d@%s", - current->pid, system_utsname.nodename); - locks_copy_lock(&lock->fl, fl); + lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s", + (unsigned int)fl->fl_u.nfs_fl.owner->pid, + system_utsname.nodename); + lock->svid = fl->fl_u.nfs_fl.owner->pid; + lock->fl.fl_start = fl->fl_start; + lock->fl.fl_end = fl->fl_end; + lock->fl.fl_type = fl->fl_type; } static void nlmclnt_release_lockargs(struct nlm_rqst *req) { - struct file_lock *fl = &req->a_args.lock.fl; - - if (fl->fl_ops && fl->fl_ops->fl_release_private) - fl->fl_ops->fl_release_private(fl); -} - -/* - * Initialize arguments for GRANTED call. The nlm_rqst structure - * has been cleared already. 
- */ -int -nlmclnt_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock) -{ - locks_copy_lock(&call->a_args.lock.fl, &lock->fl); - memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh)); - call->a_args.lock.caller = system_utsname.nodename; - call->a_args.lock.oh.len = lock->oh.len; - - /* set default data area */ - call->a_args.lock.oh.data = call->a_owner; - - if (lock->oh.len > NLMCLNT_OHSIZE) { - void *data = kmalloc(lock->oh.len, GFP_KERNEL); - if (!data) { - nlmclnt_freegrantargs(call); - return 0; - } - call->a_args.lock.oh.data = (u8 *) data; - } - - memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len); - return 1; -} - -void -nlmclnt_freegrantargs(struct nlm_rqst *call) -{ - struct file_lock *fl = &call->a_args.lock.fl; - /* - * Check whether we allocated memory for the owner. - */ - if (call->a_args.lock.oh.data != (u8 *) call->a_owner) { - kfree(call->a_args.lock.oh.data); - } - if (fl->fl_ops && fl->fl_ops->fl_release_private) - fl->fl_ops->fl_release_private(fl); + BUG_ON(req->a_args.lock.fl.fl_ops != NULL); } /* @@ -193,9 +152,8 @@ nlmclnt_freegrantargs(struct nlm_rqst *call) int nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) { - struct nfs_server *nfssrv = NFS_SERVER(inode); struct nlm_host *host; - struct nlm_rqst reqst, *call = &reqst; + struct nlm_rqst *call; sigset_t oldset; unsigned long flags; int status, proto, vers; @@ -209,23 +167,17 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) /* Retrieve transport protocol from NFS client */ proto = NFS_CLIENT(inode)->cl_xprt->prot; - if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers))) + host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers); + if (host == NULL) return -ENOLCK; - /* Create RPC client handle if not there, and copy soft - * and intr flags from NFS client. 
*/ - if (host->h_rpcclnt == NULL) { - struct rpc_clnt *clnt; + call = nlm_alloc_call(host); + if (call == NULL) + return -ENOMEM; - /* Bind an rpc client to this host handle (does not - * perform a portmapper lookup) */ - if (!(clnt = nlm_bind_host(host))) { - status = -ENOLCK; - goto done; - } - clnt->cl_softrtry = nfssrv->client->cl_softrtry; - clnt->cl_intr = nfssrv->client->cl_intr; - } + nlmclnt_locks_init_private(fl, host); + /* Set up the argument struct */ + nlmclnt_setlockargs(call, fl); /* Keep the old signal mask */ spin_lock_irqsave(¤t->sighand->siglock, flags); @@ -238,26 +190,10 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) && (current->flags & PF_EXITING)) { sigfillset(¤t->blocked); /* Mask all signals */ recalc_sigpending(); - spin_unlock_irqrestore(¤t->sighand->siglock, flags); - call = nlmclnt_alloc_call(); - if (!call) { - status = -ENOMEM; - goto out_restore; - } call->a_flags = RPC_TASK_ASYNC; - } else { - spin_unlock_irqrestore(¤t->sighand->siglock, flags); - memset(call, 0, sizeof(*call)); - locks_init_lock(&call->a_args.lock.fl); - locks_init_lock(&call->a_res.lock.fl); } - call->a_host = host; - - nlmclnt_locks_init_private(fl, host); - - /* Set up the argument struct */ - nlmclnt_setlockargs(call, fl); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); if (IS_SETLK(cmd) || IS_SETLKW(cmd)) { if (fl->fl_type != F_UNLCK) { @@ -270,41 +206,58 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) else status = -EINVAL; - out_restore: + fl->fl_ops->fl_release_private(fl); + fl->fl_ops = NULL; + spin_lock_irqsave(¤t->sighand->siglock, flags); current->blocked = oldset; recalc_sigpending(); spin_unlock_irqrestore(¤t->sighand->siglock, flags); -done: dprintk("lockd: clnt proc returns %d\n", status); - nlm_release_host(host); return status; } EXPORT_SYMBOL(nlmclnt_proc); /* * Allocate an NLM RPC call struct + * + * Note: the caller must hold a reference to host. In case of failure, + * this reference will be released. */ -struct nlm_rqst * -nlmclnt_alloc_call(void) +struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) { struct nlm_rqst *call; - while (!signalled()) { - call = (struct nlm_rqst *) kmalloc(sizeof(struct nlm_rqst), GFP_KERNEL); - if (call) { - memset(call, 0, sizeof(*call)); + for(;;) { + call = kzalloc(sizeof(*call), GFP_KERNEL); + if (call != NULL) { locks_init_lock(&call->a_args.lock.fl); locks_init_lock(&call->a_res.lock.fl); + call->a_host = host; return call; } - printk("nlmclnt_alloc_call: failed, waiting for memory\n"); + if (signalled()) + break; + printk("nlm_alloc_call: failed, waiting for memory\n"); schedule_timeout_interruptible(5*HZ); } + nlm_release_host(host); return NULL; } +void nlm_release_call(struct nlm_rqst *call) +{ + nlm_release_host(call->a_host); + nlmclnt_release_lockargs(call); + kfree(call); +} + +static void nlmclnt_rpc_release(void *data) +{ + return nlm_release_call(data); +} + static int nlm_wait_on_grace(wait_queue_head_t *queue) { DEFINE_WAIT(wait); @@ -401,57 +354,45 @@ in_grace_period: /* * Generic NLM call, async version. 
*/ -int nlmsvc_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) +static int __nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops) { struct nlm_host *host = req->a_host; struct rpc_clnt *clnt; - struct rpc_message msg = { - .rpc_argp = &req->a_args, - .rpc_resp = &req->a_res, - }; - int status; + int status = -ENOLCK; dprintk("lockd: call procedure %d on %s (async)\n", (int)proc, host->h_name); /* If we have no RPC client yet, create one. */ - if ((clnt = nlm_bind_host(host)) == NULL) - return -ENOLCK; - msg.rpc_proc = &clnt->cl_procinfo[proc]; + clnt = nlm_bind_host(host); + if (clnt == NULL) + goto out_err; + msg->rpc_proc = &clnt->cl_procinfo[proc]; /* bootstrap and kick off the async RPC call */ - status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req); - + status = rpc_call_async(clnt, msg, RPC_TASK_ASYNC, tk_ops, req); + if (status == 0) + return 0; +out_err: + nlm_release_call(req); return status; } -static int nlmclnt_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) +int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) { - struct nlm_host *host = req->a_host; - struct rpc_clnt *clnt; - struct nlm_args *argp = &req->a_args; - struct nlm_res *resp = &req->a_res; struct rpc_message msg = { - .rpc_argp = argp, - .rpc_resp = resp, + .rpc_argp = &req->a_args, + .rpc_resp = &req->a_res, }; - int status; - - dprintk("lockd: call procedure %d on %s (async)\n", - (int)proc, host->h_name); - - /* If we have no RPC client yet, create one. */ - if ((clnt = nlm_bind_host(host)) == NULL) - return -ENOLCK; - msg.rpc_proc = &clnt->cl_procinfo[proc]; + return __nlm_async_call(req, proc, &msg, tk_ops); +} - /* Increment host refcount */ - nlm_get_host(host); - /* bootstrap and kick off the async RPC call */ - status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req); - if (status < 0) - nlm_release_host(host); - return status; +int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) +{ + struct rpc_message msg = { + .rpc_argp = &req->a_res, + }; + return __nlm_async_call(req, proc, &msg, tk_ops); } /* @@ -463,36 +404,41 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl) int status; status = nlmclnt_call(req, NLMPROC_TEST); - nlmclnt_release_lockargs(req); if (status < 0) - return status; + goto out; - status = req->a_res.status; - if (status == NLM_LCK_GRANTED) { - fl->fl_type = F_UNLCK; - } if (status == NLM_LCK_DENIED) { - /* - * Report the conflicting lock back to the application. - */ - locks_copy_lock(fl, &req->a_res.lock.fl); - fl->fl_pid = 0; - } else { - return nlm_stat_to_errno(req->a_res.status); + switch (req->a_res.status) { + case NLM_LCK_GRANTED: + fl->fl_type = F_UNLCK; + break; + case NLM_LCK_DENIED: + /* + * Report the conflicting lock back to the application. 
+ */ + fl->fl_start = req->a_res.lock.fl.fl_start; + fl->fl_end = req->a_res.lock.fl.fl_start; + fl->fl_type = req->a_res.lock.fl.fl_type; + fl->fl_pid = 0; + break; + default: + status = nlm_stat_to_errno(req->a_res.status); } - - return 0; +out: + nlm_release_call(req); + return status; } static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl) { - memcpy(&new->fl_u.nfs_fl, &fl->fl_u.nfs_fl, sizeof(new->fl_u.nfs_fl)); - nlm_get_lockowner(new->fl_u.nfs_fl.owner); + new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state; + new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner); + list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted); } static void nlmclnt_locks_release_private(struct file_lock *fl) { + list_del(&fl->fl_u.nfs_fl.list); nlm_put_lockowner(fl->fl_u.nfs_fl.owner); - fl->fl_ops = NULL; } static struct file_lock_operations nlmclnt_lock_ops = { @@ -504,8 +450,8 @@ static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *ho { BUG_ON(fl->fl_ops != NULL); fl->fl_u.nfs_fl.state = 0; - fl->fl_u.nfs_fl.flags = 0; fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner); + INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list); fl->fl_ops = &nlmclnt_lock_ops; } @@ -552,57 +498,52 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl) { struct nlm_host *host = req->a_host; struct nlm_res *resp = &req->a_res; - long timeout; - int status; + struct nlm_wait *block = NULL; + int status = -ENOLCK; if (!host->h_monitored && nsm_monitor(host) < 0) { printk(KERN_NOTICE "lockd: failed to monitor %s\n", host->h_name); - status = -ENOLCK; goto out; } - if (req->a_args.block) { - status = nlmclnt_prepare_block(req, host, fl); - if (status < 0) - goto out; - } + block = nlmclnt_prepare_block(host, fl); for(;;) { status = nlmclnt_call(req, NLMPROC_LOCK); if (status < 0) goto out_unblock; - if (resp->status != NLM_LCK_BLOCKED) + if (!req->a_args.block) break; - /* Wait on an NLM blocking lock */ - timeout = nlmclnt_block(req, NLMCLNT_POLL_TIMEOUT); /* Did a reclaimer thread notify us of a server reboot? */ if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) continue; if (resp->status != NLM_LCK_BLOCKED) break; - if (timeout >= 0) - continue; - /* We were interrupted. Send a CANCEL request to the server + /* Wait on an NLM blocking lock */ + status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT); + /* if we were interrupted. Send a CANCEL request to the server * and exit */ - status = (int)timeout; - goto out_unblock; + if (status < 0) + goto out_unblock; + if (resp->status != NLM_LCK_BLOCKED) + break; } if (resp->status == NLM_LCK_GRANTED) { fl->fl_u.nfs_fl.state = host->h_state; - fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED; fl->fl_flags |= FL_SLEEP; + /* Ensure the resulting lock will get added to granted list */ do_vfs_lock(fl); } status = nlm_stat_to_errno(resp->status); out_unblock: - nlmclnt_finish_block(req); + nlmclnt_finish_block(block); /* Cancel the blocked request if it is still pending */ if (resp->status == NLM_LCK_BLOCKED) nlmclnt_cancel(host, req->a_args.block, fl); out: - nlmclnt_release_lockargs(req); + nlm_release_call(req); return status; } @@ -658,10 +599,6 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) struct nlm_res *resp = &req->a_res; int status; - /* Clean the GRANTED flag now so the lock doesn't get - * reclaimed while we're stuck in the unlock call. 
*/ - fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED; - /* * Note: the server is supposed to either grant us the unlock * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either @@ -669,32 +606,24 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) */ do_vfs_lock(fl); - if (req->a_flags & RPC_TASK_ASYNC) { - status = nlmclnt_async_call(req, NLMPROC_UNLOCK, - &nlmclnt_unlock_ops); - /* Hrmf... Do the unlock early since locks_remove_posix() - * really expects us to free the lock synchronously */ - if (status < 0) { - nlmclnt_release_lockargs(req); - kfree(req); - } - return status; - } + if (req->a_flags & RPC_TASK_ASYNC) + return nlm_async_call(req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops); status = nlmclnt_call(req, NLMPROC_UNLOCK); - nlmclnt_release_lockargs(req); if (status < 0) - return status; + goto out; + status = 0; if (resp->status == NLM_LCK_GRANTED) - return 0; + goto out; if (resp->status != NLM_LCK_DENIED_NOLOCKS) printk("lockd: unexpected unlock status: %d\n", resp->status); - /* What to do now? I'm out of my depth... */ - - return -ENOLCK; + status = -ENOLCK; +out: + nlm_release_call(req); + return status; } static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) @@ -716,9 +645,6 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) if (status != NLM_LCK_GRANTED) printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status); die: - nlm_release_host(req->a_host); - nlmclnt_release_lockargs(req); - kfree(req); return; retry_rebind: nlm_rebind_host(req->a_host); @@ -728,6 +654,7 @@ die: static const struct rpc_call_ops nlmclnt_unlock_ops = { .rpc_call_done = nlmclnt_unlock_callback, + .rpc_release = nlmclnt_rpc_release, }; /* @@ -749,20 +676,15 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl recalc_sigpending(); spin_unlock_irqrestore(¤t->sighand->siglock, flags); - req = nlmclnt_alloc_call(); + req = nlm_alloc_call(nlm_get_host(host)); if (!req) return -ENOMEM; - req->a_host = host; req->a_flags = RPC_TASK_ASYNC; nlmclnt_setlockargs(req, fl); req->a_args.block = block; - status = nlmclnt_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops); - if (status < 0) { - nlmclnt_release_lockargs(req); - kfree(req); - } + status = nlm_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops); spin_lock_irqsave(¤t->sighand->siglock, flags); current->blocked = oldset; @@ -791,6 +713,7 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data) switch (req->a_res.status) { case NLM_LCK_GRANTED: case NLM_LCK_DENIED_GRACE_PERIOD: + case NLM_LCK_DENIED: /* Everything's good */ break; case NLM_LCK_DENIED_NOLOCKS: @@ -802,9 +725,6 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data) } die: - nlm_release_host(req->a_host); - nlmclnt_release_lockargs(req); - kfree(req); return; retry_cancel: @@ -818,6 +738,7 @@ retry_cancel: static const struct rpc_call_ops nlmclnt_cancel_ops = { .rpc_call_done = nlmclnt_cancel_callback, + .rpc_release = nlmclnt_rpc_release, }; /* diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 82f7a0b1d8ae..729ac427d359 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -16,6 +16,7 @@ #include <linux/sunrpc/svc.h> #include <linux/lockd/lockd.h> #include <linux/lockd/sm_inter.h> +#include <linux/mutex.h> #define NLMDBG_FACILITY NLMDBG_HOSTCACHE @@ -30,7 +31,7 @@ static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH]; static unsigned long next_gc; static int nrhosts; -static DECLARE_MUTEX(nlm_host_sema); +static DEFINE_MUTEX(nlm_host_mutex); static void 
nlm_gc_hosts(void); @@ -71,7 +72,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin, hash = NLM_ADDRHASH(sin->sin_addr.s_addr); /* Lock hash table */ - down(&nlm_host_sema); + mutex_lock(&nlm_host_mutex); if (time_after_eq(jiffies, next_gc)) nlm_gc_hosts(); @@ -91,7 +92,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin, nlm_hosts[hash] = host; } nlm_get_host(host); - up(&nlm_host_sema); + mutex_unlock(&nlm_host_mutex); return host; } } @@ -123,12 +124,14 @@ nlm_lookup_host(int server, struct sockaddr_in *sin, nlm_hosts[hash] = host; INIT_LIST_HEAD(&host->h_lockowners); spin_lock_init(&host->h_lock); + INIT_LIST_HEAD(&host->h_granted); + INIT_LIST_HEAD(&host->h_reclaim); if (++nrhosts > NLM_HOST_MAX) next_gc = 0; nohost: - up(&nlm_host_sema); + mutex_unlock(&nlm_host_mutex); return host; } @@ -139,19 +142,19 @@ nlm_find_client(void) * and return it */ int hash; - down(&nlm_host_sema); + mutex_lock(&nlm_host_mutex); for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) { struct nlm_host *host, **hp; for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) { if (host->h_server && host->h_killed == 0) { nlm_get_host(host); - up(&nlm_host_sema); + mutex_unlock(&nlm_host_mutex); return host; } } } - up(&nlm_host_sema); + mutex_unlock(&nlm_host_mutex); return NULL; } @@ -191,11 +194,12 @@ nlm_bind_host(struct nlm_host *host) xprt->resvport = 1; /* NLM requires a reserved port */ /* Existing NLM servers accept AUTH_UNIX only */ - clnt = rpc_create_client(xprt, host->h_name, &nlm_program, + clnt = rpc_new_client(xprt, host->h_name, &nlm_program, host->h_version, RPC_AUTH_UNIX); if (IS_ERR(clnt)) goto forgetit; clnt->cl_autobind = 1; /* turn on pmap queries */ + clnt->cl_softrtry = 1; /* All queries are soft */ host->h_rpcclnt = clnt; } @@ -242,8 +246,12 @@ void nlm_release_host(struct nlm_host *host) { if (host != NULL) { dprintk("lockd: release host %s\n", host->h_name); - atomic_dec(&host->h_count); BUG_ON(atomic_read(&host->h_count) < 0); + if (atomic_dec_and_test(&host->h_count)) { + BUG_ON(!list_empty(&host->h_lockowners)); + BUG_ON(!list_empty(&host->h_granted)); + BUG_ON(!list_empty(&host->h_reclaim)); + } } } @@ -258,7 +266,7 @@ nlm_shutdown_hosts(void) int i; dprintk("lockd: shutting down host module\n"); - down(&nlm_host_sema); + mutex_lock(&nlm_host_mutex); /* First, make all hosts eligible for gc */ dprintk("lockd: nuking all hosts...\n"); @@ -269,7 +277,7 @@ nlm_shutdown_hosts(void) /* Then, perform a garbage collection pass */ nlm_gc_hosts(); - up(&nlm_host_sema); + mutex_unlock(&nlm_host_mutex); /* complain if any hosts are left */ if (nrhosts) { @@ -331,7 +339,6 @@ nlm_gc_hosts(void) rpc_destroy_client(host->h_rpcclnt); } } - BUG_ON(!list_empty(&host->h_lockowners)); kfree(host); nrhosts--; } diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c index 0edc03e67966..3fc683f46b3e 100644 --- a/fs/lockd/mon.c +++ b/fs/lockd/mon.c @@ -35,6 +35,10 @@ nsm_mon_unmon(struct nlm_host *host, u32 proc, struct nsm_res *res) struct rpc_clnt *clnt; int status; struct nsm_args args; + struct rpc_message msg = { + .rpc_argp = &args, + .rpc_resp = res, + }; clnt = nsm_create(); if (IS_ERR(clnt)) { @@ -49,7 +53,8 @@ nsm_mon_unmon(struct nlm_host *host, u32 proc, struct nsm_res *res) args.proc = NLMPROC_NSM_NOTIFY; memset(res, 0, sizeof(*res)); - status = rpc_call(clnt, proc, &args, res, 0); + msg.rpc_proc = &clnt->cl_procinfo[proc]; + status = rpc_call_sync(clnt, &msg, 0); if (status < 0) printk(KERN_DEBUG "nsm_mon_unmon: rpc failed, status=%d\n", status); @@ -214,18 +219,22 @@ static struct 
rpc_procinfo nsm_procedures[] = { .p_encode = (kxdrproc_t) xdr_encode_mon, .p_decode = (kxdrproc_t) xdr_decode_stat_res, .p_bufsiz = MAX(SM_mon_sz, SM_monres_sz) << 2, + .p_statidx = SM_MON, + .p_name = "MONITOR", }, [SM_UNMON] = { .p_proc = SM_UNMON, .p_encode = (kxdrproc_t) xdr_encode_unmon, .p_decode = (kxdrproc_t) xdr_decode_stat, .p_bufsiz = MAX(SM_mon_id_sz, SM_unmonres_sz) << 2, + .p_statidx = SM_UNMON, + .p_name = "UNMONITOR", }, }; static struct rpc_version nsm_version1 = { - .number = 1, - .nrprocs = sizeof(nsm_procedures)/sizeof(nsm_procedures[0]), + .number = 1, + .nrprocs = ARRAY_SIZE(nsm_procedures), .procs = nsm_procedures }; @@ -238,7 +247,7 @@ static struct rpc_stat nsm_stats; static struct rpc_program nsm_program = { .name = "statd", .number = SM_PROGRAM, - .nrvers = sizeof(nsm_version)/sizeof(nsm_version[0]), + .nrvers = ARRAY_SIZE(nsm_version), .version = nsm_version, .stats = &nsm_stats }; diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 71a30b416d1a..fd56c8872f34 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -25,6 +25,7 @@ #include <linux/slab.h> #include <linux/smp.h> #include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/stats.h> @@ -43,13 +44,13 @@ static struct svc_program nlmsvc_program; struct nlmsvc_binding * nlmsvc_ops; EXPORT_SYMBOL(nlmsvc_ops); -static DECLARE_MUTEX(nlmsvc_sema); +static DEFINE_MUTEX(nlmsvc_mutex); static unsigned int nlmsvc_users; static pid_t nlmsvc_pid; int nlmsvc_grace_period; unsigned long nlmsvc_timeout; -static DECLARE_MUTEX_LOCKED(lockd_start); +static DECLARE_COMPLETION(lockd_start_done); static DECLARE_WAIT_QUEUE_HEAD(lockd_exit); /* @@ -112,7 +113,7 @@ lockd(struct svc_rqst *rqstp) * Let our maker know we're running. */ nlmsvc_pid = current->pid; - up(&lockd_start); + complete(&lockd_start_done); daemonize("lockd"); @@ -215,7 +216,7 @@ lockd_up(void) struct svc_serv * serv; int error = 0; - down(&nlmsvc_sema); + mutex_lock(&nlmsvc_mutex); /* * Unconditionally increment the user count ... this is * the number of clients who _want_ a lockd process. 
@@ -263,7 +264,7 @@ lockd_up(void) "lockd_up: create thread failed, error=%d\n", error); goto destroy_and_out; } - down(&lockd_start); + wait_for_completion(&lockd_start_done); /* * Note: svc_serv structures have an initial use count of 1, @@ -272,7 +273,7 @@ lockd_up(void) destroy_and_out: svc_destroy(serv); out: - up(&nlmsvc_sema); + mutex_unlock(&nlmsvc_mutex); return error; } EXPORT_SYMBOL(lockd_up); @@ -285,7 +286,7 @@ lockd_down(void) { static int warned; - down(&nlmsvc_sema); + mutex_lock(&nlmsvc_mutex); if (nlmsvc_users) { if (--nlmsvc_users) goto out; @@ -315,7 +316,7 @@ lockd_down(void) recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); out: - up(&nlmsvc_sema); + mutex_unlock(&nlmsvc_mutex); } EXPORT_SYMBOL(lockd_down); @@ -509,7 +510,7 @@ static struct svc_version * nlmsvc_version[] = { static struct svc_stat nlmsvc_stats; -#define NLM_NRVERS (sizeof(nlmsvc_version)/sizeof(nlmsvc_version[0])) +#define NLM_NRVERS ARRAY_SIZE(nlmsvc_version) static struct svc_program nlmsvc_program = { .pg_prog = NLM_PROGRAM, /* program number */ .pg_nvers = NLM_NRVERS, /* number of entries in nlmsvc_version */ diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c index b10f913aa06a..a2dd9ccb9b32 100644 --- a/fs/lockd/svc4proc.c +++ b/fs/lockd/svc4proc.c @@ -21,10 +21,6 @@ #define NLMDBG_FACILITY NLMDBG_CLIENT -static u32 nlm4svc_callback(struct svc_rqst *, u32, struct nlm_res *); - -static const struct rpc_call_ops nlm4svc_callback_ops; - /* * Obtain client and file from arguments */ @@ -234,83 +230,89 @@ nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, } /* + * This is the generic lockd callback for async RPC calls + */ +static void nlm4svc_callback_exit(struct rpc_task *task, void *data) +{ + dprintk("lockd: %4d callback returned %d\n", task->tk_pid, + -task->tk_status); +} + +static void nlm4svc_callback_release(void *data) +{ + nlm_release_call(data); +} + +static const struct rpc_call_ops nlm4svc_callback_ops = { + .rpc_call_done = nlm4svc_callback_exit, + .rpc_release = nlm4svc_callback_release, +}; + +/* * `Async' versions of the above service routines. They aren't really, * because we send the callback before the reply proper. I hope this * doesn't break any clients. 
*/ -static int -nlm4svc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp, - void *resp) +static int nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *argp, + int (*func)(struct svc_rqst *, struct nlm_args *, struct nlm_res *)) { - struct nlm_res res; - u32 stat; + struct nlm_host *host; + struct nlm_rqst *call; + int stat; - dprintk("lockd: TEST_MSG called\n"); - memset(&res, 0, sizeof(res)); + host = nlmsvc_lookup_host(rqstp); + if (host == NULL) + return rpc_system_err; + + call = nlm_alloc_call(host); + if (call == NULL) + return rpc_system_err; - if ((stat = nlm4svc_proc_test(rqstp, argp, &res)) == 0) - stat = nlm4svc_callback(rqstp, NLMPROC_TEST_RES, &res); - return stat; + stat = func(rqstp, argp, &call->a_res); + if (stat != 0) { + nlm_release_call(call); + return stat; + } + + call->a_flags = RPC_TASK_ASYNC; + if (nlm_async_reply(call, proc, &nlm4svc_callback_ops) < 0) + return rpc_system_err; + return rpc_success; } -static int -nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, +static int nlm4svc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { - struct nlm_res res; - u32 stat; + dprintk("lockd: TEST_MSG called\n"); + return nlm4svc_callback(rqstp, NLMPROC_TEST_RES, argp, nlm4svc_proc_test); +} +static int nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, + void *resp) +{ dprintk("lockd: LOCK_MSG called\n"); - memset(&res, 0, sizeof(res)); - - if ((stat = nlm4svc_proc_lock(rqstp, argp, &res)) == 0) - stat = nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, &res); - return stat; + return nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlm4svc_proc_lock); } -static int -nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp, +static int nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { - struct nlm_res res; - u32 stat; - dprintk("lockd: CANCEL_MSG called\n"); - memset(&res, 0, sizeof(res)); - - if ((stat = nlm4svc_proc_cancel(rqstp, argp, &res)) == 0) - stat = nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, &res); - return stat; + return nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlm4svc_proc_cancel); } -static int -nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, +static int nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { - struct nlm_res res; - u32 stat; - dprintk("lockd: UNLOCK_MSG called\n"); - memset(&res, 0, sizeof(res)); - - if ((stat = nlm4svc_proc_unlock(rqstp, argp, &res)) == 0) - stat = nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, &res); - return stat; + return nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlm4svc_proc_unlock); } -static int -nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp, +static int nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { - struct nlm_res res; - u32 stat; - dprintk("lockd: GRANTED_MSG called\n"); - memset(&res, 0, sizeof(res)); - - if ((stat = nlm4svc_proc_granted(rqstp, argp, &res)) == 0) - stat = nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, &res); - return stat; + return nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, argp, nlm4svc_proc_granted); } /* @@ -472,55 +474,6 @@ nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp, /* - * This is the generic lockd callback for async RPC calls - */ -static u32 -nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp) -{ - struct nlm_host *host; - struct nlm_rqst *call; - - if (!(call = nlmclnt_alloc_call())) - 
return rpc_system_err; - - host = nlmclnt_lookup_host(&rqstp->rq_addr, - rqstp->rq_prot, rqstp->rq_vers); - if (!host) { - kfree(call); - return rpc_system_err; - } - - call->a_flags = RPC_TASK_ASYNC; - call->a_host = host; - memcpy(&call->a_args, resp, sizeof(*resp)); - - if (nlmsvc_async_call(call, proc, &nlm4svc_callback_ops) < 0) - goto error; - - return rpc_success; - error: - kfree(call); - nlm_release_host(host); - return rpc_system_err; -} - -static void nlm4svc_callback_exit(struct rpc_task *task, void *data) -{ - struct nlm_rqst *call = data; - - if (task->tk_status < 0) { - dprintk("lockd: %4d callback failed (errno = %d)\n", - task->tk_pid, -task->tk_status); - } - nlm_release_host(call->a_host); - kfree(call); -} - -static const struct rpc_call_ops nlm4svc_callback_ops = { - .rpc_call_done = nlm4svc_callback_exit, -}; - -/* * NLM Server procedures. */ diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index 9cfced65d4a2..d2b66bad7d50 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -39,9 +39,12 @@ #define nlm_deadlock nlm_lck_denied #endif +static void nlmsvc_release_block(struct nlm_block *block); static void nlmsvc_insert_block(struct nlm_block *block, unsigned long); static int nlmsvc_remove_block(struct nlm_block *block); +static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock); +static void nlmsvc_freegrantargs(struct nlm_rqst *call); static const struct rpc_call_ops nlmsvc_grant_ops; /* @@ -58,6 +61,7 @@ nlmsvc_insert_block(struct nlm_block *block, unsigned long when) struct nlm_block **bp, *b; dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when); + kref_get(&block->b_count); if (block->b_queued) nlmsvc_remove_block(block); bp = &nlm_blocked; @@ -90,6 +94,7 @@ nlmsvc_remove_block(struct nlm_block *block) if (b == block) { *bp = block->b_next; block->b_queued = 0; + nlmsvc_release_block(block); return 1; } } @@ -98,11 +103,10 @@ nlmsvc_remove_block(struct nlm_block *block) } /* - * Find a block for a given lock and optionally remove it from - * the list. 
+ * Find a block for a given lock */ static struct nlm_block * -nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock, int remove) +nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock) { struct nlm_block **head, *block; struct file_lock *fl; @@ -112,17 +116,14 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock, int remove) (long long)lock->fl.fl_start, (long long)lock->fl.fl_end, lock->fl.fl_type); for (head = &nlm_blocked; (block = *head) != 0; head = &block->b_next) { - fl = &block->b_call.a_args.lock.fl; + fl = &block->b_call->a_args.lock.fl; dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n", block->b_file, fl->fl_pid, (long long)fl->fl_start, (long long)fl->fl_end, fl->fl_type, - nlmdbg_cookie2a(&block->b_call.a_args.cookie)); + nlmdbg_cookie2a(&block->b_call->a_args.cookie)); if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) { - if (remove) { - *head = block->b_next; - block->b_queued = 0; - } + kref_get(&block->b_count); return block; } } @@ -150,11 +151,13 @@ nlmsvc_find_block(struct nlm_cookie *cookie, struct sockaddr_in *sin) for (block = nlm_blocked; block; block = block->b_next) { dprintk("cookie: head of blocked queue %p, block %p\n", nlm_blocked, block); - if (nlm_cookie_match(&block->b_call.a_args.cookie,cookie) + if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie) && nlm_cmp_addr(sin, &block->b_host->h_addr)) break; } + if (block != NULL) + kref_get(&block->b_count); return block; } @@ -174,27 +177,30 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file, { struct nlm_block *block; struct nlm_host *host; - struct nlm_rqst *call; + struct nlm_rqst *call = NULL; /* Create host handle for callback */ - host = nlmclnt_lookup_host(&rqstp->rq_addr, - rqstp->rq_prot, rqstp->rq_vers); + host = nlmsvc_lookup_host(rqstp); if (host == NULL) return NULL; + call = nlm_alloc_call(host); + if (call == NULL) + return NULL; + /* Allocate memory for block, and initialize arguments */ - if (!(block = (struct nlm_block *) kmalloc(sizeof(*block), GFP_KERNEL))) + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (block == NULL) goto failed; - memset(block, 0, sizeof(*block)); - locks_init_lock(&block->b_call.a_args.lock.fl); - locks_init_lock(&block->b_call.a_res.lock.fl); + kref_init(&block->b_count); - if (!nlmclnt_setgrantargs(&block->b_call, lock)) + if (!nlmsvc_setgrantargs(call, lock)) goto failed_free; /* Set notifier function for VFS, and init args */ - block->b_call.a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations; - block->b_call.a_args.cookie = *cookie; /* see above */ + call->a_args.lock.fl.fl_flags |= FL_SLEEP; + call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations; + call->a_args.cookie = *cookie; /* see above */ dprintk("lockd: created block %p...\n", block); @@ -202,22 +208,23 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file, block->b_daemon = rqstp->rq_server; block->b_host = host; block->b_file = file; + file->f_count++; /* Add to file's list of blocks */ block->b_fnext = file->f_blocks; file->f_blocks = block; /* Set up RPC arguments for callback */ - call = &block->b_call; - call->a_host = host; + block->b_call = call; call->a_flags = RPC_TASK_ASYNC; + call->a_block = block; return block; failed_free: kfree(block); failed: - nlm_release_host(host); + nlm_release_call(call); return NULL; } @@ -227,29 +234,26 @@ failed: * It is the caller's responsibility to check whether the file * can be closed hereafter. 
*/ -static int -nlmsvc_delete_block(struct nlm_block *block, int unlock) +static int nlmsvc_unlink_block(struct nlm_block *block) { - struct file_lock *fl = &block->b_call.a_args.lock.fl; - struct nlm_file *file = block->b_file; - struct nlm_block **bp; - int status = 0; - - dprintk("lockd: deleting block %p...\n", block); + int status; + dprintk("lockd: unlinking block %p...\n", block); /* Remove block from list */ + status = posix_unblock_lock(block->b_file->f_file, &block->b_call->a_args.lock.fl); nlmsvc_remove_block(block); - if (unlock) - status = posix_unblock_lock(file->f_file, fl); + return status; +} - /* If the block is in the middle of a GRANT callback, - * don't kill it yet. */ - if (block->b_incall) { - nlmsvc_insert_block(block, NLM_NEVER); - block->b_done = 1; - return status; - } +static void nlmsvc_free_block(struct kref *kref) +{ + struct nlm_block *block = container_of(kref, struct nlm_block, b_count); + struct nlm_file *file = block->b_file; + struct nlm_block **bp; + dprintk("lockd: freeing block %p...\n", block); + + down(&file->f_sema); /* Remove block from file's list of blocks */ for (bp = &file->f_blocks; *bp; bp = &(*bp)->b_fnext) { if (*bp == block) { @@ -257,36 +261,93 @@ nlmsvc_delete_block(struct nlm_block *block, int unlock) break; } } + up(&file->f_sema); - if (block->b_host) - nlm_release_host(block->b_host); - nlmclnt_freegrantargs(&block->b_call); + nlmsvc_freegrantargs(block->b_call); + nlm_release_call(block->b_call); + nlm_release_file(block->b_file); kfree(block); - return status; +} + +static void nlmsvc_release_block(struct nlm_block *block) +{ + if (block != NULL) + kref_put(&block->b_count, nlmsvc_free_block); +} + +static void nlmsvc_act_mark(struct nlm_host *host, struct nlm_file *file) +{ + struct nlm_block *block; + + down(&file->f_sema); + for (block = file->f_blocks; block != NULL; block = block->b_fnext) + block->b_host->h_inuse = 1; + up(&file->f_sema); +} + +static void nlmsvc_act_unlock(struct nlm_host *host, struct nlm_file *file) +{ + struct nlm_block *block; + +restart: + down(&file->f_sema); + for (block = file->f_blocks; block != NULL; block = block->b_fnext) { + if (host != NULL && host != block->b_host) + continue; + if (!block->b_queued) + continue; + kref_get(&block->b_count); + up(&file->f_sema); + nlmsvc_unlink_block(block); + nlmsvc_release_block(block); + goto restart; + } + up(&file->f_sema); } /* * Loop over all blocks and perform the action specified. * (NLM_ACT_CHECK handled by nlmsvc_inspect_file). */ -int +void nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file, int action) { - struct nlm_block *block, *next; - /* XXX: Will everything get cleaned up if we don't unlock here? */ + if (action == NLM_ACT_MARK) + nlmsvc_act_mark(host, file); + else + nlmsvc_act_unlock(host, file); +} - down(&file->f_sema); - for (block = file->f_blocks; block; block = next) { - next = block->b_fnext; - if (action == NLM_ACT_MARK) - block->b_host->h_inuse = 1; - else if (action == NLM_ACT_UNLOCK) { - if (host == NULL || host == block->b_host) - nlmsvc_delete_block(block, 1); - } +/* + * Initialize arguments for GRANTED call. The nlm_rqst structure + * has been cleared already. 
+ */ +static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock) +{ + locks_copy_lock(&call->a_args.lock.fl, &lock->fl); + memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh)); + call->a_args.lock.caller = system_utsname.nodename; + call->a_args.lock.oh.len = lock->oh.len; + + /* set default data area */ + call->a_args.lock.oh.data = call->a_owner; + call->a_args.lock.svid = lock->fl.fl_pid; + + if (lock->oh.len > NLMCLNT_OHSIZE) { + void *data = kmalloc(lock->oh.len, GFP_KERNEL); + if (!data) + return 0; + call->a_args.lock.oh.data = (u8 *) data; } - up(&file->f_sema); - return 0; + + memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len); + return 1; +} + +static void nlmsvc_freegrantargs(struct nlm_rqst *call) +{ + if (call->a_args.lock.oh.data != call->a_owner) + kfree(call->a_args.lock.oh.data); } /* @@ -297,9 +358,9 @@ u32 nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, struct nlm_lock *lock, int wait, struct nlm_cookie *cookie) { - struct file_lock *conflock; - struct nlm_block *block; + struct nlm_block *block, *newblock = NULL; int error; + u32 ret; dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n", file->f_file->f_dentry->d_inode->i_sb->s_id, @@ -310,69 +371,65 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, wait); - /* Get existing block (in case client is busy-waiting) */ - block = nlmsvc_lookup_block(file, lock, 0); - - lock->fl.fl_flags |= FL_LOCKD; - + lock->fl.fl_flags &= ~FL_SLEEP; again: /* Lock file against concurrent access */ down(&file->f_sema); + /* Get existing block (in case client is busy-waiting) */ + block = nlmsvc_lookup_block(file, lock); + if (block == NULL) { + if (newblock != NULL) + lock = &newblock->b_call->a_args.lock; + } else + lock = &block->b_call->a_args.lock; - if (!(conflock = posix_test_lock(file->f_file, &lock->fl))) { - error = posix_lock_file(file->f_file, &lock->fl); + error = posix_lock_file(file->f_file, &lock->fl); + lock->fl.fl_flags &= ~FL_SLEEP; - if (block) - nlmsvc_delete_block(block, 0); - up(&file->f_sema); + dprintk("lockd: posix_lock_file returned %d\n", error); - dprintk("lockd: posix_lock_file returned %d\n", -error); - switch(-error) { + switch(error) { case 0: - return nlm_granted; - case EDEADLK: - return nlm_deadlock; - case EAGAIN: - return nlm_lck_denied; + ret = nlm_granted; + goto out; + case -EAGAIN: + break; + case -EDEADLK: + ret = nlm_deadlock; + goto out; default: /* includes ENOLCK */ - return nlm_lck_denied_nolocks; - } + ret = nlm_lck_denied_nolocks; + goto out; } - if (!wait) { - up(&file->f_sema); - return nlm_lck_denied; - } + ret = nlm_lck_denied; + if (!wait) + goto out; - if (posix_locks_deadlock(&lock->fl, conflock)) { - up(&file->f_sema); - return nlm_deadlock; - } + ret = nlm_lck_blocked; + if (block != NULL) + goto out; /* If we don't have a block, create and initialize it. Then * retry because we may have slept in kmalloc. 
*/ /* We have to release f_sema as nlmsvc_create_block may try to * to claim it while doing host garbage collection */ - if (block == NULL) { + if (newblock == NULL) { up(&file->f_sema); dprintk("lockd: blocking on this lock (allocating).\n"); - if (!(block = nlmsvc_create_block(rqstp, file, lock, cookie))) + if (!(newblock = nlmsvc_create_block(rqstp, file, lock, cookie))) return nlm_lck_denied_nolocks; goto again; } /* Append to list of blocked */ - nlmsvc_insert_block(block, NLM_NEVER); - - if (list_empty(&block->b_call.a_args.lock.fl.fl_block)) { - /* Now add block to block list of the conflicting lock - if we haven't done so. */ - dprintk("lockd: blocking on this lock.\n"); - posix_block_lock(conflock, &block->b_call.a_args.lock.fl); - } - + nlmsvc_insert_block(newblock, NLM_NEVER); +out: up(&file->f_sema); - return nlm_lck_blocked; + nlmsvc_release_block(newblock); + nlmsvc_release_block(block); + dprintk("lockd: nlmsvc_lock returned %u\n", ret); + return ret; } /* @@ -382,8 +439,6 @@ u32 nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock, struct nlm_lock *conflock) { - struct file_lock *fl; - dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n", file->f_file->f_dentry->d_inode->i_sb->s_id, file->f_file->f_dentry->d_inode->i_ino, @@ -391,13 +446,14 @@ nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); - if ((fl = posix_test_lock(file->f_file, &lock->fl)) != NULL) { + if (posix_test_lock(file->f_file, &lock->fl, &conflock->fl)) { dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n", - fl->fl_type, (long long)fl->fl_start, - (long long)fl->fl_end); + conflock->fl.fl_type, + (long long)conflock->fl.fl_start, + (long long)conflock->fl.fl_end); conflock->caller = "somehost"; /* FIXME */ conflock->oh.len = 0; /* don't return OH info */ - conflock->fl = *fl; + conflock->svid = conflock->fl.fl_pid; return nlm_lck_denied; } @@ -453,9 +509,12 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock) (long long)lock->fl.fl_end); down(&file->f_sema); - if ((block = nlmsvc_lookup_block(file, lock, 1)) != NULL) - status = nlmsvc_delete_block(block, 1); + block = nlmsvc_lookup_block(file, lock); up(&file->f_sema); + if (block != NULL) { + status = nlmsvc_unlink_block(block); + nlmsvc_release_block(block); + } return status ? nlm_lck_denied : nlm_granted; } @@ -473,7 +532,7 @@ nlmsvc_notify_blocked(struct file_lock *fl) dprintk("lockd: VFS unblock notification for block %p\n", fl); for (bp = &nlm_blocked; (block = *bp) != 0; bp = &block->b_next) { - if (nlm_compare_locks(&block->b_call.a_args.lock.fl, fl)) { + if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { nlmsvc_insert_block(block, 0); svc_wake_up(block->b_daemon); return; @@ -508,17 +567,13 @@ static void nlmsvc_grant_blocked(struct nlm_block *block) { struct nlm_file *file = block->b_file; - struct nlm_lock *lock = &block->b_call.a_args.lock; - struct file_lock *conflock; + struct nlm_lock *lock = &block->b_call->a_args.lock; int error; dprintk("lockd: grant blocked lock %p\n", block); - /* First thing is lock the file */ - down(&file->f_sema); - /* Unlink block request from list */ - nlmsvc_remove_block(block); + nlmsvc_unlink_block(block); /* If b_granted is true this means we've been here before. 
* Just retry the grant callback, possibly refreshing the RPC @@ -529,24 +584,21 @@ nlmsvc_grant_blocked(struct nlm_block *block) } /* Try the lock operation again */ - if ((conflock = posix_test_lock(file->f_file, &lock->fl)) != NULL) { - /* Bummer, we blocked again */ + lock->fl.fl_flags |= FL_SLEEP; + error = posix_lock_file(file->f_file, &lock->fl); + lock->fl.fl_flags &= ~FL_SLEEP; + + switch (error) { + case 0: + break; + case -EAGAIN: dprintk("lockd: lock still blocked\n"); nlmsvc_insert_block(block, NLM_NEVER); - posix_block_lock(conflock, &lock->fl); - up(&file->f_sema); return; - } - - /* Alright, no conflicting lock. Now lock it for real. If the - * following yields an error, this is most probably due to low - * memory. Retry the lock in a few seconds. - */ - if ((error = posix_lock_file(file->f_file, &lock->fl)) < 0) { + default: printk(KERN_WARNING "lockd: unexpected error %d in %s!\n", -error, __FUNCTION__); nlmsvc_insert_block(block, 10 * HZ); - up(&file->f_sema); return; } @@ -554,17 +606,15 @@ callback: /* Lock was granted by VFS. */ dprintk("lockd: GRANTing blocked lock.\n"); block->b_granted = 1; - block->b_incall = 1; /* Schedule next grant callback in 30 seconds */ nlmsvc_insert_block(block, 30 * HZ); /* Call the client */ - nlm_get_host(block->b_call.a_host); - if (nlmsvc_async_call(&block->b_call, NLMPROC_GRANTED_MSG, + kref_get(&block->b_count); + if (nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, &nlmsvc_grant_ops) < 0) - nlm_release_host(block->b_call.a_host); - up(&file->f_sema); + nlmsvc_release_block(block); } /* @@ -578,20 +628,10 @@ callback: static void nlmsvc_grant_callback(struct rpc_task *task, void *data) { struct nlm_rqst *call = data; - struct nlm_block *block; + struct nlm_block *block = call->a_block; unsigned long timeout; - struct sockaddr_in *peer_addr = RPC_PEERADDR(task->tk_client); dprintk("lockd: GRANT_MSG RPC callback\n"); - dprintk("callback: looking for cookie %s, host (%u.%u.%u.%u)\n", - nlmdbg_cookie2a(&call->a_args.cookie), - NIPQUAD(peer_addr->sin_addr.s_addr)); - if (!(block = nlmsvc_find_block(&call->a_args.cookie, peer_addr))) { - dprintk("lockd: no block for cookie %s, host (%u.%u.%u.%u)\n", - nlmdbg_cookie2a(&call->a_args.cookie), - NIPQUAD(peer_addr->sin_addr.s_addr)); - return; - } /* Technically, we should down the file semaphore here. Since we * move the block towards the head of the queue only, no harm @@ -608,13 +648,18 @@ static void nlmsvc_grant_callback(struct rpc_task *task, void *data) } nlmsvc_insert_block(block, timeout); svc_wake_up(block->b_daemon); - block->b_incall = 0; +} - nlm_release_host(call->a_host); +void nlmsvc_grant_release(void *data) +{ + struct nlm_rqst *call = data; + + nlmsvc_release_block(call->a_block); } static const struct rpc_call_ops nlmsvc_grant_ops = { .rpc_call_done = nlmsvc_grant_callback, + .rpc_release = nlmsvc_grant_release, }; /* @@ -634,25 +679,17 @@ nlmsvc_grant_reply(struct svc_rqst *rqstp, struct nlm_cookie *cookie, u32 status return; file = block->b_file; - file->f_count++; - down(&file->f_sema); - block = nlmsvc_find_block(cookie, &rqstp->rq_addr); if (block) { if (status == NLM_LCK_DENIED_GRACE_PERIOD) { /* Try again in a couple of seconds */ nlmsvc_insert_block(block, 10 * HZ); - up(&file->f_sema); } else { /* Lock is now held by client, or has been rejected. * In both cases, the block should be removed. 
*/ - up(&file->f_sema); - if (status == NLM_LCK_GRANTED) - nlmsvc_delete_block(block, 0); - else - nlmsvc_delete_block(block, 1); + nlmsvc_unlink_block(block); } } - nlm_release_file(file); + nlmsvc_release_block(block); } /* @@ -675,10 +712,12 @@ nlmsvc_retry_blocked(void) break; dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n", block, block->b_when, block->b_done); + kref_get(&block->b_count); if (block->b_done) - nlmsvc_delete_block(block, 0); + nlmsvc_unlink_block(block); else nlmsvc_grant_blocked(block); + nlmsvc_release_block(block); } if ((block = nlm_blocked) && block->b_when != NLM_NEVER) diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index 35681d9cf1fc..d210cf304e92 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c @@ -22,10 +22,6 @@ #define NLMDBG_FACILITY NLMDBG_CLIENT -static u32 nlmsvc_callback(struct svc_rqst *, u32, struct nlm_res *); - -static const struct rpc_call_ops nlmsvc_callback_ops; - #ifdef CONFIG_LOCKD_V4 static u32 cast_to_nlm(u32 status, u32 vers) @@ -262,83 +258,91 @@ nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, } /* + * This is the generic lockd callback for async RPC calls + */ +static void nlmsvc_callback_exit(struct rpc_task *task, void *data) +{ + dprintk("lockd: %4d callback returned %d\n", task->tk_pid, + -task->tk_status); +} + +static void nlmsvc_callback_release(void *data) +{ + nlm_release_call(data); +} + +static const struct rpc_call_ops nlmsvc_callback_ops = { + .rpc_call_done = nlmsvc_callback_exit, + .rpc_release = nlmsvc_callback_release, +}; + +/* * `Async' versions of the above service routines. They aren't really, * because we send the callback before the reply proper. I hope this * doesn't break any clients. */ -static int -nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp, - void *resp) +static int nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *argp, + int (*func)(struct svc_rqst *, struct nlm_args *, struct nlm_res *)) { - struct nlm_res res; - u32 stat; + struct nlm_host *host; + struct nlm_rqst *call; + int stat; - dprintk("lockd: TEST_MSG called\n"); - memset(&res, 0, sizeof(res)); + host = nlmsvc_lookup_host(rqstp); + if (host == NULL) + return rpc_system_err; - if ((stat = nlmsvc_proc_test(rqstp, argp, &res)) == 0) - stat = nlmsvc_callback(rqstp, NLMPROC_TEST_RES, &res); - return stat; + call = nlm_alloc_call(host); + if (call == NULL) + return rpc_system_err; + + stat = func(rqstp, argp, &call->a_res); + if (stat != 0) { + nlm_release_call(call); + return stat; + } + + call->a_flags = RPC_TASK_ASYNC; + if (nlm_async_reply(call, proc, &nlmsvc_callback_ops) < 0) + return rpc_system_err; + return rpc_success; } -static int -nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, +static int nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { - struct nlm_res res; - u32 stat; + dprintk("lockd: TEST_MSG called\n"); + return nlmsvc_callback(rqstp, NLMPROC_TEST_RES, argp, nlmsvc_proc_test); +} +static int nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, + void *resp) +{ dprintk("lockd: LOCK_MSG called\n"); - memset(&res, 0, sizeof(res)); - - if ((stat = nlmsvc_proc_lock(rqstp, argp, &res)) == 0) - stat = nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, &res); - return stat; + return nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlmsvc_proc_lock); } -static int -nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp, +static int nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args 
*argp, void *resp) { - struct nlm_res res; - u32 stat; - dprintk("lockd: CANCEL_MSG called\n"); - memset(&res, 0, sizeof(res)); - - if ((stat = nlmsvc_proc_cancel(rqstp, argp, &res)) == 0) - stat = nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, &res); - return stat; + return nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlmsvc_proc_cancel); } static int nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { - struct nlm_res res; - u32 stat; - dprintk("lockd: UNLOCK_MSG called\n"); - memset(&res, 0, sizeof(res)); - - if ((stat = nlmsvc_proc_unlock(rqstp, argp, &res)) == 0) - stat = nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, &res); - return stat; + return nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlmsvc_proc_unlock); } static int nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { - struct nlm_res res; - u32 stat; - dprintk("lockd: GRANTED_MSG called\n"); - memset(&res, 0, sizeof(res)); - - if ((stat = nlmsvc_proc_granted(rqstp, argp, &res)) == 0) - stat = nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, &res); - return stat; + return nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, argp, nlmsvc_proc_granted); } /* @@ -497,55 +501,6 @@ nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp, } /* - * This is the generic lockd callback for async RPC calls - */ -static u32 -nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp) -{ - struct nlm_host *host; - struct nlm_rqst *call; - - if (!(call = nlmclnt_alloc_call())) - return rpc_system_err; - - host = nlmclnt_lookup_host(&rqstp->rq_addr, - rqstp->rq_prot, rqstp->rq_vers); - if (!host) { - kfree(call); - return rpc_system_err; - } - - call->a_flags = RPC_TASK_ASYNC; - call->a_host = host; - memcpy(&call->a_args, resp, sizeof(*resp)); - - if (nlmsvc_async_call(call, proc, &nlmsvc_callback_ops) < 0) - goto error; - - return rpc_success; - error: - nlm_release_host(host); - kfree(call); - return rpc_system_err; -} - -static void nlmsvc_callback_exit(struct rpc_task *task, void *data) -{ - struct nlm_rqst *call = data; - - if (task->tk_status < 0) { - dprintk("lockd: %4d callback failed (errno = %d)\n", - task->tk_pid, -task->tk_status); - } - nlm_release_host(call->a_host); - kfree(call); -} - -static const struct rpc_call_ops nlmsvc_callback_ops = { - .rpc_call_done = nlmsvc_callback_exit, -}; - -/* * NLM Server procedures. */ diff --git a/fs/lockd/svcshare.c b/fs/lockd/svcshare.c index 4943fb7836ce..27288c83da96 100644 --- a/fs/lockd/svcshare.c +++ b/fs/lockd/svcshare.c @@ -88,7 +88,7 @@ nlmsvc_unshare_file(struct nlm_host *host, struct nlm_file *file, * Traverse all shares for a given file (and host). * NLM_ACT_CHECK is handled by nlmsvc_inspect_file. 
*/ -int +void nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file, int action) { struct nlm_share *share, **shpp; @@ -106,6 +106,4 @@ nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file, int action) } shpp = &share->s_next; } - - return 0; } diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index 62f4a385177f..a570e5c8a930 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -11,6 +11,7 @@ #include <linux/string.h> #include <linux/time.h> #include <linux/in.h> +#include <linux/mutex.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/clnt.h> #include <linux/nfsd/nfsfh.h> @@ -28,7 +29,7 @@ #define FILE_HASH_BITS 5 #define FILE_NRHASH (1<<FILE_HASH_BITS) static struct nlm_file * nlm_files[FILE_NRHASH]; -static DECLARE_MUTEX(nlm_file_sema); +static DEFINE_MUTEX(nlm_file_mutex); #ifdef NFSD_DEBUG static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f) @@ -91,7 +92,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result, hash = file_hash(f); /* Lock file table */ - down(&nlm_file_sema); + mutex_lock(&nlm_file_mutex); for (file = nlm_files[hash]; file; file = file->f_next) if (!nfs_compare_fh(&file->f_handle, f)) @@ -130,7 +131,7 @@ found: nfserr = 0; out_unlock: - up(&nlm_file_sema); + mutex_unlock(&nlm_file_mutex); return nfserr; out_free: @@ -182,7 +183,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file, int action) again: file->f_locks = 0; for (fl = inode->i_flock; fl; fl = fl->fl_next) { - if (!(fl->fl_flags & FL_LOCKD)) + if (fl->fl_lmops != &nlmsvc_lock_operations) continue; /* update current lock count */ @@ -224,9 +225,8 @@ nlm_inspect_file(struct nlm_host *host, struct nlm_file *file, int action) if (file->f_count || file->f_blocks || file->f_shares) return 1; } else { - if (nlmsvc_traverse_blocks(host, file, action) - || nlmsvc_traverse_shares(host, file, action)) - return 1; + nlmsvc_traverse_blocks(host, file, action); + nlmsvc_traverse_shares(host, file, action); } return nlm_traverse_locks(host, file, action); } @@ -240,14 +240,14 @@ nlm_traverse_files(struct nlm_host *host, int action) struct nlm_file *file, **fp; int i; - down(&nlm_file_sema); + mutex_lock(&nlm_file_mutex); for (i = 0; i < FILE_NRHASH; i++) { fp = nlm_files + i; while ((file = *fp) != NULL) { /* Traverse locks, blocks and shares of this file * and update file->f_locks count */ if (nlm_inspect_file(host, file, action)) { - up(&nlm_file_sema); + mutex_unlock(&nlm_file_mutex); return 1; } @@ -262,7 +262,7 @@ nlm_traverse_files(struct nlm_host *host, int action) } } } - up(&nlm_file_sema); + mutex_unlock(&nlm_file_mutex); return 0; } @@ -282,7 +282,7 @@ nlm_release_file(struct nlm_file *file) file, file->f_count); /* Lock file table */ - down(&nlm_file_sema); + mutex_lock(&nlm_file_mutex); /* If there are no more locks etc, delete the file */ if(--file->f_count == 0) { @@ -290,7 +290,7 @@ nlm_release_file(struct nlm_file *file) nlm_delete_file(file); } - up(&nlm_file_sema); + mutex_unlock(&nlm_file_mutex); } /* diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c index 200fbda2c6d1..f22a3764461a 100644 --- a/fs/lockd/xdr.c +++ b/fs/lockd/xdr.c @@ -131,10 +131,11 @@ nlm_decode_lock(u32 *p, struct nlm_lock *lock) || !(p = nlm_decode_fh(p, &lock->fh)) || !(p = nlm_decode_oh(p, &lock->oh))) return NULL; + lock->svid = ntohl(*p++); locks_init_lock(fl); fl->fl_owner = current->files; - fl->fl_pid = ntohl(*p++); + fl->fl_pid = (pid_t)lock->svid; fl->fl_flags = FL_POSIX; fl->fl_type = F_RDLCK; /* as good as anything else */ start = 
ntohl(*p++); @@ -174,7 +175,7 @@ nlm_encode_lock(u32 *p, struct nlm_lock *lock) else len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1); - *p++ = htonl(fl->fl_pid); + *p++ = htonl(lock->svid); *p++ = htonl(start); *p++ = htonl(len); @@ -197,7 +198,7 @@ nlm_encode_testres(u32 *p, struct nlm_res *resp) struct file_lock *fl = &resp->lock.fl; *p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one; - *p++ = htonl(fl->fl_pid); + *p++ = htonl(resp->lock.svid); /* Encode owner handle. */ if (!(p = xdr_encode_netobj(p, &resp->lock.oh))) @@ -298,7 +299,8 @@ nlmsvc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp) memset(lock, 0, sizeof(*lock)); locks_init_lock(&lock->fl); - lock->fl.fl_pid = ~(u32) 0; + lock->svid = ~(u32) 0; + lock->fl.fl_pid = (pid_t)lock->svid; if (!(p = nlm_decode_cookie(p, &argp->cookie)) || !(p = xdr_decode_string_inplace(p, &lock->caller, @@ -415,7 +417,8 @@ nlmclt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) memset(&resp->lock, 0, sizeof(resp->lock)); locks_init_lock(fl); excl = ntohl(*p++); - fl->fl_pid = ntohl(*p++); + resp->lock.svid = ntohl(*p++); + fl->fl_pid = (pid_t)resp->lock.svid; if (!(p = nlm_decode_oh(p, &resp->lock.oh))) return -EIO; @@ -543,7 +546,9 @@ nlmclt_decode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) .p_proc = NLMPROC_##proc, \ .p_encode = (kxdrproc_t) nlmclt_encode_##argtype, \ .p_decode = (kxdrproc_t) nlmclt_decode_##restype, \ - .p_bufsiz = MAX(NLM_##argtype##_sz, NLM_##restype##_sz) << 2 \ + .p_bufsiz = MAX(NLM_##argtype##_sz, NLM_##restype##_sz) << 2, \ + .p_statidx = NLMPROC_##proc, \ + .p_name = #proc, \ } static struct rpc_procinfo nlm_procedures[] = { @@ -599,7 +604,7 @@ static struct rpc_stat nlm_stats; struct rpc_program nlm_program = { .name = "lockd", .number = NLM_PROGRAM, - .nrvers = sizeof(nlm_versions) / sizeof(nlm_versions[0]), + .nrvers = ARRAY_SIZE(nlm_versions), .version = nlm_versions, .stats = &nlm_stats, }; diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c index fdcf105a5303..36eb175ec335 100644 --- a/fs/lockd/xdr4.c +++ b/fs/lockd/xdr4.c @@ -130,10 +130,11 @@ nlm4_decode_lock(u32 *p, struct nlm_lock *lock) || !(p = nlm4_decode_fh(p, &lock->fh)) || !(p = nlm4_decode_oh(p, &lock->oh))) return NULL; + lock->svid = ntohl(*p++); locks_init_lock(fl); fl->fl_owner = current->files; - fl->fl_pid = ntohl(*p++); + fl->fl_pid = (pid_t)lock->svid; fl->fl_flags = FL_POSIX; fl->fl_type = F_RDLCK; /* as good as anything else */ p = xdr_decode_hyper(p, &start); @@ -167,7 +168,7 @@ nlm4_encode_lock(u32 *p, struct nlm_lock *lock) || (fl->fl_end > NLM4_OFFSET_MAX && fl->fl_end != OFFSET_MAX)) return NULL; - *p++ = htonl(fl->fl_pid); + *p++ = htonl(lock->svid); start = loff_t_to_s64(fl->fl_start); if (fl->fl_end == OFFSET_MAX) @@ -198,7 +199,7 @@ nlm4_encode_testres(u32 *p, struct nlm_res *resp) struct file_lock *fl = &resp->lock.fl; *p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one; - *p++ = htonl(fl->fl_pid); + *p++ = htonl(resp->lock.svid); /* Encode owner handle. 
*/ if (!(p = xdr_encode_netobj(p, &resp->lock.oh))) @@ -212,8 +213,8 @@ nlm4_encode_testres(u32 *p, struct nlm_res *resp) p = xdr_encode_hyper(p, start); p = xdr_encode_hyper(p, len); - dprintk("xdr: encode_testres (status %d pid %d type %d start %Ld end %Ld)\n", - resp->status, fl->fl_pid, fl->fl_type, + dprintk("xdr: encode_testres (status %u pid %d type %d start %Ld end %Ld)\n", + resp->status, (int)resp->lock.svid, fl->fl_type, (long long)fl->fl_start, (long long)fl->fl_end); } @@ -303,7 +304,8 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp) memset(lock, 0, sizeof(*lock)); locks_init_lock(&lock->fl); - lock->fl.fl_pid = ~(u32) 0; + lock->svid = ~(u32) 0; + lock->fl.fl_pid = (pid_t)lock->svid; if (!(p = nlm4_decode_cookie(p, &argp->cookie)) || !(p = xdr_decode_string_inplace(p, &lock->caller, @@ -420,7 +422,8 @@ nlm4clt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) memset(&resp->lock, 0, sizeof(resp->lock)); locks_init_lock(fl); excl = ntohl(*p++); - fl->fl_pid = ntohl(*p++); + resp->lock.svid = ntohl(*p++); + fl->fl_pid = (pid_t)resp->lock.svid; if (!(p = nlm4_decode_oh(p, &resp->lock.oh))) return -EIO; @@ -548,7 +551,9 @@ nlm4clt_decode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp) .p_proc = NLMPROC_##proc, \ .p_encode = (kxdrproc_t) nlm4clt_encode_##argtype, \ .p_decode = (kxdrproc_t) nlm4clt_decode_##restype, \ - .p_bufsiz = MAX(NLM4_##argtype##_sz, NLM4_##restype##_sz) << 2 \ + .p_bufsiz = MAX(NLM4_##argtype##_sz, NLM4_##restype##_sz) << 2, \ + .p_statidx = NLMPROC_##proc, \ + .p_name = #proc, \ } static struct rpc_procinfo nlm4_procedures[] = { |
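
Note on the svclock.c hunk earlier in this diff: the retry loop now pins the block with kref_get() before working on it and drops the reference via nlmsvc_release_block() afterwards, so the block cannot be freed underneath the retry. A minimal sketch of that get/work/put lifetime pattern, assuming a hypothetical struct my_block rather than the real nlm_block:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    /* Hypothetical refcounted object standing in for nlm_block. */
    struct my_block {
            struct kref     b_count;
            int             b_done;
    };

    static struct my_block *my_block_alloc(void)
    {
            struct my_block *block = kmalloc(sizeof(*block), GFP_KERNEL);

            if (block != NULL)
                    kref_init(&block->b_count);     /* refcount starts at 1 */
            return block;
    }

    static void my_block_free(struct kref *kref)
    {
            struct my_block *block = container_of(kref, struct my_block, b_count);

            kfree(block);
    }

    static void my_block_retry(struct my_block *block)
    {
            kref_get(&block->b_count);              /* pin across the work */
            /* ... unlink or re-grant the block here ... */
            kref_put(&block->b_count, my_block_free);
    }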
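
The svcproc.c change collapses five nearly identical *_MSG wrappers, each of which ran the synchronous routine and then fired an asynchronous result callback, into one helper that takes the synchronous routine as a function pointer. A compact userspace C sketch of the same consolidation, using hypothetical names (handle_msg, do_test, do_lock) rather than the lockd symbols:

    #include <stdio.h>

    /* Hypothetical argument/result types standing in for nlm_args/nlm_res. */
    struct req { int want_write; };
    struct res { int status; };

    static int do_test(const struct req *rq, struct res *rs)
    {
            rs->status = rq->want_write ? 1 : 0;
            return 0;
    }

    static int do_lock(const struct req *rq, struct res *rs)
    {
            rs->status = 0;
            return 0;
    }

    /*
     * One generic wrapper replaces a family of copy-pasted *_msg handlers:
     * run the synchronous routine, then send the asynchronous reply
     * (modelled here as a printf) tagged with the right result procedure.
     */
    static int handle_msg(int reply_proc, const struct req *rq,
                          int (*func)(const struct req *, struct res *))
    {
            struct res rs = { 0 };
            int stat = func(rq, &rs);

            if (stat != 0)
                    return stat;
            printf("queue async reply: proc=%d status=%d\n", reply_proc, rs.status);
            return 0;
    }

    int main(void)
    {
            struct req rq = { .want_write = 1 };

            handle_msg(101, &rq, do_test);  /* stands in for the TEST_MSG wrapper */
            handle_msg(102, &rq, do_lock);  /* stands in for the LOCK_MSG wrapper */
            return 0;
    }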
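
The svcsubs.c hunks convert the file-table lock from the old counting-semaphore idiom (DECLARE_MUTEX, down, up) to a real struct mutex (DEFINE_MUTEX, mutex_lock, mutex_unlock). A kernel-style sketch of the converted pattern around a hypothetical hash-table lookup, not the actual nlm_files code:

    #include <linux/mutex.h>

    #define OBJ_NRHASH 32

    /* Hypothetical object table; the mutex serialises all table walks. */
    static void *obj_table[OBJ_NRHASH];
    static DEFINE_MUTEX(obj_table_mutex);   /* was: static DECLARE_MUTEX(obj_table_sema); */

    static void *obj_lookup(unsigned int hash)
    {
            void *obj;

            mutex_lock(&obj_table_mutex);   /* was: down(&obj_table_sema); */
            obj = obj_table[hash & (OBJ_NRHASH - 1)];
            mutex_unlock(&obj_table_mutex); /* was: up(&obj_table_sema); */
            return obj;
    }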