author     David Howells <dhowells@redhat.com>    2020-04-30 01:03:49 +0100
committer  David Howells <dhowells@redhat.com>    2020-06-04 15:37:57 +0100
commit     20325960f8750165964a6891a733e4cc15d19076 (patch)
tree       d372bb21037626d2fac40b9a1e6e6bea21786b8e /fs/afs/rotate.c
parent     cca37d45d547434144409ae648a19b7eb6db5eb4 (diff)
afs: Reorganise volume and server trees to be rooted on the cell
Reorganise afs_volume objects such that they're in a tree keyed on volume ID,
rooted on the afs_cell object rather than being in multiple trees, each of
which is rooted on an afs_server object.  afs_server structs become per-cell
and acquire a pointer to the cell.

The process of breaking a callback then starts with finding the server by its
network address, following that to the cell and then looking up each volume ID
in the volume tree.

This is simpler than the afs_vol_interest/afs_cb_interest N:M mapping web and
allows those structs and the code for maintaining them to be simplified or
removed.

It does make a couple of things a bit more tricky, though:

 (1) Operations now start with a volume, not a server, so there can be more
     than one answer as to whether or not the server we'll end up using
     supports the FS.InlineBulkStatus RPC.

 (2) CB RPC operations that specify the server UUID.  There's still a tree of
     servers by UUID on the afs_net struct, but the UUIDs in it aren't
     guaranteed unique.

Signed-off-by: David Howells <dhowells@redhat.com>
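To make the new lookup order concrete, here is a small standalone sketch (plain user-space C, not the kernel code) of the path described above: a callback break starts from the server, follows the server's cell pointer, and then finds each affected volume by ID in a tree rooted on the cell. All type and function names below (cell, volume, server, cell_find_volume, break_callback) are illustrative assumptions, not the kernel's identifiers; the real implementation uses the kernel's own tree and locking machinery on afs_cell, afs_volume and afs_server.

/* Standalone sketch of the server -> cell -> volume lookup order described
 * above.  All names here are hypothetical stand-ins, not kernel identifiers.
 */
#include <stdint.h>
#include <stdio.h>

struct volume {
        uint64_t vid;                   /* volume ID, the tree key */
        struct volume *left, *right;    /* stand-in for the kernel's tree nodes */
};

struct cell {
        struct volume *volumes;         /* volume tree rooted on the cell */
};

struct server {
        struct cell *cell;              /* servers are per-cell and point back to it */
};

/* Look up a volume by ID in the tree rooted on the cell. */
static struct volume *cell_find_volume(const struct cell *cell, uint64_t vid)
{
        struct volume *v = cell->volumes;

        while (v) {
                if (vid < v->vid)
                        v = v->left;
                else if (vid > v->vid)
                        v = v->right;
                else
                        return v;
        }
        return NULL;
}

/* Breaking a callback: the server was already found by its network address;
 * follow it to the cell, then resolve the volume ID the break applies to.
 */
static void break_callback(const struct server *server, uint64_t vid)
{
        struct volume *v = cell_find_volume(server->cell, vid);

        if (v)
                printf("break callbacks on volume %llu\n",
                       (unsigned long long)v->vid);
        else
                printf("volume %llu not known in this cell\n",
                       (unsigned long long)vid);
}

int main(void)
{
        struct volume v20 = { .vid = 20 };
        struct volume v10 = { .vid = 10, .right = &v20 };
        struct cell cell = { .volumes = &v10 };
        struct server server = { .cell = &cell };

        break_callback(&server, 20);    /* resolves via the cell's volume tree */
        break_callback(&server, 30);    /* unknown volume ID */
        return 0;
}

The point of the sketch is the ordering: the server no longer carries per-volume callback state of its own; it only identifies the cell, and the cell's volume tree is the single place a volume ID is resolved.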
Diffstat (limited to 'fs/afs/rotate.c')
-rw-r--r--  fs/afs/rotate.c  128
1 files changed, 20 insertions, 108 deletions
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index d67b8c7eb3b9..8d5473cd8ea4 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -21,7 +21,8 @@
 static bool afs_start_fs_iteration(struct afs_operation *op,
                                    struct afs_vnode *vnode)
 {
-        struct afs_cb_interest *cbi;
+        struct afs_server *server;
+        void *cb_server;
         int i;
 
         read_lock(&op->volume->servers_lock);
@@ -33,12 +34,12 @@ static bool afs_start_fs_iteration(struct afs_operation *op,
         op->untried = (1UL << op->server_list->nr_servers) - 1;
         op->index = READ_ONCE(op->server_list->preferred);
 
-        cbi = rcu_dereference_protected(vnode->cb_interest,
-                                        lockdep_is_held(&vnode->io_lock));
-        if (cbi) {
+        cb_server = vnode->cb_server;
+        if (cb_server) {
                 /* See if the vnode's preferred record is still available */
                 for (i = 0; i < op->server_list->nr_servers; i++) {
-                        if (op->server_list->servers[i].cb_interest == cbi) {
+                        server = op->server_list->servers[i].server;
+                        if (server == cb_server) {
                                 op->index = i;
                                 goto found_interest;
                         }
@@ -55,14 +56,11 @@ static bool afs_start_fs_iteration(struct afs_operation *op,
 
                 /* Note that the callback promise is effectively broken */
                 write_seqlock(&vnode->cb_lock);
-                ASSERTCMP(cbi, ==, rcu_access_pointer(vnode->cb_interest));
-                rcu_assign_pointer(vnode->cb_interest, NULL);
+                ASSERTCMP(cb_server, ==, vnode->cb_server);
+                vnode->cb_server = NULL;
                 if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags))
                         vnode->cb_break++;
                 write_sequnlock(&vnode->cb_lock);
-
-                afs_put_cb_interest(op->net, cbi);
-                cbi = NULL;
         }
 
 found_interest:
@@ -303,8 +301,7 @@ bool afs_select_fileserver(struct afs_operation *op)
 restart_from_beginning:
         _debug("restart");
         afs_end_cursor(&op->ac);
-        afs_put_cb_interest(op->net, op->cbi);
-        op->cbi = NULL;
+        op->server = NULL;
         afs_put_serverlist(op->net, op->server_list);
         op->server_list = NULL;
 start:
@@ -331,13 +328,12 @@ pick_server:
         /* Pick the untried server with the lowest RTT. If we have outstanding
          * callbacks, we stick with the server we're already using if we can.
          */
-        if (op->cbi) {
-                _debug("cbi %u", op->index);
+        if (op->server) {
+                _debug("server %u", op->index);
                 if (test_bit(op->index, &op->untried))
                         goto selected_server;
-                afs_put_cb_interest(op->net, op->cbi);
-                op->cbi = NULL;
-                _debug("nocbi");
+                op->server = NULL;
+                _debug("no server");
         }
 
         op->index = -1;
@@ -372,19 +368,13 @@ selected_server:
 
         _debug("USING SERVER: %pU", &server->uuid);
 
-        /* Make sure we've got a callback interest record for this server. We
-         * have to link it in before we send the request as we can be sent a
-         * break request before we've finished decoding the reply and
-         * installing the vnode.
-         */
-        error = afs_register_server_cb_interest(vnode, op->server_list,
-                                                op->index);
-        if (error < 0)
-                goto failed_set_error;
-
-        op->cbi = afs_get_cb_interest(
-                rcu_dereference_protected(vnode->cb_interest,
-                                          lockdep_is_held(&vnode->io_lock)));
+        op->server = server;
+        if (vnode->cb_server != server) {
+                vnode->cb_server = server;
+                vnode->cb_s_break = server->cb_s_break;
+                vnode->cb_v_break = vnode->volume->cb_v_break;
+                clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+        }
 
         read_lock(&server->fs_lock);
         alist = rcu_dereference_protected(server->addresses,
@@ -447,84 +437,6 @@ failed:
 }
 
 /*
- * Select the same fileserver we used for a vnode before and only that
- * fileserver. We use this when we have a lock on that file, which is backed
- * only by the fileserver we obtained it from.
- */
-bool afs_select_current_fileserver(struct afs_operation *op)
-{
-        struct afs_cb_interest *cbi;
-        struct afs_addr_list *alist;
-        int error = op->ac.error;
-
-        _enter("");
-
-        switch (error) {
-        case SHRT_MAX:
-                cbi = op->cbi;
-                if (!cbi) {
-                        op->error = -ESTALE;
-                        op->flags |= AFS_OPERATION_STOP;
-                        return false;
-                }
-
-                read_lock(&cbi->server->fs_lock);
-                alist = rcu_dereference_protected(cbi->server->addresses,
-                                                  lockdep_is_held(&cbi->server->fs_lock));
-                afs_get_addrlist(alist);
-                read_unlock(&cbi->server->fs_lock);
-                if (!alist) {
-                        op->error = -ESTALE;
-                        op->flags |= AFS_OPERATION_STOP;
-                        return false;
-                }
-
-                memset(&op->ac, 0, sizeof(op->ac));
-                op->ac.alist = alist;
-                op->ac.index = -1;
-                goto iterate_address;
-
-        case 0:
-        default:
-                /* Success or local failure. Stop. */
-                op->error = error;
-                op->flags |= AFS_OPERATION_STOP;
-                _leave(" = f [okay/local %d]", error);
-                return false;
-
-        case -ECONNABORTED:
-                op->error = afs_abort_to_error(op->ac.abort_code);
-                op->flags |= AFS_OPERATION_STOP;
-                _leave(" = f [abort]");
-                return false;
-
-        case -ERFKILL:
-        case -EADDRNOTAVAIL:
-        case -ENETUNREACH:
-        case -EHOSTUNREACH:
-        case -EHOSTDOWN:
-        case -ECONNREFUSED:
-        case -ETIMEDOUT:
-        case -ETIME:
-                _debug("no conn");
-                op->error = error;
-                goto iterate_address;
-        }
-
-iterate_address:
-        /* Iterate over the current server's address list to try and find an
-         * address on which it will respond to us.
-         */
-        if (afs_iterate_addresses(&op->ac)) {
-                _leave(" = t");
-                return true;
-        }
-
-        afs_end_cursor(&op->ac);
-        return false;
-}
-
-/*
  * Dump cursor state in the case of the error being EDESTADDRREQ.
  */
 void afs_dump_edestaddrreq(const struct afs_operation *op)