author     Shyam Prasad N <sprasad@microsoft.com>    2022-07-27 14:49:56 -0500
committer  Steve French <stfrench@microsoft.com>     2022-08-01 01:34:45 -0500
commit     d7d7a66aacd6fd8ca57baf08a7bac5421282f6f8 (patch)
tree       2565cb830065b2c06cefd085866c3de926d688b5 /fs/cifs/connect.c
parent     1bfa25ee30dfebe32a3b40c1a954052becbd7b8d (diff)
cifs: avoid use of global locks for high contention data
During analysis of multichannel perf, it was seen that the global locks cifs_tcp_ses_lock and GlobalMid_Lock, which were shared between various data structures, were causing a lot of contention.

With this change, we're breaking down the use of these locks by introducing new locks at more granular levels, i.e. server->srv_lock, ses->ses_lock and tcon->tc_lock to protect the unprotected fields of the server, session and tcon structs, and server->mid_lock to protect mid-related lists and entries at the server level.

Signed-off-by: Shyam Prasad N <sprasad@microsoft.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
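As a rough, illustrative sketch of the pattern the hunks below apply (not part of the patch itself): the lock and field names (srv_lock, mid_lock, tcpStatus, pending_mid_q) match those used in the diff, but the structures are heavily simplified here; the real definitions live in fs/cifs/cifsglob.h.

/*
 * Illustrative fragment only -- not part of the patch.
 * Each TCP_Server_Info now carries its own locks for its own fields.
 */
struct TCP_Server_Info {
	spinlock_t srv_lock;	/* protects tcpStatus and other per-server fields */
	spinlock_t mid_lock;	/* protects pending_mid_q and mid entries */
	/* ... many more members in the real struct ... */
};

/* Before: every status flip funnels through one global lock. */
spin_lock(&cifs_tcp_ses_lock);
server->tcpStatus = CifsNeedNegotiate;
spin_unlock(&cifs_tcp_ses_lock);

/* After: only the server that owns the field is serialized. */
spin_lock(&server->srv_lock);
server->tcpStatus = CifsNeedNegotiate;
spin_unlock(&server->srv_lock);

Note that cifs_tcp_ses_lock itself is not removed: as the cifs_find_tcp_session, cifs_find_smb_ses and cifs_find_tcon hunks below show, it still guards walks of the global tcp_ses_list and the per-server/per-session lists, while the new per-object locks protect the fields of the individual server, session and tcon structures.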
Diffstat (limited to 'fs/cifs/connect.c')
-rw-r--r--   fs/cifs/connect.c   216
1 file changed, 121 insertions(+), 95 deletions(-)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 6e670e7c2182..931d4b6fafc8 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -119,10 +119,10 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
goto requeue_resolve;
}
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
strlen(ipaddr));
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
kfree(ipaddr);
/* rc == 1 means success here */
@@ -205,17 +205,22 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
/* If server is a channel, select the primary channel */
pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&pserver->srv_lock);
if (!all_channels) {
pserver->tcpStatus = CifsNeedReconnect;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&pserver->srv_lock);
return;
}
+ spin_unlock(&pserver->srv_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
spin_lock(&ses->chan_lock);
- for (i = 0; i < ses->chan_count; i++)
+ for (i = 0; i < ses->chan_count; i++) {
+ spin_lock(&ses->chans[i].server->srv_lock);
ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+ spin_unlock(&ses->chans[i].server->srv_lock);
+ }
spin_unlock(&ses->chan_lock);
}
spin_unlock(&cifs_tcp_ses_lock);
@@ -252,17 +257,8 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
/* check if iface is still active */
- if (!cifs_chan_is_iface_active(ses, server)) {
- /*
- * HACK: drop the lock before calling
- * cifs_chan_update_iface to avoid deadlock
- */
- ses->ses_count++;
- spin_unlock(&cifs_tcp_ses_lock);
+ if (!cifs_chan_is_iface_active(ses, server))
cifs_chan_update_iface(ses, server);
- spin_lock(&cifs_tcp_ses_lock);
- ses->ses_count--;
- }
spin_lock(&ses->chan_lock);
if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server))
@@ -323,7 +319,7 @@ cifs_abort_connection(struct TCP_Server_Info *server)
/* mark submitted MIDs for retry and issue callback */
INIT_LIST_HEAD(&retry_list);
cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
kref_get(&mid->refcount);
if (mid->mid_state == MID_REQUEST_SUBMITTED)
@@ -331,7 +327,7 @@ cifs_abort_connection(struct TCP_Server_Info *server)
list_move(&mid->qhead, &retry_list);
mid->mid_flags |= MID_DELETED;
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
cifs_server_unlock(server);
cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
@@ -350,11 +346,11 @@ cifs_abort_connection(struct TCP_Server_Info *server)
static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
{
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
server->nr_targets = num_targets;
if (server->tcpStatus == CifsExiting) {
/* the demux thread will exit normally next time through the loop */
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
wake_up(&server->response_q);
return false;
}
@@ -364,7 +360,7 @@ static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num
server->hostname);
server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return true;
}
@@ -414,20 +410,20 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
} else {
atomic_inc(&tcpSesReconnectCount);
set_credits(server, 1);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus != CifsExiting)
server->tcpStatus = CifsNeedNegotiate;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
cifs_swn_reset_server_dstaddr(server);
cifs_server_unlock(server);
mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
}
} while (server->tcpStatus == CifsNeedReconnect);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedNegotiate)
mod_delayed_work(cifsiod_wq, &server->echo, 0);
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
wake_up(&server->response_q);
return rc;
@@ -541,10 +537,10 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
*/
atomic_inc(&tcpSesReconnectCount);
set_credits(server, 1);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus != CifsExiting)
server->tcpStatus = CifsNeedNegotiate;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
cifs_swn_reset_server_dstaddr(server);
cifs_server_unlock(server);
mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
@@ -556,11 +552,10 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
dfs_cache_free_tgts(&tl);
/* Need to set up echo worker again once connection has been established */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedNegotiate)
mod_delayed_work(cifsiod_wq, &server->echo, 0);
-
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
wake_up(&server->response_q);
return rc;
@@ -569,12 +564,12 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
{
/* If tcp session is not an dfs connection, then reconnect to last target server */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (!server->is_dfs_conn) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return __cifs_reconnect(server, mark_smb_session);
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
mutex_lock(&server->refpath_lock);
if (!server->origin_fullpath || !server->leaf_fullpath) {
@@ -670,18 +665,18 @@ server_unresponsive(struct TCP_Server_Info *server)
* 65s kernel_recvmsg times out, and we see that we haven't gotten
* a response in >60s.
*/
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if ((server->tcpStatus == CifsGood ||
server->tcpStatus == CifsNeedNegotiate) &&
(!server->ops->can_echo || server->ops->can_echo(server)) &&
time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
(3 * server->echo_interval) / HZ);
cifs_reconnect(server, false);
return true;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return false;
}
@@ -726,18 +721,18 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
else
length = sock_recvmsg(server->ssocket, smb_msg, 0);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return -ESHUTDOWN;
}
if (server->tcpStatus == CifsNeedReconnect) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
cifs_reconnect(server, false);
return -ECONNABORTED;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
if (length == -ERESTARTSYS ||
length == -EAGAIN ||
@@ -849,7 +844,7 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
#ifdef CONFIG_CIFS_STATS2
mid->when_received = jiffies;
#endif
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&mid->server->mid_lock);
if (!malformed)
mid->mid_state = MID_RESPONSE_RECEIVED;
else
@@ -859,12 +854,12 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
* function has finished processing it is a bug.
*/
if (mid->mid_flags & MID_DELETED) {
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&mid->server->mid_lock);
pr_warn_once("trying to dequeue a deleted mid\n");
} else {
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&mid->server->mid_lock);
}
}
@@ -908,16 +903,16 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
int length;
/* take it off the list, if it's not already */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
list_del_init(&server->tcp_ses_list);
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
cancel_delayed_work_sync(&server->echo);
cancel_delayed_work_sync(&server->resolve);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
server->tcpStatus = CifsExiting;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
wake_up_all(&server->response_q);
/* check if we have blocked requests that need to free */
@@ -948,7 +943,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
struct list_head *tmp, *tmp2;
INIT_LIST_HEAD(&dispose_list);
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
@@ -957,7 +952,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
list_move(&mid_entry->qhead, &dispose_list);
mid_entry->mid_flags |= MID_DELETED;
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
/* now walk dispose list and issue callbacks */
list_for_each_safe(tmp, tmp2, &dispose_list) {
@@ -1410,6 +1405,7 @@ match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
return true;
}
+/* this function must be called with srv_lock held */
static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
@@ -1470,6 +1466,7 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ spin_lock(&server->srv_lock);
#ifdef CONFIG_CIFS_DFS_UPCALL
/*
* DFS failover implementation in cifs_reconnect() requires unique tcp sessions for
@@ -1477,15 +1474,20 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
* shares or even links that may connect to same server but having completely
* different failover targets.
*/
- if (server->is_dfs_conn)
+ if (server->is_dfs_conn) {
+ spin_unlock(&server->srv_lock);
continue;
+ }
#endif
/*
* Skip ses channels since they're only handled in lower layers
* (e.g. cifs_send_recv).
*/
- if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx))
+ if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) {
+ spin_unlock(&server->srv_lock);
continue;
+ }
+ spin_unlock(&server->srv_lock);
++server->srv_count;
spin_unlock(&cifs_tcp_ses_lock);
@@ -1533,9 +1535,9 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
else
cancel_delayed_work_sync(&server->reconnect);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
server->tcpStatus = CifsExiting;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
cifs_crypto_secmech_release(server);
@@ -1594,8 +1596,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
if (primary_server) {
spin_lock(&cifs_tcp_ses_lock);
++primary_server->srv_count;
- tcp_ses->primary_server = primary_server;
spin_unlock(&cifs_tcp_ses_lock);
+ tcp_ses->primary_server = primary_server;
}
init_waitqueue_head(&tcp_ses->response_q);
init_waitqueue_head(&tcp_ses->request_q);
@@ -1611,6 +1613,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
tcp_ses->lstrp = jiffies;
tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
spin_lock_init(&tcp_ses->req_lock);
+ spin_lock_init(&tcp_ses->srv_lock);
+ spin_lock_init(&tcp_ses->mid_lock);
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
@@ -1684,9 +1688,9 @@ smbd_connected:
* to the struct since the kernel thread not created yet
* no need to spinlock this update of tcpStatus
*/
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcp_ses->srv_lock);
tcp_ses->tcpStatus = CifsNeedNegotiate;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcp_ses->srv_lock);
if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
@@ -1728,6 +1732,7 @@ out_err:
return ERR_PTR(rc);
}
+/* this function must be called with ses_lock held */
static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
if (ctx->sectype != Unspecified &&
@@ -1863,10 +1868,17 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
- if (ses->ses_status == SES_EXITING)
+ spin_lock(&ses->ses_lock);
+ if (ses->ses_status == SES_EXITING) {
+ spin_unlock(&ses->ses_lock);
continue;
- if (!match_session(ses, ctx))
+ }
+ if (!match_session(ses, ctx)) {
+ spin_unlock(&ses->ses_lock);
continue;
+ }
+ spin_unlock(&ses->ses_lock);
+
++ses->ses_count;
spin_unlock(&cifs_tcp_ses_lock);
return ses;
@@ -1881,26 +1893,28 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
unsigned int chan_count;
struct TCP_Server_Info *server = ses->server;
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_EXITING) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
return;
}
+ spin_unlock(&ses->ses_lock);
cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
cifs_dbg(FYI, "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->treeName : "NONE");
+ spin_lock(&cifs_tcp_ses_lock);
if (--ses->ses_count > 0) {
spin_unlock(&cifs_tcp_ses_lock);
return;
}
+ spin_unlock(&cifs_tcp_ses_lock);
/* ses_count can never go negative */
WARN_ON(ses->ses_count < 0);
if (ses->ses_status == SES_GOOD)
ses->ses_status = SES_EXITING;
- spin_unlock(&cifs_tcp_ses_lock);
cifs_free_ipc(ses);
@@ -2235,6 +2249,7 @@ get_ses_fail:
return ERR_PTR(rc);
}
+/* this function must be called with tc_lock held */
static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
if (tcon->status == TID_EXITING)
@@ -2261,9 +2276,13 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
- if (!match_tcon(tcon, ctx))
+ spin_lock(&tcon->tc_lock);
+ if (!match_tcon(tcon, ctx)) {
+ spin_unlock(&tcon->tc_lock);
continue;
+ }
++tcon->tc_count;
+ spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
return tcon;
}
@@ -2663,6 +2682,9 @@ cifs_match_super(struct super_block *sb, void *data)
ctx = mnt_data->ctx;
+ spin_lock(&tcp_srv->srv_lock);
+ spin_lock(&ses->ses_lock);
+ spin_lock(&tcon->tc_lock);
if (!match_server(tcp_srv, ctx) ||
!match_session(ses, ctx) ||
!match_tcon(tcon, ctx) ||
@@ -2673,6 +2695,10 @@ cifs_match_super(struct super_block *sb, void *data)
rc = compare_mount_options(sb, mnt_data);
out:
+ spin_unlock(&tcon->tc_lock);
+ spin_unlock(&ses->ses_lock);
+ spin_unlock(&tcp_srv->srv_lock);
+
spin_unlock(&cifs_tcp_ses_lock);
cifs_put_tlink(tlink);
return rc;
@@ -3178,15 +3204,15 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
* for just this mount.
*/
reset_cifs_unix_caps(xid, tcon, cifs_sb, ctx);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->ses->server->srv_lock);
if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
(le64_to_cpu(tcon->fsUnixInfo.Capability) &
CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->ses->server->srv_lock);
rc = -EACCES;
goto out;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->ses->server->srv_lock);
} else
tcon->unix_ext = 0; /* server does not support them */
@@ -3269,9 +3295,9 @@ static int mount_get_dfs_conns(struct mount_ctx *mnt_ctx)
rc = mount_get_conns(mnt_ctx);
if (mnt_ctx->server) {
cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&mnt_ctx->server->srv_lock);
mnt_ctx->server->is_dfs_conn = true;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&mnt_ctx->server->srv_lock);
}
return rc;
}
@@ -3986,28 +4012,28 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
return -ENOSYS;
/* only send once per connect */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (!server->ops->need_neg(server) ||
server->tcpStatus != CifsNeedNegotiate) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return 0;
}
server->tcpStatus = CifsInNegotiate;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
rc = server->ops->negotiate(xid, ses, server);
if (rc == 0) {
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsInNegotiate)
server->tcpStatus = CifsGood;
else
rc = -EHOSTDOWN;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
} else {
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsInNegotiate)
server->tcpStatus = CifsNeedNegotiate;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
}
return rc;
@@ -4023,7 +4049,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
bool is_binding = false;
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
if (server->dstaddr.ss_family == AF_INET6)
scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
else
@@ -4032,7 +4058,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
if (ses->ses_status != SES_GOOD &&
ses->ses_status != SES_NEW &&
ses->ses_status != SES_NEED_RECON) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
return 0;
}
@@ -4041,7 +4067,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
if (CIFS_ALL_CHANS_GOOD(ses) ||
cifs_chan_in_reconnect(ses, server)) {
spin_unlock(&ses->chan_lock);
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
return 0;
}
is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
@@ -4050,7 +4076,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
if (!is_binding)
ses->ses_status = SES_IN_SETUP;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
if (!is_binding) {
ses->capabilities = server->capabilities;
@@ -4074,22 +4100,22 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
if (rc) {
cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_IN_SETUP)
ses->ses_status = SES_NEED_RECON;
spin_lock(&ses->chan_lock);
cifs_chan_clear_in_reconnect(ses, server);
spin_unlock(&ses->chan_lock);
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
} else {
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_IN_SETUP)
ses->ses_status = SES_GOOD;
spin_lock(&ses->chan_lock);
cifs_chan_clear_in_reconnect(ses, server);
cifs_chan_clear_need_reconnect(ses, server);
spin_unlock(&ses->chan_lock);
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
}
return rc;
@@ -4553,15 +4579,15 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
struct dfs_info3_param ref = {0};
/* only send once per connect */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (tcon->ses->ses_status != SES_GOOD ||
(tcon->status != TID_NEW &&
tcon->status != TID_NEED_TCON)) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->ses->ses_lock);
return 0;
}
tcon->status = TID_IN_TCON;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
if (!tree) {
@@ -4600,15 +4626,15 @@ out:
cifs_put_tcp_super(sb);
if (rc) {
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (tcon->status == TID_IN_TCON)
tcon->status = TID_NEED_TCON;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
} else {
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (tcon->status == TID_IN_TCON)
tcon->status = TID_GOOD;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
tcon->need_reconnect = false;
}
@@ -4621,28 +4647,28 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
const struct smb_version_operations *ops = tcon->ses->server->ops;
/* only send once per connect */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (tcon->ses->ses_status != SES_GOOD ||
(tcon->status != TID_NEW &&
tcon->status != TID_NEED_TCON)) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
return 0;
}
tcon->status = TID_IN_TCON;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc);
if (rc) {
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (tcon->status == TID_IN_TCON)
tcon->status = TID_NEED_TCON;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
} else {
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (tcon->status == TID_IN_TCON)
tcon->status = TID_GOOD;
- spin_unlock(&cifs_tcp_ses_lock);
tcon->need_reconnect = false;
+ spin_unlock(&tcon->tc_lock);
}
return rc;