author     Hans Wippel <hwippel@linux.ibm.com>      2018-05-18 09:34:10 +0200
committer  David S. Miller <davem@davemloft.net>    2018-05-18 13:15:01 -0400
commit     69cb7dc0218b2c54416722eddf181e720f24c305
tree       521471fca3a62bfcd69228b4ea9224a86bf133f5  /net/smc/smc_tx.c
parent     d6830519a9de10621651433459d2c72a7145dd25
download   linux-69cb7dc0218b2c54416722eddf181e720f24c305.tar.bz2
net/smc: add common buffer size in send and receive buffer descriptors
In addition to the buffer references, SMC currently stores the sizes of
the receive and send buffers in each connection as separate variables.
This patch introduces a buffer length field in the common buffer
descriptor and uses it in place of the separate size variables.
Signed-off-by: Hans Wippel <hwippel@linux.ibm.com>
Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
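For context, here is a minimal standalone sketch of what the commit message describes: the buffer length moves out of per-connection size variables (sndbuf_size, rmbe_size) into the descriptor shared by send and receive buffers, so all callers read it through sndbuf_desc->len or rmb_desc->len. The struct layouts below are trimmed and illustrative only; the real definitions live in net/smc/smc_core.h.

#include <stdio.h>

/* Illustrative stand-ins; the real structs contain many more fields. */
struct smc_buf_desc {
	void *cpu_addr;	/* buffer memory (unused in this sketch) */
	int len;	/* common buffer length, added by this patch */
};

struct smc_connection {
	struct smc_buf_desc *sndbuf_desc;	/* send buffer */
	struct smc_buf_desc *rmb_desc;		/* receive buffer (RMB) */
};

int main(void)
{
	struct smc_buf_desc snd = { .cpu_addr = NULL, .len = 16384 };
	struct smc_buf_desc rmb = { .cpu_addr = NULL, .len = 16384 };
	struct smc_connection conn = { .sndbuf_desc = &snd, .rmb_desc = &rmb };

	/* instead of conn.sndbuf_size / conn.rmbe_size ... */
	printf("sndbuf len: %d\n", conn.sndbuf_desc->len);
	printf("rmb len:    %d\n", conn.rmb_desc->len);
	return 0;
}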
Diffstat (limited to 'net/smc/smc_tx.c')
-rw-r--r--  net/smc/smc_tx.c | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 08a7de98bb03..58bc7ca3101a 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -180,8 +180,8 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
 		tx_cnt_prep = prep.count;
 		/* determine chunks where to write into sndbuf */
 		/* either unwrapped case, or 1st chunk of wrapped case */
-		chunk_len = min_t(size_t,
-				  copylen, conn->sndbuf_size - tx_cnt_prep);
+		chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len -
+				  tx_cnt_prep);
 		chunk_len_sum = chunk_len;
 		chunk_off = tx_cnt_prep;
 		smc_sndbuf_sync_sg_for_cpu(conn);
@@ -206,21 +206,21 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
 		}
 		smc_sndbuf_sync_sg_for_device(conn);
 		/* update cursors */
-		smc_curs_add(conn->sndbuf_size, &prep, copylen);
+		smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
 		smc_curs_write(&conn->tx_curs_prep,
 			       smc_curs_read(&prep, conn),
 			       conn);
 		/* increased in send tasklet smc_cdc_tx_handler() */
 		smp_mb__before_atomic();
 		atomic_sub(copylen, &conn->sndbuf_space);
-		/* guarantee 0 <= sndbuf_space <= sndbuf_size */
+		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
 		smp_mb__after_atomic();
 		/* since we just produced more new data into sndbuf,
 		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
 		 */
 		if ((msg->msg_flags & MSG_MORE || smc_tx_is_corked(smc)) &&
 		    (atomic_read(&conn->sndbuf_space) >
-						(conn->sndbuf_size >> 1)))
+					(conn->sndbuf_desc->len >> 1)))
 			/* for a corked socket defer the RDMA writes if there
 			 * is still sufficient sndbuf_space available
 			 */
@@ -286,7 +286,7 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn,
 	atomic_sub(len, &conn->peer_rmbe_space);
 	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
 	smp_mb__after_atomic();
-	smc_curs_add(conn->sndbuf_size, sent, len);
+	smc_curs_add(conn->sndbuf_desc->len, sent, len);
 }
 
 /* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
@@ -309,7 +309,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
 	smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn);
 	smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn);
 	/* cf. wmem_alloc - (snd_max - snd_una) */
-	to_send = smc_curs_diff(conn->sndbuf_size, &sent, &prep);
+	to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
 	if (to_send <= 0)
 		return 0;
 
@@ -351,12 +351,12 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
 	dst_len_sum = dst_len;
 	src_off = sent.count;
 	/* dst_len determines the maximum src_len */
-	if (sent.count + dst_len <= conn->sndbuf_size) {
+	if (sent.count + dst_len <= conn->sndbuf_desc->len) {
 		/* unwrapped src case: single chunk of entire dst_len */
 		src_len = dst_len;
 	} else {
 		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
-		src_len = conn->sndbuf_size - sent.count;
+		src_len = conn->sndbuf_desc->len - sent.count;
 	}
 	src_len_sum = src_len;
 	dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
@@ -368,8 +368,8 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
 			sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
 			num_sges++;
 			src_off += src_len;
-			if (src_off >= conn->sndbuf_size)
-				src_off -= conn->sndbuf_size;
+			if (src_off >= conn->sndbuf_desc->len)
+				src_off -= conn->sndbuf_desc->len;
 						/* modulo in send ring */
 			if (src_len_sum == dst_len)
 				break; /* either on 1st or 2nd iteration */
@@ -387,7 +387,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
 		dst_len = len - dst_len; /* remainder */
 		dst_len_sum += dst_len;
 		src_len = min_t(int,
-				dst_len, conn->sndbuf_size - sent.count);
+				dst_len, conn->sndbuf_desc->len - sent.count);
 		src_len_sum = src_len;
 	}
 
@@ -484,11 +484,11 @@ void smc_tx_consumer_update(struct smc_connection *conn)
 	smc_curs_write(&cfed,
 		       smc_curs_read(&conn->rx_curs_confirmed, conn),
 		       conn);
-	to_confirm = smc_curs_diff(conn->rmbe_size, &cfed, &cons);
+	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
 
 	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
 	    ((to_confirm > conn->rmbe_update_limit) &&
-	     ((to_confirm > (conn->rmbe_size / 2)) ||
+	     ((to_confirm > (conn->rmb_desc->len / 2)) ||
 	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
 		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
 		    conn->alert_token_local) { /* connection healthy */
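The wrap-around handling in the hunks above ("modulo in send ring", and smc_curs_add/smc_curs_diff taken over the descriptor length) is ordinary ring-buffer cursor arithmetic. Below is a standalone sketch with hypothetical names; it is not the kernel's smc_curs_* implementation, which additionally tracks a wrap count in the cursor itself.

#include <stdio.h>

/* Distance from consumer cursor (sent) to producer cursor (prep) in a
 * ring of size len; a negative raw difference means the producer has
 * already wrapped past the end of the buffer.
 */
static int ring_diff(int len, int sent, int prep)
{
	int d = prep - sent;

	return d >= 0 ? d : d + len;
}

/* Advance an offset in the ring; assumes delta <= len, as in smc_tx.c
 * where a single copy never exceeds the buffer's free space.
 */
static int ring_add(int len, int off, int delta)
{
	off += delta;
	if (off >= len)
		off -= len;	/* "modulo in send ring" */
	return off;
}

int main(void)
{
	int len = 8;

	printf("to_send: %d\n", ring_diff(len, 6, 2));	/* wrapped -> 4 */
	printf("new off: %d\n", ring_add(len, 6, 5));	/* 11 wraps -> 3 */
	return 0;
}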