author     Bob Pearson <rpearsonhpe@gmail.com>      2021-01-28 11:47:53 -0600
committer  Jason Gunthorpe <jgg@nvidia.com>         2021-02-05 13:44:22 -0400
commit     8fc1b7027fc162738d5a85c82410e501a371a404 (patch)
tree       7127b5006a1b51ad67c4aca2f9144fc8ac7b5399 /drivers
parent     e328197423e09094aff48619ebef6671ff64d3b2 (diff)
RDMA/rxe: Fix coding error in rxe_rcv_mcast_pkt
rxe_rcv_mcast_pkt() in rxe_recv.c can leak SKBs in its error path. The loop
over the QPs attached to a multicast group creates new cloned SKBs for all
but the last QP in the list and passes the SKB and its clones to
rxe_rcv_pkt() for further processing. Any QPs that do not pass some checks
are skipped. If the last QP in the list fails the tests, the SKB is leaked.
This patch checks whether the SKB for the last QP was used and, if not,
frees it. It also removes a redundant loop-invariant assignment.

Fixes: 8700e3e7c485 ("Soft RoCE driver")
Fixes: 71abf20b28ff ("RDMA/rxe: Handle skb_clone() failure in rxe_recv.c")
Link: https://lore.kernel.org/r/20210128174752.16128-1-rpearson@hpe.com
Signed-off-by: Bob Pearson <rpearson@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
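To make the ownership rule described above concrete, here is a minimal userspace
sketch (not kernel code): struct buf, buf_clone(), deliver() and fan_out() are
hypothetical stand-ins for the sk_buff, skb_clone(), rxe_rcv_pkt() and the loop
over the QP list. It illustrates the pattern the fix adopts: clone the buffer for
every consumer except the last, hand the original to the last consumer and NULL
the local pointer to mark it consumed, then free unconditionally at the cleanup
point so a skipped last consumer no longer leaks the buffer.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	char data[32];
};

static struct buf *buf_clone(const struct buf *b)
{
	struct buf *c = malloc(sizeof(*c));

	if (c)
		memcpy(c, b, sizeof(*c));
	return c;
}

/* stands in for rxe_rcv_pkt(): takes ownership of @b and always frees it */
static void deliver(int id, struct buf *b)
{
	printf("consumer %d got: %s\n", id, b->data);
	free(b);
}

static void fan_out(struct buf *b, const int ok[], int n)
{
	for (int i = 0; i < n; i++) {
		struct buf *per;

		/* validation runs before the clone/handoff; a failed
		 * check skips this consumer entirely
		 */
		if (!ok[i])
			continue;

		/* all but the last consumer get a clone; the last one
		 * takes the original, which is then marked as consumed
		 */
		if (i < n - 1) {
			per = buf_clone(b);
		} else {
			per = b;
			b = NULL;	/* show we have consumed the buffer */
		}
		if (!per)
			continue;	/* clone failed; original still owned */

		deliver(i, per);
	}

	/* free the original only if no consumer took it */
	free(b);
}

int main(void)
{
	struct buf src = { .data = "multicast payload" };
	struct buf *b = buf_clone(&src);
	int ok[] = { 1, 1, 0 };		/* the last consumer fails its checks */

	if (b)
		fan_out(b, ok, 3);
	return 0;
}

The unconditional free at the end of fan_out() works because free(NULL) is a
no-op, just as the patched err1: path relies on kfree_skb() tolerating a NULL
skb once it has been handed to the last QP.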
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_recv.c  16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 0dd163b745fe..2bbcea61b780 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -256,7 +256,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 
 	list_for_each_entry(mce, &mcg->qp_list, qp_list) {
 		qp = mce->qp;
-		pkt = SKB_TO_PKT(skb);
 
 		/* validate qp for incoming packet */
 		err = check_type_state(rxe, pkt, qp);
@@ -268,12 +267,18 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 			continue;
 
 		/* for all but the last qp create a new clone of the
-		 * skb and pass to the qp.
+		 * skb and pass to the qp. If an error occurs in the
+		 * checks for the last qp in the list we need to
+		 * free the skb since it hasn't been passed on to
+		 * rxe_rcv_pkt() which would free it later.
 		 */
-		if (mce->qp_list.next != &mcg->qp_list)
+		if (mce->qp_list.next != &mcg->qp_list) {
 			per_qp_skb = skb_clone(skb, GFP_ATOMIC);
-		else
+		} else {
 			per_qp_skb = skb;
+			/* show we have consumed the skb */
+			skb = NULL;
+		}
 
 		if (unlikely(!per_qp_skb))
 			continue;
@@ -288,9 +293,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 
 	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */
 
-	return;
-
 err1:
+	/* free skb if not consumed */
 	kfree_skb(skb);
 }
 