author	Colin Ian King <colin.king@canonical.com>	2019-06-19 15:30:44 +0100
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2019-06-28 16:00:10 -0700
commit	9fe06a51287b2d41baef7ece94df34b5abf19b90 (patch)
tree	5af0454ba2e181d1cd391fda6ac1941a5cd854fe /drivers/net/ethernet/intel
parent	cd502a7f7c9c03b5b9a01fe8b59ba7c11efee3ad (diff)
iavf: fix dereference of null rx_buffer pointer
A recent commit efa14c3985828d ("iavf: allow null RX descriptors") added a null pointer sanity check on rx_buffer; however, rx_buffer is dereferenced before that check, so a null pointer dereference can potentially occur. Fix this by dereferencing rx_buffer only after the null pointer check.

Addresses-Coverity: ("Dereference before null check")
Signed-off-by: Colin Ian King <colin.king@canonical.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
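For illustration, here is a minimal standalone sketch (not the driver code itself) of the dereference-before-NULL-check pattern the patch removes; the struct and function names are simplified stand-ins for iavf_rx_buffer and iavf_construct_skb/iavf_build_skb.

/* Simplified stand-in types; the real driver uses struct iavf_rx_buffer. */
#include <stdio.h>

struct rx_buf {
	void *page;
	unsigned int page_offset;
};

/* Buggy shape: va is computed from rx_buffer in the initializer,
 * before the NULL test, so a NULL rx_buffer is dereferenced. */
static void *get_va_buggy(struct rx_buf *rx_buffer)
{
	void *va = (char *)rx_buffer->page + rx_buffer->page_offset;

	if (!rx_buffer)
		return NULL;
	return va;
}

/* Fixed shape: the dereference is moved after the NULL test,
 * mirroring what the patch does before the prefetch() calls. */
static void *get_va_fixed(struct rx_buf *rx_buffer)
{
	void *va;

	if (!rx_buffer)
		return NULL;
	va = (char *)rx_buffer->page + rx_buffer->page_offset;
	return va;
}

int main(void)
{
	char page[64];
	struct rx_buf buf = { .page = page, .page_offset = 8 };

	/* Both variants agree for a valid buffer... */
	printf("buggy, valid buf: %p\n", get_va_buggy(&buf));
	printf("fixed, valid buf: %p\n", get_va_fixed(&buf));
	/* ...but get_va_buggy(NULL) would crash, while the fixed
	 * variant safely returns NULL. */
	printf("fixed, NULL buf:  %p\n", get_va_fixed(NULL));
	return 0;
}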
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 1cde1601bc32..0cca1b589b56 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1296,7 +1296,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
struct iavf_rx_buffer *rx_buffer,
unsigned int size)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ void *va;
#if (PAGE_SIZE < 8192)
unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
@@ -1308,6 +1308,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
if (!rx_buffer)
return NULL;
/* prefetch first cache line of first page */
+ va = page_address(rx_buffer->page) + rx_buffer->page_offset;
prefetch(va);
#if L1_CACHE_BYTES < 128
prefetch(va + L1_CACHE_BYTES);
@@ -1362,7 +1363,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
struct iavf_rx_buffer *rx_buffer,
unsigned int size)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ void *va;
#if (PAGE_SIZE < 8192)
unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
@@ -1374,6 +1375,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
if (!rx_buffer)
return NULL;
/* prefetch first cache line of first page */
+ va = page_address(rx_buffer->page) + rx_buffer->page_offset;
prefetch(va);
#if L1_CACHE_BYTES < 128
prefetch(va + L1_CACHE_BYTES);