author     Liu Jian <liujian56@huawei.com>          2022-04-16 18:57:59 +0800
committer  Daniel Borkmann <daniel@iogearbox.net>   2022-04-20 23:47:40 +0200
commit     45969b4152c1752089351cd6836a42a566d49bcf (patch)
tree       cd2ee44a535c276c7f14836e554fba45beabb94c
parent     c7655df434de1dab1af1b1ba2aad757b15e25b83 (diff)
download   linux-45969b4152c1752089351cd6836a42a566d49bcf.tar.bz2
bpf: Enlarge offset check value to INT_MAX in bpf_skb_{load,store}_bytes
The data length of skb frags + frag_list may be greater than 0xffff, and
skb_header_pointer cannot handle a negative offset. So use INT_MAX here to
check the validity of the offset. Apply the same change to the related
function skb_store_bytes.

Fixes: 05c74e5e53f6 ("bpf: add bpf_skb_load_bytes helper")
Signed-off-by: Liu Jian <liujian56@huawei.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20220416105801.88708-2-liujian56@huawei.com
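For illustration only (not part of the patch), below is a minimal sketch of a tc BPF
program, built with libbpf, that exercises the relaxed check: it calls
bpf_skb_load_bytes() at an offset above the old 0xffff cap, which can be a valid
payload offset once frags + frag_list push the skb past 64 KiB. The program name,
section, and the concrete offset are hypothetical; only the helper and its signature
come from the kernel.

/* Hypothetical sketch: read 4 bytes at an offset beyond the old 64 KiB limit. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int read_deep_offset(struct __sk_buff *skb)
{
	__u8 buf[4];

	/* Before this patch the helper rejected any offset > 0xffff with
	 * -EFAULT. After it, offsets up to INT_MAX are accepted as long as
	 * the requested bytes exist in the skb (skb_header_pointer still
	 * bounds-checks against skb->len).
	 */
	if (bpf_skb_load_bytes(skb, 0x10000, buf, sizeof(buf)) < 0)
		return 0; /* bytes not present, or offset rejected */

	return buf[0];
}

char LICENSE[] SEC("license") = "GPL";

With the old check, the call above always failed regardless of the actual skb
length; with the patch, it succeeds whenever the requested bytes are really
present in the packet.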
-rw-r--r--  net/core/filter.c  |  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 143f442a9505..8847316ee20e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1687,7 +1687,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
return -EINVAL;
- if (unlikely(offset > 0xffff))
+ if (unlikely(offset > INT_MAX))
return -EFAULT;
if (unlikely(bpf_try_make_writable(skb, offset + len)))
return -EFAULT;
@@ -1722,7 +1722,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
{
void *ptr;
- if (unlikely(offset > 0xffff))
+ if (unlikely(offset > INT_MAX))
goto err_clear;
ptr = skb_header_pointer(skb, offset, len, to);