author     Daniel Borkmann <dborkman@redhat.com>   2014-05-23 18:43:58 +0200
committer  David S. Miller <davem@davemloft.net>   2014-05-23 16:48:05 -0400
commit     b1fcd35cf53553a0a3ef949b05106d921446abc3 (patch)
tree       4784eb248a9705f2eae7dcb10968497f0559499f /drivers/net/team/team_mode_loadbalance.c
parent     8556ce79d5986a87fee4c29300b4efee07c0f15e (diff)
download   linux-b1fcd35cf53553a0a3ef949b05106d921446abc3.tar.bz2
net: filter: let unattached filters use sock_fprog_kern
The sk_unattached_filter_create() API is used by BPF filters that are not directly attached or related to sockets, and are used in team, ptp, xt_bpf, cls_bpf, etc. As such, all users do their own internal management of obtaining filter blocks, and thus already have them in kernel memory and set up before calling into sk_unattached_filter_create(). As a result, due to the __user annotation in sock_fprog, sparse triggers false positives (incorrect type in assignment [different address space]) when filters are set up before passing them to sk_unattached_filter_create(). Therefore, let the sk_unattached_filter_create() API use sock_fprog_kern to overcome this issue.

Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
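For illustration only, not part of this patch: a minimal sketch of a kernel-side caller using the reworked API. The names example_fp and example_create_filter and the single accept-all instruction are hypothetical; the point is that the classic BPF program already lives in kernel memory, so it is described with sock_fprog_kern and sparse no longer complains about the __user annotation in sock_fprog.

  #include <linux/kernel.h>
  #include <linux/filter.h>

  /* Hypothetical user of sk_unattached_filter_create(), analogous to the
   * team/ptp/xt_bpf/cls_bpf callers: the filter block is built entirely in
   * kernel memory, so sock_fprog_kern is the appropriate type.
   */
  static struct sk_filter *example_fp;

  static int example_create_filter(void)
  {
  	/* Single-instruction classic BPF program: accept the whole packet. */
  	static struct sock_filter insns[] = {
  		BPF_STMT(BPF_RET | BPF_K, 0xffff),
  	};
  	struct sock_fprog_kern fprog = {
  		.len	= ARRAY_SIZE(insns),
  		.filter	= insns,
  	};

  	return sk_unattached_filter_create(&example_fp, &fprog);
  }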
Diffstat (limited to 'drivers/net/team/team_mode_loadbalance.c')
-rw-r--r--  drivers/net/team/team_mode_loadbalance.c  10
1 file changed, 5 insertions, 5 deletions
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index dbde3412ee5e..0a6ee07bf0af 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -49,7 +49,7 @@ struct lb_port_mapping {
 struct lb_priv_ex {
 	struct team *team;
 	struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
-	struct sock_fprog *orig_fprog;
+	struct sock_fprog_kern *orig_fprog;
 	struct {
 		unsigned int refresh_interval; /* in tenths of second */
 		struct delayed_work refresh_dw;
@@ -241,10 +241,10 @@ static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
 	return 0;
 }
 
-static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
+static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
 			  const void *data)
 {
-	struct sock_fprog *fprog;
+	struct sock_fprog_kern *fprog;
 	struct sock_filter *filter = (struct sock_filter *) data;
 
 	if (data_len % sizeof(struct sock_filter))
@@ -262,7 +262,7 @@ static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
 	return 0;
 }
 
-static void __fprog_destroy(struct sock_fprog *fprog)
+static void __fprog_destroy(struct sock_fprog_kern *fprog)
 {
 	kfree(fprog->filter);
 	kfree(fprog);
@@ -273,7 +273,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
 	struct lb_priv *lb_priv = get_lb_priv(team);
 	struct sk_filter *fp = NULL;
 	struct sk_filter *orig_fp;
-	struct sock_fprog *fprog = NULL;
+	struct sock_fprog_kern *fprog = NULL;
 	int err;
 
 	if (ctx->data.bin_val.len) {