From 4212368750cf94623f5b382293625dbb3d8d041a Mon Sep 17 00:00:00 2001 From: Jasmin Jessich Date: Fri, 9 Nov 2018 16:06:05 -0500 Subject: media: Use wait_queue_head_t for media_request The portable type for a wait queue is wait_queue_head_t. Signed-off-by: Jasmin Jessich Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/media-request.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/media/media-request.h b/include/media/media-request.h index 0ce75c35131f..bd36d7431698 100644 --- a/include/media/media-request.h +++ b/include/media/media-request.h @@ -68,7 +68,7 @@ struct media_request { unsigned int access_count; struct list_head objects; unsigned int num_incomplete_objects; - struct wait_queue_head poll_wait; + wait_queue_head_t poll_wait; spinlock_t lock; }; -- cgit v1.2.3 From 5363857b916c1f48027e9b96ee8be8376bf20811 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 29 Nov 2018 12:05:19 +0100 Subject: ALSA: pcm: Fix interval evaluation with openmin/max As addressed in alsa-lib (commit b420056604f0), we need to fix the case where the evaluation of PCM interval "(x x+1]" leads to -EINVAL. After applying rules, such an interval may be translated as "(x x+1)". Fixes: ff2d6acdf6f1 ("ALSA: pcm: Fix snd_interval_refine first/last with open min/max") Cc: stable@vger.kernel.org Signed-off-by: Takashi Iwai --- include/sound/pcm_params.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h index 2dd37cada7c0..888a833d3b00 100644 --- a/include/sound/pcm_params.h +++ b/include/sound/pcm_params.h @@ -254,11 +254,13 @@ static inline int snd_interval_empty(const struct snd_interval *i) static inline int snd_interval_single(const struct snd_interval *i) { return (i->min == i->max || - (i->min + 1 == i->max && i->openmax)); + (i->min + 1 == i->max && (i->openmin || i->openmax))); } static inline int snd_interval_value(const struct snd_interval *i) { + if (i->openmin && !i->openmax) + return i->max; return i->min; } -- cgit v1.2.3 From 90230968f102acbe103fbf7c03d41addfef5f153 Mon Sep 17 00:00:00 2001 From: Baruch Siach Date: Thu, 29 Nov 2018 12:00:05 +0200 Subject: net: phy: sfp: correct location of SFP standards SFP standards are now available from the SNIA (Storage Networking Industry Association) website. Cc: Andrew Lunn Cc: Florian Fainelli Signed-off-by: Baruch Siach Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/sfp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/sfp.h b/include/linux/sfp.h index d37518e89db2..d9d9de3fcf8e 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -224,7 +224,7 @@ struct sfp_eeprom_ext { * * See the SFF-8472 specification and related documents for the definition * of these structure members. This can be obtained from - * ftp://ftp.seagate.com/sff + * https://www.snia.org/technology-communities/sff/specifications */ struct sfp_eeprom_id { struct sfp_eeprom_base base; -- cgit v1.2.3 From b7df9ada9a7700dbcca1ba53d217c01e3d48179c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 1 Dec 2018 01:18:53 +0100 Subject: bpf: fix pointer offsets in context for 32 bit Currently, pointer offsets in three BPF context structures are broken in two scenarios: i) 32 bit compiled applications running on 64 bit kernels, and ii) LLVM compiled BPF programs running on 32 bit kernels.
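
(Aside: a simplified illustration of the offset mismatch, with made-up struct names rather than the real uapi definitions. On a 32 bit build a bare pointer member occupies 4 bytes, so every field behind it shifts relative to the 64 bit layout the verifier assumes; padding the pointer out to a fixed 8 bytes, as the __bpf_md_ptr() helper introduced below does, keeps both layouts identical.)

#include <linux/types.h>

/* Broken: on 32 bit, 'data' occupies 4 bytes and 'family' lands at
 * offset 4, while the (always 64 bit) verifier expects it at offset 8.
 */
struct md_broken {
	void *data;
	__u32 family;
};

/* Fixed: force the pointer into an 8-byte slot aligned to 8 bytes, so
 * 'family' sits at offset 8 on both 32 and 64 bit builds.
 */
struct md_fixed {
	union {
		void *data;
		__u64 :64;
	} __attribute__((aligned(8)));
	__u32 family;
};
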
The latter is due to BPF target machine being strictly 64 bit. So in each of the cases the offsets will mismatch in verifier when checking / rewriting context access. Fix this by providing a helper macro __bpf_md_ptr() that will enforce padding up to 64 bit and proper alignment, and for context access a macro bpf_ctx_range_ptr() which will cover full 64 bit member range on 32 bit archs. For flow_keys, we additionally need to force the size check to sizeof(__u64) as with other pointer types. Fixes: d58e468b1112 ("flow_dissector: implements flow dissector BPF hook") Fixes: 4f738adba30a ("bpf: create tcp_bpf_ulp allowing BPF to monitor socket TX/RX data") Fixes: 2dbb9b9e6df6 ("bpf: Introduce BPF_PROG_TYPE_SK_REUSEPORT") Reported-by: David S. Miller Signed-off-by: Daniel Borkmann Acked-by: David S. Miller Tested-by: David S. Miller Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 7 +++++++ include/uapi/linux/bpf.h | 17 ++++++++++++----- net/core/filter.c | 16 ++++++++-------- tools/include/uapi/linux/bpf.h | 17 ++++++++++++----- 4 files changed, 39 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/linux/filter.h b/include/linux/filter.h index 448dcc448f1f..795ff0b869bb 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -449,6 +449,13 @@ struct sock_reuseport; offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \ offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1 +#if BITS_PER_LONG == 64 +# define bpf_ctx_range_ptr(TYPE, MEMBER) \ + offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 +#else +# define bpf_ctx_range_ptr(TYPE, MEMBER) \ + offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1 +#endif /* BITS_PER_LONG == 64 */ #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ ({ \ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 852dc17ab47a..426b5c8a245b 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2422,6 +2422,12 @@ enum bpf_lwt_encap_mode { BPF_LWT_ENCAP_SEG6_INLINE }; +#define __bpf_md_ptr(type, name) \ +union { \ + type name; \ + __u64 :64; \ +} __attribute__((aligned(8))) + /* user accessible mirror of in-kernel sk_buff. * new fields can only be added to the end of this structure */ @@ -2456,7 +2462,7 @@ struct __sk_buff { /* ... here. */ __u32 data_meta; - struct bpf_flow_keys *flow_keys; + __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); }; struct bpf_tunnel_key { @@ -2572,8 +2578,8 @@ enum sk_action { * be added to the end of this structure */ struct sk_msg_md { - void *data; - void *data_end; + __bpf_md_ptr(void *, data); + __bpf_md_ptr(void *, data_end); __u32 family; __u32 remote_ip4; /* Stored in network byte order */ @@ -2589,8 +2595,9 @@ struct sk_reuseport_md { * Start of directly accessible data. It begins from * the tcp/udp header. */ - void *data; - void *data_end; /* End of directly accessible data */ + __bpf_md_ptr(void *, data); + /* End of directly accessible data */ + __bpf_md_ptr(void *, data_end); /* * Total length of packet (starting from the tcp/udp header). 
* Note that the directly accessible bytes (data_end - data) diff --git a/net/core/filter.c b/net/core/filter.c index 9a1327eb25fa..6ee605da990f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5435,8 +5435,8 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type if (size != size_default) return false; break; - case bpf_ctx_range(struct __sk_buff, flow_keys): - if (size != sizeof(struct bpf_flow_keys *)) + case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): + if (size != sizeof(__u64)) return false; break; default: @@ -5464,7 +5464,7 @@ static bool sk_filter_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, data): case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, data_end): - case bpf_ctx_range(struct __sk_buff, flow_keys): + case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; } @@ -5489,7 +5489,7 @@ static bool cg_skb_is_valid_access(int off, int size, switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range(struct __sk_buff, data_meta): - case bpf_ctx_range(struct __sk_buff, flow_keys): + case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): return false; case bpf_ctx_range(struct __sk_buff, data): case bpf_ctx_range(struct __sk_buff, data_end): @@ -5530,7 +5530,7 @@ static bool lwt_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range_till(struct __sk_buff, family, local_port): case bpf_ctx_range(struct __sk_buff, data_meta): - case bpf_ctx_range(struct __sk_buff, flow_keys): + case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): return false; } @@ -5756,7 +5756,7 @@ static bool tc_cls_act_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, data_end): info->reg_type = PTR_TO_PACKET_END; break; - case bpf_ctx_range(struct __sk_buff, flow_keys): + case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; } @@ -5958,7 +5958,7 @@ static bool sk_skb_is_valid_access(int off, int size, switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range(struct __sk_buff, data_meta): - case bpf_ctx_range(struct __sk_buff, flow_keys): + case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): return false; } @@ -6039,7 +6039,7 @@ static bool flow_dissector_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, data_end): info->reg_type = PTR_TO_PACKET_END; break; - case bpf_ctx_range(struct __sk_buff, flow_keys): + case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): info->reg_type = PTR_TO_FLOW_KEYS; break; case bpf_ctx_range(struct __sk_buff, tc_classid): diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 852dc17ab47a..426b5c8a245b 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2422,6 +2422,12 @@ enum bpf_lwt_encap_mode { BPF_LWT_ENCAP_SEG6_INLINE }; +#define __bpf_md_ptr(type, name) \ +union { \ + type name; \ + __u64 :64; \ +} __attribute__((aligned(8))) + /* user accessible mirror of in-kernel sk_buff. * new fields can only be added to the end of this structure */ @@ -2456,7 +2462,7 @@ struct __sk_buff { /* ... here. 
*/ __u32 data_meta; - struct bpf_flow_keys *flow_keys; + __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); }; struct bpf_tunnel_key { @@ -2572,8 +2578,8 @@ enum sk_action { * be added to the end of this structure */ struct sk_msg_md { - void *data; - void *data_end; + __bpf_md_ptr(void *, data); + __bpf_md_ptr(void *, data_end); __u32 family; __u32 remote_ip4; /* Stored in network byte order */ @@ -2589,8 +2595,9 @@ struct sk_reuseport_md { * Start of directly accessible data. It begins from * the tcp/udp header. */ - void *data; - void *data_end; /* End of directly accessible data */ + __bpf_md_ptr(void *, data); + /* End of directly accessible data */ + __bpf_md_ptr(void *, data_end); /* * Total length of packet (starting from the tcp/udp header). * Note that the directly accessible bytes (data_end - data) -- cgit v1.2.3 From f71c6143c2038df1cb43a4b9c90740d14f77467c Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Fri, 30 Nov 2018 15:32:20 -0800 Subject: bpf: Support sk lookup in netns with id 0 David Ahern and Nicolas Dichtel report that the handling of the netns id 0 is incorrect for the BPF socket lookup helpers: rather than finding the netns with id 0, it is resolving to the current netns. This renders the netns_id 0 inaccessible. To fix this, adjust the API for the netns to treat all negative s32 values as a lookup in the current netns (including u64 values which when truncated to s32 become negative), while any values with a positive value in the signed 32-bit integer space would result in a lookup for a socket in the netns corresponding to that id. As before, if the netns with that ID does not exist, no socket will be found. Any netns outside of these ranges will fail to find a corresponding socket, as those values are reserved for future usage. Signed-off-by: Joe Stringer Acked-by: Nicolas Dichtel Acked-by: Joey Pabalinas Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 35 ++++++++++++-------- net/core/filter.c | 11 ++++--- tools/include/uapi/linux/bpf.h | 39 +++++++++++++++-------- tools/testing/selftests/bpf/bpf_helpers.h | 4 +-- tools/testing/selftests/bpf/test_sk_lookup_kern.c | 18 +++++------ 5 files changed, 63 insertions(+), 44 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 426b5c8a245b..cba518c57229 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2170,7 +2170,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) + * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for TCP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, @@ -2187,12 +2187,14 @@ union bpf_attr { * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket. * - * If the *netns* is zero, then the socket lookup table in the - * netns associated with the *ctx* will be used. For the TC hooks, - * this in the netns of the device in the skb. For socket hooks, - * this in the netns of the socket. If *netns* is non-zero, then - * it specifies the ID of the netns relative to the netns - * associated with the *ctx*. + * If the *netns* is a negative signed 32-bit integer, then the + * socket lookup table in the netns associated with the *ctx* will + * will be used. 
For the TC hooks, this is the netns of the device + * in the skb. For socket hooks, this is the netns of the socket. + * If *netns* is any other signed 32-bit value greater than or + * equal to zero then it specifies the ID of the netns relative to + * the netns associated with the *ctx*. *netns* values beyond the + * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. @@ -2202,7 +2204,7 @@ union bpf_attr { * Return * Pointer to *struct bpf_sock*, or NULL in case of failure. * - * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) + * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for UDP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, @@ -2219,12 +2221,14 @@ union bpf_attr { * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket. * - * If the *netns* is zero, then the socket lookup table in the - * netns associated with the *ctx* will be used. For the TC hooks, - * this in the netns of the device in the skb. For socket hooks, - * this in the netns of the socket. If *netns* is non-zero, then - * it specifies the ID of the netns relative to the netns - * associated with the *ctx*. + * If the *netns* is a negative signed 32-bit integer, then the + * socket lookup table in the netns associated with the *ctx* will + * will be used. For the TC hooks, this is the netns of the device + * in the skb. For socket hooks, this is the netns of the socket. + * If *netns* is any other signed 32-bit value greater than or + * equal to zero then it specifies the ID of the netns relative to + * the netns associated with the *ctx*. *netns* values beyond the + * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. @@ -2405,6 +2409,9 @@ enum bpf_func_id { /* BPF_FUNC_perf_event_output for sk_buff input context. */ #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) +/* Current network namespace */ +#define BPF_F_CURRENT_NETNS (-1L) + /* Mode for BPF_FUNC_skb_adjust_room helper. */ enum bpf_adj_room_mode { BPF_ADJ_ROOM_NET, diff --git a/net/core/filter.c b/net/core/filter.c index 6ee605da990f..8d2c629501e2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4890,22 +4890,23 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, struct net *net; family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6; - if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags)) + if (unlikely(family == AF_UNSPEC || flags || + !((s32)netns_id < 0 || netns_id <= S32_MAX))) goto out; if (skb->dev) caller_net = dev_net(skb->dev); else caller_net = sock_net(skb->sk); - if (netns_id) { + if ((s32)netns_id < 0) { + net = caller_net; + sk = sk_lookup(net, tuple, skb, family, proto); + } else { net = get_net_ns_by_id(caller_net, netns_id); if (unlikely(!net)) goto out; sk = sk_lookup(net, tuple, skb, family, proto); put_net(net); - } else { - net = caller_net; - sk = sk_lookup(net, tuple, skb, family, proto); } if (sk) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 426b5c8a245b..76b265c7d93e 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2170,7 +2170,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) + * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for TCP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, @@ -2187,12 +2187,14 @@ union bpf_attr { * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket. * - * If the *netns* is zero, then the socket lookup table in the - * netns associated with the *ctx* will be used. For the TC hooks, - * this in the netns of the device in the skb. For socket hooks, - * this in the netns of the socket. If *netns* is non-zero, then - * it specifies the ID of the netns relative to the netns - * associated with the *ctx*. + * If the *netns* is a negative signed 32-bit integer, then the + * socket lookup table in the netns associated with the *ctx* will + * will be used. For the TC hooks, this is the netns of the device + * in the skb. For socket hooks, this is the netns of the socket. + * If *netns* is any other signed 32-bit value greater than or + * equal to zero then it specifies the ID of the netns relative to + * the netns associated with the *ctx*. *netns* values beyond the + * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. @@ -2201,8 +2203,10 @@ union bpf_attr { * **CONFIG_NET** configuration option. * Return * Pointer to *struct bpf_sock*, or NULL in case of failure. + * For sockets with reuseport option, *struct bpf_sock* + * return is from reuse->socks[] using hash of the packet. * - * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) + * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for UDP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, @@ -2219,12 +2223,14 @@ union bpf_attr { * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket. * - * If the *netns* is zero, then the socket lookup table in the - * netns associated with the *ctx* will be used. For the TC hooks, - * this in the netns of the device in the skb. For socket hooks, - * this in the netns of the socket. If *netns* is non-zero, then - * it specifies the ID of the netns relative to the netns - * associated with the *ctx*. + * If the *netns* is a negative signed 32-bit integer, then the + * socket lookup table in the netns associated with the *ctx* will + * will be used. For the TC hooks, this is the netns of the device + * in the skb. For socket hooks, this is the netns of the socket. + * If *netns* is any other signed 32-bit value greater than or + * equal to zero then it specifies the ID of the netns relative to + * the netns associated with the *ctx*. *netns* values beyond the + * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. @@ -2233,6 +2239,8 @@ union bpf_attr { * **CONFIG_NET** configuration option. * Return * Pointer to *struct bpf_sock*, or NULL in case of failure. + * For sockets with reuseport option, *struct bpf_sock* + * return is from reuse->socks[] using hash of the packet. 
* * int bpf_sk_release(struct bpf_sock *sk) * Description @@ -2405,6 +2413,9 @@ enum bpf_func_id { /* BPF_FUNC_perf_event_output for sk_buff input context. */ #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) +/* Current network namespace */ +#define BPF_F_CURRENT_NETNS (-1L) + /* Mode for BPF_FUNC_skb_adjust_room helper. */ enum bpf_adj_room_mode { BPF_ADJ_ROOM_NET, diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 686e57ce40f4..efb6c13ab0de 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -154,12 +154,12 @@ static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) = (void *) BPF_FUNC_skb_ancestor_cgroup_id; static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, - int size, unsigned int netns_id, + int size, unsigned long long netns_id, unsigned long long flags) = (void *) BPF_FUNC_sk_lookup_tcp; static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, struct bpf_sock_tuple *tuple, - int size, unsigned int netns_id, + int size, unsigned long long netns_id, unsigned long long flags) = (void *) BPF_FUNC_sk_lookup_udp; static int (*bpf_sk_release)(struct bpf_sock *sk) = diff --git a/tools/testing/selftests/bpf/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/test_sk_lookup_kern.c index b745bdc08c2b..e21cd736c196 100644 --- a/tools/testing/selftests/bpf/test_sk_lookup_kern.c +++ b/tools/testing/selftests/bpf/test_sk_lookup_kern.c @@ -72,7 +72,7 @@ int bpf_sk_lookup_test0(struct __sk_buff *skb) return TC_ACT_SHOT; tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6); - sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, 0, 0); + sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0); if (sk) bpf_sk_release(sk); return sk ? 
TC_ACT_OK : TC_ACT_UNSPEC; @@ -84,7 +84,7 @@ int bpf_sk_lookup_test1(struct __sk_buff *skb) struct bpf_sock_tuple tuple = {}; struct bpf_sock *sk; - sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); + sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); if (sk) bpf_sk_release(sk); return 0; @@ -97,7 +97,7 @@ int bpf_sk_lookup_uaf(struct __sk_buff *skb) struct bpf_sock *sk; __u32 family = 0; - sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); + sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); if (sk) { bpf_sk_release(sk); family = sk->family; @@ -112,7 +112,7 @@ int bpf_sk_lookup_modptr(struct __sk_buff *skb) struct bpf_sock *sk; __u32 family; - sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); + sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); if (sk) { sk += 1; bpf_sk_release(sk); @@ -127,7 +127,7 @@ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb) struct bpf_sock *sk; __u32 family; - sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); + sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); sk += 1; if (sk) bpf_sk_release(sk); @@ -139,7 +139,7 @@ int bpf_sk_lookup_test2(struct __sk_buff *skb) { struct bpf_sock_tuple tuple = {}; - bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); + bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); return 0; } @@ -149,7 +149,7 @@ int bpf_sk_lookup_test3(struct __sk_buff *skb) struct bpf_sock_tuple tuple = {}; struct bpf_sock *sk; - sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); + sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); bpf_sk_release(sk); bpf_sk_release(sk); return 0; @@ -161,7 +161,7 @@ int bpf_sk_lookup_test4(struct __sk_buff *skb) struct bpf_sock_tuple tuple = {}; struct bpf_sock *sk; - sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); + sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); bpf_sk_release(sk); return 0; } @@ -169,7 +169,7 @@ int bpf_sk_lookup_test4(struct __sk_buff *skb) void lookup_no_release(struct __sk_buff *skb) { struct bpf_sock_tuple tuple = {}; - bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); + bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); } SEC("fail_no_release_subcall") -- cgit v1.2.3 From d74286d2c25ad29dbf9e342955dd8dc31f21653b Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Fri, 30 Nov 2018 15:32:21 -0800 Subject: bpf: Improve socket lookup reuseport documentation Improve the wording around socket lookup for reuseport sockets, and ensure that both bpf.h headers are in sync. Signed-off-by: Joe Stringer Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 4 ++++ tools/include/uapi/linux/bpf.h | 8 ++++---- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index cba518c57229..72c453a8bf50 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2203,6 +2203,8 @@ union bpf_attr { * **CONFIG_NET** configuration option. * Return * Pointer to *struct bpf_sock*, or NULL in case of failure. + * For sockets with reuseport option, the *struct bpf_sock* + * result is from reuse->socks[] using the hash of the tuple. * * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description @@ -2237,6 +2239,8 @@ union bpf_attr { * **CONFIG_NET** configuration option. 
* Return * Pointer to *struct bpf_sock*, or NULL in case of failure. + * For sockets with reuseport option, the *struct bpf_sock* + * result is from reuse->socks[] using the hash of the tuple. * * int bpf_sk_release(struct bpf_sock *sk) * Description diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 76b265c7d93e..72c453a8bf50 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2203,8 +2203,8 @@ union bpf_attr { * **CONFIG_NET** configuration option. * Return * Pointer to *struct bpf_sock*, or NULL in case of failure. - * For sockets with reuseport option, *struct bpf_sock* - * return is from reuse->socks[] using hash of the packet. + * For sockets with reuseport option, the *struct bpf_sock* + * result is from reuse->socks[] using the hash of the tuple. * * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description @@ -2239,8 +2239,8 @@ union bpf_attr { * **CONFIG_NET** configuration option. * Return * Pointer to *struct bpf_sock*, or NULL in case of failure. - * For sockets with reuseport option, *struct bpf_sock* - * return is from reuse->socks[] using hash of the packet. + * For sockets with reuseport option, the *struct bpf_sock* + * result is from reuse->socks[] using the hash of the tuple. * * int bpf_sk_release(struct bpf_sock *sk) * Description -- cgit v1.2.3 From 71700bb96047f68a0aae3932466fc7c9ad5ce6c0 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 30 Nov 2018 16:11:15 -0500 Subject: SUNRPC: Fix a memory leak in call_encode() If we retransmit an RPC request, we currently end up clobbering the value of req->rq_rcv_buf.bvec that was allocated by the initial call to xprt_request_prepare(req). Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xdr.h | 1 - net/sunrpc/clnt.c | 1 + net/sunrpc/xprt.c | 2 ++ 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 43106ffa6788..2ec128060239 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -72,7 +72,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) buf->head[0].iov_base = start; buf->head[0].iov_len = len; buf->tail[0].iov_len = 0; - buf->bvec = NULL; buf->pages = NULL; buf->page_len = 0; buf->flags = 0; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index e35d642558e7..c6782aa47525 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -2309,6 +2309,7 @@ out_retry: task->tk_status = 0; /* Note: rpc_verify_header() may have freed the RPC slot */ if (task->tk_rqstp == req) { + xdr_free_bvec(&req->rq_rcv_buf); req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0; if (task->tk_client->cl_discrtry) xprt_conditional_disconnect(req->rq_xprt, diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 86bea4520c4d..122c91c28e7c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1623,6 +1623,8 @@ xprt_request_init(struct rpc_task *task) req->rq_snd_buf.buflen = 0; req->rq_rcv_buf.len = 0; req->rq_rcv_buf.buflen = 0; + req->rq_snd_buf.bvec = NULL; + req->rq_rcv_buf.bvec = NULL; req->rq_release_snd_buf = NULL; xprt_reset_majortimeo(req); dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid, -- cgit v1.2.3 From 37c2578c0c40e286bc0d30bdc05290b2058cf66e Mon Sep 17 00:00:00 2001 From: Dexuan Cui Date: Mon, 3 Dec 2018 00:54:35 +0000 Subject: Drivers: hv: vmbus: Offload the handling of channels to two workqueues vmbus_process_offer() mustn't call 
channel->sc_creation_callback() directly for sub-channels, because sc_creation_callback() -> vmbus_open() may never get the host's response to the OPEN_CHANNEL message (the host may rescind a channel at any time, e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind() may not wake up the vmbus_open() as it's blocked due to a non-zero vmbus_connection.offer_in_progress, and finally we have a deadlock. The above is also true for primary channels, if the related device drivers use sync probing mode by default. And, usually the handling of primary channels and sub-channels can depend on each other, so we should offload them to different workqueues to avoid possible deadlock, e.g. in sync-probing mode, NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() -> rtnl_lock(), and causes deadlock: the former gets the rtnl_lock and waits for all the sub-channels to appear, but the latter can't get the rtnl_lock and this blocks the handling of sub-channels. The patch can fix the multiple-NIC deadlock described above for v3.x kernels (e.g. RHEL 7.x) which don't support async-probing of devices, and v4.4, v4.9, v4.14 and v4.18 which support async-probing but don't enable async-probing for Hyper-V drivers (yet). The patch can also fix the hang issue in sub-channel's handling described above for all versions of kernels, including v4.19 and v4.20-rc4. So actually the patch should be applied to all the existing kernels, not only the kernels that have 8195b1396ec8. Fixes: 8195b1396ec8 ("hv_netvsc: fix deadlock on hotplug") Cc: stable@vger.kernel.org Cc: Stephen Hemminger Cc: K. Y. Srinivasan Cc: Haiyang Zhang Signed-off-by: Dexuan Cui Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- drivers/hv/channel_mgmt.c | 189 ++++++++++++++++++++++++++++++---------------- drivers/hv/connection.c | 24 +++++- drivers/hv/hyperv_vmbus.h | 7 ++ include/linux/hyperv.h | 7 ++ 4 files changed, 161 insertions(+), 66 deletions(-) (limited to 'include') diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 6277597d3d58..edd34c167a9b 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -435,61 +435,16 @@ void vmbus_free_channels(void) } } -/* - * vmbus_process_offer - Process the offer by creating a channel/device - * associated with this offer - */ -static void vmbus_process_offer(struct vmbus_channel *newchannel) +/* Note: the function can run concurrently for primary/sub channels. */ +static void vmbus_add_channel_work(struct work_struct *work) { - struct vmbus_channel *channel; - bool fnew = true; + struct vmbus_channel *newchannel = + container_of(work, struct vmbus_channel, add_channel_work); + struct vmbus_channel *primary_channel = newchannel->primary_channel; unsigned long flags; u16 dev_type; int ret; - /* Make sure this is a new offer */ - mutex_lock(&vmbus_connection.channel_mutex); - - /* - * Now that we have acquired the channel_mutex, - * we can release the potentially racing rescind thread. - */ - atomic_dec(&vmbus_connection.offer_in_progress); - - list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { - if (!uuid_le_cmp(channel->offermsg.offer.if_type, - newchannel->offermsg.offer.if_type) && - !uuid_le_cmp(channel->offermsg.offer.if_instance, - newchannel->offermsg.offer.if_instance)) { - fnew = false; - break; - } - } - - if (fnew) - list_add_tail(&newchannel->listentry, - &vmbus_connection.chn_list); - - mutex_unlock(&vmbus_connection.channel_mutex); - - if (!fnew) { - /* - * Check to see if this is a sub-channel. 
- */ - if (newchannel->offermsg.offer.sub_channel_index != 0) { - /* - * Process the sub-channel. - */ - newchannel->primary_channel = channel; - spin_lock_irqsave(&channel->lock, flags); - list_add_tail(&newchannel->sc_list, &channel->sc_list); - channel->num_sc++; - spin_unlock_irqrestore(&channel->lock, flags); - } else { - goto err_free_chan; - } - } - dev_type = hv_get_dev_type(newchannel); init_vp_index(newchannel, dev_type); @@ -507,27 +462,26 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) /* * This state is used to indicate a successful open * so that when we do close the channel normally, we - * can cleanup properly + * can cleanup properly. */ newchannel->state = CHANNEL_OPEN_STATE; - if (!fnew) { - struct hv_device *dev - = newchannel->primary_channel->device_obj; + if (primary_channel != NULL) { + /* newchannel is a sub-channel. */ + struct hv_device *dev = primary_channel->device_obj; if (vmbus_add_channel_kobj(dev, newchannel)) - goto err_free_chan; + goto err_deq_chan; + + if (primary_channel->sc_creation_callback != NULL) + primary_channel->sc_creation_callback(newchannel); - if (channel->sc_creation_callback != NULL) - channel->sc_creation_callback(newchannel); newchannel->probe_done = true; return; } /* - * Start the process of binding this offer to the driver - * We need to set the DeviceObject field before calling - * vmbus_child_dev_add() + * Start the process of binding the primary channel to the driver */ newchannel->device_obj = vmbus_device_create( &newchannel->offermsg.offer.if_type, @@ -556,13 +510,28 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) err_deq_chan: mutex_lock(&vmbus_connection.channel_mutex); - list_del(&newchannel->listentry); + + /* + * We need to set the flag, otherwise + * vmbus_onoffer_rescind() can be blocked. + */ + newchannel->probe_done = true; + + if (primary_channel == NULL) { + list_del(&newchannel->listentry); + } else { + spin_lock_irqsave(&primary_channel->lock, flags); + list_del(&newchannel->sc_list); + spin_unlock_irqrestore(&primary_channel->lock, flags); + } + mutex_unlock(&vmbus_connection.channel_mutex); if (newchannel->target_cpu != get_cpu()) { put_cpu(); smp_call_function_single(newchannel->target_cpu, - percpu_channel_deq, newchannel, true); + percpu_channel_deq, + newchannel, true); } else { percpu_channel_deq(newchannel); put_cpu(); @@ -570,14 +539,104 @@ err_deq_chan: vmbus_release_relid(newchannel->offermsg.child_relid); -err_free_chan: free_channel(newchannel); } +/* + * vmbus_process_offer - Process the offer by creating a channel/device + * associated with this offer + */ +static void vmbus_process_offer(struct vmbus_channel *newchannel) +{ + struct vmbus_channel *channel; + struct workqueue_struct *wq; + unsigned long flags; + bool fnew = true; + + mutex_lock(&vmbus_connection.channel_mutex); + + /* + * Now that we have acquired the channel_mutex, + * we can release the potentially racing rescind thread. + */ + atomic_dec(&vmbus_connection.offer_in_progress); + + list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { + if (!uuid_le_cmp(channel->offermsg.offer.if_type, + newchannel->offermsg.offer.if_type) && + !uuid_le_cmp(channel->offermsg.offer.if_instance, + newchannel->offermsg.offer.if_instance)) { + fnew = false; + break; + } + } + + if (fnew) + list_add_tail(&newchannel->listentry, + &vmbus_connection.chn_list); + else { + /* + * Check to see if this is a valid sub-channel. 
+ */ + if (newchannel->offermsg.offer.sub_channel_index == 0) { + mutex_unlock(&vmbus_connection.channel_mutex); + /* + * Don't call free_channel(), because newchannel->kobj + * is not initialized yet. + */ + kfree(newchannel); + WARN_ON_ONCE(1); + return; + } + /* + * Process the sub-channel. + */ + newchannel->primary_channel = channel; + spin_lock_irqsave(&channel->lock, flags); + list_add_tail(&newchannel->sc_list, &channel->sc_list); + spin_unlock_irqrestore(&channel->lock, flags); + } + + mutex_unlock(&vmbus_connection.channel_mutex); + + /* + * vmbus_process_offer() mustn't call channel->sc_creation_callback() + * directly for sub-channels, because sc_creation_callback() -> + * vmbus_open() may never get the host's response to the + * OPEN_CHANNEL message (the host may rescind a channel at any time, + * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind() + * may not wake up the vmbus_open() as it's blocked due to a non-zero + * vmbus_connection.offer_in_progress, and finally we have a deadlock. + * + * The above is also true for primary channels, if the related device + * drivers use sync probing mode by default. + * + * And, usually the handling of primary channels and sub-channels can + * depend on each other, so we should offload them to different + * workqueues to avoid possible deadlock, e.g. in sync-probing mode, + * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() -> + * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock + * and waits for all the sub-channels to appear, but the latter + * can't get the rtnl_lock and this blocks the handling of + * sub-channels. + */ + INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work); + wq = fnew ? vmbus_connection.handle_primary_chan_wq : + vmbus_connection.handle_sub_chan_wq; + queue_work(wq, &newchannel->add_channel_work); +} + /* * We use this state to statically distribute the channel interrupt load. */ static int next_numa_node_id; +/* + * init_vp_index() accesses global variables like next_numa_node_id, and + * it can run concurrently for primary channels and sub-channels: see + * vmbus_process_offer(), so we need the lock to protect the global + * variables. + */ +static DEFINE_SPINLOCK(bind_channel_to_cpu_lock); /* * Starting with Win8, we can statically distribute the incoming @@ -613,6 +672,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) return; } + spin_lock(&bind_channel_to_cpu_lock); + /* * Based on the channel affinity policy, we will assign the NUMA * nodes. 
@@ -695,6 +756,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) channel->target_cpu = cur_cpu; channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu); + spin_unlock(&bind_channel_to_cpu_lock); + free_cpumask_var(available_mask); } diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index f4d08c8ac7f8..4fe117b761ce 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -190,6 +190,20 @@ int vmbus_connect(void) goto cleanup; } + vmbus_connection.handle_primary_chan_wq = + create_workqueue("hv_pri_chan"); + if (!vmbus_connection.handle_primary_chan_wq) { + ret = -ENOMEM; + goto cleanup; + } + + vmbus_connection.handle_sub_chan_wq = + create_workqueue("hv_sub_chan"); + if (!vmbus_connection.handle_sub_chan_wq) { + ret = -ENOMEM; + goto cleanup; + } + INIT_LIST_HEAD(&vmbus_connection.chn_msg_list); spin_lock_init(&vmbus_connection.channelmsg_lock); @@ -280,10 +294,14 @@ void vmbus_disconnect(void) */ vmbus_initiate_unload(false); - if (vmbus_connection.work_queue) { - drain_workqueue(vmbus_connection.work_queue); + if (vmbus_connection.handle_sub_chan_wq) + destroy_workqueue(vmbus_connection.handle_sub_chan_wq); + + if (vmbus_connection.handle_primary_chan_wq) + destroy_workqueue(vmbus_connection.handle_primary_chan_wq); + + if (vmbus_connection.work_queue) destroy_workqueue(vmbus_connection.work_queue); - } if (vmbus_connection.int_page) { free_pages((unsigned long)vmbus_connection.int_page, 0); diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 72eaba3d50fc..87d3d7da78f8 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -335,7 +335,14 @@ struct vmbus_connection { struct list_head chn_list; struct mutex channel_mutex; + /* + * An offer message is handled first on the work_queue, and then + * is further handled on handle_primary_chan_wq or + * handle_sub_chan_wq. + */ struct workqueue_struct *work_queue; + struct workqueue_struct *handle_primary_chan_wq; + struct workqueue_struct *handle_sub_chan_wq; }; diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index b3e24368930a..14131b6fae68 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -905,6 +905,13 @@ struct vmbus_channel { bool probe_done; + /* + * We must offload the handling of the primary/sub channels + * from the single-threaded vmbus_connection.work_queue to + * two different workqueue, otherwise we can block + * vmbus_connection.work_queue and hang: see vmbus_process_offer(). + */ + struct work_struct add_channel_work; }; static inline bool is_hvsock_channel(const struct vmbus_channel *c) -- cgit v1.2.3 From 6093d3002eabd7c2913d97f1d1f4ce34b072acf9 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 28 Nov 2018 03:37:45 -0500 Subject: media: vb2: keep a reference to the request until dqbuf When vb2_buffer_done is called the buffer is unbound from the request and put. The media_request_object_put also 'put's the request reference. If the application has already closed the request fd, then that means that the request reference at that point goes to 0 and the whole request is released. 
This means that the control handler associated with the request is also freed and that causes this kernel oops: [174705.995401] BUG: sleeping function called from invalid context at kernel/locking/mutex.c:908 [174705.995411] in_atomic(): 1, irqs_disabled(): 1, pid: 28071, name: vivid-000-vid-o [174705.995416] 2 locks held by vivid-000-vid-o/28071: [174705.995420] #0: 000000001ea3a232 (&dev->mutex#3){....}, at: vivid_thread_vid_out+0x3f5/0x550 [vivid] [174705.995447] #1: 00000000e30a0d1e (&(&q->done_lock)->rlock){....}, at: vb2_buffer_done+0x92/0x1d0 [videobuf2_common] [174705.995460] Preemption disabled at: [174705.995461] [<0000000000000000>] (null) [174705.995472] CPU: 11 PID: 28071 Comm: vivid-000-vid-o Tainted: G W 4.20.0-rc1-test-no #88 [174705.995476] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 05/19/2017 [174705.995481] Call Trace: [174705.995500] dump_stack+0x46/0x60 [174705.995512] ___might_sleep.cold.79+0xe1/0xf1 [174705.995523] __mutex_lock+0x50/0x8f0 [174705.995531] ? find_held_lock+0x2d/0x90 [174705.995536] ? find_held_lock+0x2d/0x90 [174705.995542] ? find_held_lock+0x2d/0x90 [174705.995564] ? v4l2_ctrl_handler_free.part.13+0x44/0x1d0 [videodev] [174705.995576] v4l2_ctrl_handler_free.part.13+0x44/0x1d0 [videodev] [174705.995590] v4l2_ctrl_request_release+0x1c/0x30 [videodev] [174705.995600] media_request_clean+0x64/0xe0 [media] [174705.995609] media_request_release+0x19/0x40 [media] [174705.995617] vb2_buffer_done+0xef/0x1d0 [videobuf2_common] [174705.995630] vivid_thread_vid_out+0x2c1/0x550 [vivid] [174705.995645] ? vivid_stop_generating_vid_cap+0x1c0/0x1c0 [vivid] [174705.995653] kthread+0x113/0x130 [174705.995659] ? kthread_park+0x80/0x80 [174705.995667] ret_from_fork+0x35/0x40 The vb2_buffer_done function can be called from interrupt context, so anything that sleeps is not allowed. The solution is to increment the request refcount when the buffer is queued and decrement it when the buffer is dequeued. Releasing the request is fine if that happens from VIDIOC_DQBUF. Signed-off-by: Hans Verkuil Acked-by: Sakari Ailus Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/common/videobuf2/videobuf2-core.c | 38 +++++++++++++++++++++---- include/media/videobuf2-core.h | 2 ++ 2 files changed, 35 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index e006698807fa..a6dfb0860558 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -1359,8 +1359,12 @@ static void vb2_req_release(struct media_request_object *obj) { struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); - if (vb->state == VB2_BUF_STATE_IN_REQUEST) + if (vb->state == VB2_BUF_STATE_IN_REQUEST) { vb->state = VB2_BUF_STATE_DEQUEUED; + if (vb->request) + media_request_put(vb->request); + vb->request = NULL; + } } static const struct media_request_object_ops vb2_core_req_ops = { @@ -1528,6 +1532,18 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, return ret; vb->state = VB2_BUF_STATE_IN_REQUEST; + + /* + * Increment the refcount and store the request. + * The request refcount is decremented again when the + * buffer is dequeued. This is to prevent vb2_buffer_done() + * from freeing the request from interrupt context, which can + * happen if the application closed the request fd after + * queueing the request. 
+ */ + media_request_get(req); + vb->request = req; + /* Fill buffer information for the userspace */ if (pb) { call_void_bufop(q, copy_timestamp, vb, pb); call_void_bufop(q, fill_user_buffer, vb, pb); } @@ -1749,10 +1765,6 @@ static void __vb2_dqbuf(struct vb2_buffer *vb) call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv); vb->planes[i].dbuf_mapped = 0; } - if (vb->req_obj.req) { - media_request_object_unbind(&vb->req_obj); - media_request_object_put(&vb->req_obj); - } call_void_bufop(q, init_buffer, vb); } @@ -1797,6 +1809,14 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, /* go back to dequeued state */ __vb2_dqbuf(vb); + if (WARN_ON(vb->req_obj.req)) { + media_request_object_unbind(&vb->req_obj); + media_request_object_put(&vb->req_obj); + } + if (vb->request) + media_request_put(vb->request); + vb->request = NULL; + dprintk(2, "dqbuf of buffer %d, with state %d\n", vb->index, vb->state); @@ -1903,6 +1923,14 @@ static void __vb2_queue_cancel(struct vb2_queue *q) vb->prepared = false; } __vb2_dqbuf(vb); + + if (vb->req_obj.req) { + media_request_object_unbind(&vb->req_obj); + media_request_object_put(&vb->req_obj); + } + if (vb->request) + media_request_put(vb->request); + vb->request = NULL; } } diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index e86981d615ae..4a737b2c610b 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -239,6 +239,7 @@ struct vb2_queue; * @num_planes: number of planes in the buffer * on an internal driver queue. * @timestamp: frame timestamp in ns. + * @request: the request this buffer is associated with. * @req_obj: used to bind this buffer to a request. This * request object has a refcount. */ @@ -249,6 +250,7 @@ struct vb2_buffer { unsigned int memory; unsigned int num_planes; u64 timestamp; + struct media_request *request; struct media_request_object req_obj; /* private: internal use only -- cgit v1.2.3 From fb6df5a6234c38a9c551559506a49a677ac6f07a Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 1 Dec 2018 01:36:59 +0800 Subject: sctp: kfree_rcu asoc sctp_hash_transport/sctp_epaddr_lookup_transport dereference a transport's asoc under rcu_read_lock, but the asoc is freed without waiting for a grace period, which leads to a use-after-free panic. This patch fixes it by calling kfree_rcu so that the asoc is freed only after a grace period. Note that only freeing the asoc's memory is delayed by the patch; it won't make the sk linger longer. Thanks to Neil and Marcelo for making this clear. Fixes: 7fda702f9315 ("sctp: use new rhlist interface on sctp transport rhashtable") Fixes: cd2b70875058 ("sctp: check duplicate node before inserting a new transport") Reported-by: syzbot+0b05d8aa7cb185107483@syzkaller.appspotmail.com Reported-by: syzbot+aad231d51b1923158444@syzkaller.appspotmail.com Suggested-by: Neil Horman Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Acked-by: Neil Horman Signed-off-by: David S.
Miller --- include/net/sctp/structs.h | 2 ++ net/sctp/associola.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index a11f93790476..feada358d872 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -2075,6 +2075,8 @@ struct sctp_association { __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1]; __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1]; + + struct rcu_head rcu; }; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index dd77ec3892b6..914750b819b2 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -435,7 +435,7 @@ static void sctp_association_destroy(struct sctp_association *asoc) WARN_ON(atomic_read(&asoc->rmem_alloc)); - kfree(asoc); + kfree_rcu(asoc, rcu); SCTP_DBG_OBJCNT_DEC(assoc); } -- cgit v1.2.3 From 27359fd6e5f3c5db8fe544b63238b6170e8806d8 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Fri, 30 Nov 2018 11:05:06 -0500 Subject: dax: Fix unlock mismatch with updated API Internal to dax_unlock_mapping_entry(), dax_unlock_entry() is used to store a replacement entry in the Xarray at the given xas-index with the DAX_LOCKED bit clear. When called, dax_unlock_entry() expects the unlocked value of the entry relative to the current Xarray state to be specified. In most contexts dax_unlock_entry() is operating in the same scope as the matched dax_lock_entry(). However, in the dax_unlock_mapping_entry() case the implementation needs to recall the original entry. In the case where the original entry is a 'pmd' entry it is possible that the pfn performed to do the lookup is misaligned to the value retrieved in the Xarray. Change the api to return the unlock cookie from dax_lock_page() and pass it to dax_unlock_page(). This fixes a bug where dax_unlock_page() was assuming that the page was PMD-aligned if the entry was a PMD entry with signatures like: WARNING: CPU: 38 PID: 1396 at fs/dax.c:340 dax_insert_entry+0x2b2/0x2d0 RIP: 0010:dax_insert_entry+0x2b2/0x2d0 [..] Call Trace: dax_iomap_pte_fault.isra.41+0x791/0xde0 ext4_dax_huge_fault+0x16f/0x1f0 ? up_read+0x1c/0xa0 __do_fault+0x1f/0x160 __handle_mm_fault+0x1033/0x1490 handle_mm_fault+0x18b/0x3d0 Link: https://lkml.kernel.org/r/20181130154902.GL10377@bombadil.infradead.org Fixes: 9f32d221301c ("dax: Convert dax_lock_mapping_entry to XArray") Reported-by: Dan Williams Signed-off-by: Matthew Wilcox Tested-by: Dan Williams Reviewed-by: Jan Kara Signed-off-by: Dan Williams --- fs/dax.c | 21 ++++++++------------- include/linux/dax.h | 14 ++++++++------ mm/memory-failure.c | 6 ++++-- 3 files changed, 20 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/fs/dax.c b/fs/dax.c index 3f592dc18d67..48132eca3761 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -379,20 +379,20 @@ static struct page *dax_busy_page(void *entry) * @page: The page whose entry we want to lock * * Context: Process context. - * Return: %true if the entry was locked or does not need to be locked. + * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could + * not be locked. 
*/ -bool dax_lock_mapping_entry(struct page *page) +dax_entry_t dax_lock_page(struct page *page) { XA_STATE(xas, NULL, 0); void *entry; - bool locked; /* Ensure page->mapping isn't freed while we look at it */ rcu_read_lock(); for (;;) { struct address_space *mapping = READ_ONCE(page->mapping); - locked = false; + entry = NULL; if (!mapping || !dax_mapping(mapping)) break; @@ -403,7 +403,7 @@ bool dax_lock_mapping_entry(struct page *page) * otherwise we would not have a valid pfn_to_page() * translation. */ - locked = true; + entry = (void *)~0UL; if (S_ISCHR(mapping->host->i_mode)) break; @@ -426,23 +426,18 @@ bool dax_lock_mapping_entry(struct page *page) break; } rcu_read_unlock(); - return locked; + return (dax_entry_t)entry; } -void dax_unlock_mapping_entry(struct page *page) +void dax_unlock_page(struct page *page, dax_entry_t cookie) { struct address_space *mapping = page->mapping; XA_STATE(xas, &mapping->i_pages, page->index); - void *entry; if (S_ISCHR(mapping->host->i_mode)) return; - rcu_read_lock(); - entry = xas_load(&xas); - rcu_read_unlock(); - entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry)); - dax_unlock_entry(&xas, entry); + dax_unlock_entry(&xas, (void *)cookie); } /* diff --git a/include/linux/dax.h b/include/linux/dax.h index 450b28db9533..0dd316a74a29 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -7,6 +7,8 @@ #include #include +typedef unsigned long dax_entry_t; + struct iomap_ops; struct dax_device; struct dax_operations { @@ -88,8 +90,8 @@ int dax_writeback_mapping_range(struct address_space *mapping, struct block_device *bdev, struct writeback_control *wbc); struct page *dax_layout_busy_page(struct address_space *mapping); -bool dax_lock_mapping_entry(struct page *page); -void dax_unlock_mapping_entry(struct page *page); +dax_entry_t dax_lock_page(struct page *page); +void dax_unlock_page(struct page *page, dax_entry_t cookie); #else static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize) @@ -122,14 +124,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping, return -EOPNOTSUPP; } -static inline bool dax_lock_mapping_entry(struct page *page) +static inline dax_entry_t dax_lock_page(struct page *page) { if (IS_DAX(page->mapping->host)) - return true; - return false; + return ~0UL; + return 0; } -static inline void dax_unlock_mapping_entry(struct page *page) +static inline void dax_unlock_page(struct page *page, dax_entry_t cookie) { } #endif diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 0cd3de3550f0..7c72f2a95785 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1161,6 +1161,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, LIST_HEAD(tokill); int rc = -EBUSY; loff_t start; + dax_entry_t cookie; /* * Prevent the inode from being freed while we are interrogating @@ -1169,7 +1170,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, * also prevents changes to the mapping of this pfn until * poison signaling is complete. 
*/ - if (!dax_lock_mapping_entry(page)) + cookie = dax_lock_page(page); + if (!cookie) goto out; if (hwpoison_filter(page)) { @@ -1220,7 +1222,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags); rc = 0; unlock: - dax_unlock_mapping_entry(page); + dax_unlock_page(page, cookie); out: /* drop pgmap ref acquired in caller */ put_dev_pagemap(pgmap); -- cgit v1.2.3 From f51ccf46217c28758b1f3b5bc0ccfc00eca658b2 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 4 Dec 2018 17:00:36 +0100 Subject: USB: serial: console: fix reported terminal settings The USB-serial console implementation has never reported the actual terminal settings used. Despite storing the corresponding cflags in its struct console, these were never honoured on later tty open() where the tty termios would be left initialised to the driver defaults. Unlike the serial console implementation, the USB-serial code calls subdriver open() already at console setup. While calling set_termios() and write() before open() looks like it could work for some USB-serial drivers, others definitely do not expect this, so modelling this after serial core is going to be intrusive, if at all possible. Instead, use a (renamed) tty helper to save the termios data used at console setup so that the tty termios reflects the actual terminal settings after a subsequent tty open(). Note that the calls to tty_init_termios() (tty_driver_install()) and tty_save_termios() are serialised using the disconnect mutex. This specifically fixes a regression that was triggered by a recent change adding software flow control to the pl2303 driver: a getty trying to disable flow control while leaving the baud rate unchanged would now also set the baud rate to the driver default (prior to the flow-control change this had been a noop). Fixes: 7041d9c3f01b ("USB: serial: pl2303: add support for tx xon/xoff flow control") Cc: stable # 4.18 Cc: Florian Zumbiehl Reported-by: Jarkko Nikula Tested-by: Jarkko Nikula Acked-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold --- drivers/tty/tty_io.c | 11 +++++++++-- drivers/usb/serial/console.c | 2 +- include/linux/tty.h | 1 + 3 files changed, 11 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index ee80dfbd5442..687250ec8032 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1373,7 +1373,13 @@ err_release_lock: return ERR_PTR(retval); } -static void tty_free_termios(struct tty_struct *tty) +/** + * tty_save_termios() - save tty termios data in driver table + * @tty: tty whose termios data to save + * + * Locking: Caller guarantees serialisation with tty_init_termios(). 
+ */ +void tty_save_termios(struct tty_struct *tty) { struct ktermios *tp; int idx = tty->index; @@ -1392,6 +1398,7 @@ static void tty_free_termios(struct tty_struct *tty) } *tp = tty->termios; } +EXPORT_SYMBOL_GPL(tty_save_termios); /** * tty_flush_works - flush all works of a tty/pty pair @@ -1491,7 +1498,7 @@ static void release_tty(struct tty_struct *tty, int idx) WARN_ON(!mutex_is_locked(&tty_mutex)); if (tty->ops->shutdown) tty->ops->shutdown(tty); - tty_free_termios(tty); + tty_save_termios(tty); tty_driver_remove_tty(tty->driver, tty); tty->port->itty = NULL; if (tty->link) diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index 17940589c647..7d289302ff6c 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c @@ -101,7 +101,6 @@ static int usb_console_setup(struct console *co, char *options) cflag |= PARENB; break; } - co->cflag = cflag; /* * no need to check the index here: if the index is wrong, console @@ -164,6 +163,7 @@ static int usb_console_setup(struct console *co, char *options) serial->type->set_termios(tty, port, &dummy); tty_port_tty_set(&port->port, NULL); + tty_save_termios(tty); tty_kref_put(tty); } tty_port_set_initialized(&port->port, 1); diff --git a/include/linux/tty.h b/include/linux/tty.h index 414db2bce715..392138fe59b6 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -556,6 +556,7 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); extern void tty_release_struct(struct tty_struct *tty, int idx); extern int tty_release(struct inode *inode, struct file *filp); extern void tty_init_termios(struct tty_struct *tty); +extern void tty_save_termios(struct tty_struct *tty); extern int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty); -- cgit v1.2.3 From 9514063498cbff9a351f4bc25e1b1bfca8eccb7a Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 5 Dec 2018 06:28:20 -0500 Subject: media: mpeg2-ctrls.h: move MPEG2 state controls to non-public header The MPEG2 state controls for the cedrus stateless MPEG2 driver are not yet stable. Move them out of the public headers into media/mpeg2-ctrls.h. Eventually, once this has stabilized, they will be moved back to the public headers. Unfortunately I had to cast the control type to a u32 in two switch statements to prevent a compiler warning about a control type define not being part of the enum. 
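[ed: illustration, not part of the patch. With the two control types now bare
#defines in media/mpeg2-ctrls.h rather than members of enum v4l2_ctrl_type,
gcc's -Wswitch would warn that a case label is "not in enumerated type";
casting the switched expression to u32 sidesteps that check. A minimal sketch
of the pattern used in v4l2-ctrls.c:

	enum v4l2_ctrl_type type = V4L2_CTRL_TYPE_INTEGER;

	switch ((u32)type) {
	case V4L2_CTRL_TYPE_INTEGER:			/* real enum member */
		break;
	case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:		/* bare #define, 0x0103 */
		break;
	}
]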
Signed-off-by: Hans Verkuil Reviewed-by: Paul Kocialkowski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/v4l2-core/v4l2-ctrls.c | 4 +- include/media/mpeg2-ctrls.h | 86 ++++++++++++++++++++++++++++++++++++ include/media/v4l2-ctrls.h | 6 +++ include/uapi/linux/v4l2-controls.h | 68 ---------------------------- include/uapi/linux/videodev2.h | 4 -- 5 files changed, 94 insertions(+), 74 deletions(-) create mode 100644 include/media/mpeg2-ctrls.h (limited to 'include') diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 5f2b033a7a42..10b8d94edbef 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -1563,7 +1563,7 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx, u64 offset; s64 val; - switch (ctrl->type) { + switch ((u32)ctrl->type) { case V4L2_CTRL_TYPE_INTEGER: return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl); case V4L2_CTRL_TYPE_INTEGER64: @@ -2232,7 +2232,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl, is_array = nr_of_dims > 0; /* Prefill elem_size for all types handled by std_type_ops */ - switch (type) { + switch ((u32)type) { case V4L2_CTRL_TYPE_INTEGER64: elem_size = sizeof(s64); break; diff --git a/include/media/mpeg2-ctrls.h b/include/media/mpeg2-ctrls.h new file mode 100644 index 000000000000..d21f40edc09e --- /dev/null +++ b/include/media/mpeg2-ctrls.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * These are the MPEG2 state controls for use with stateless MPEG-2 + * codec drivers. + * + * It turns out that these structs are not stable yet and will undergo + * more changes. So keep them private until they are stable and ready to + * become part of the official public API. + */ + +#ifndef _MPEG2_CTRLS_H_ +#define _MPEG2_CTRLS_H_ + +#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250) +#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251) + +/* enum v4l2_ctrl_type type values */ +#define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103 +#define V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104 + +#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1 +#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2 +#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3 +#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4 + +struct v4l2_mpeg2_sequence { + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */ + __u16 horizontal_size; + __u16 vertical_size; + __u32 vbv_buffer_size; + + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */ + __u8 profile_and_level_indication; + __u8 progressive_sequence; + __u8 chroma_format; + __u8 pad; +}; + +struct v4l2_mpeg2_picture { + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */ + __u8 picture_coding_type; + + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */ + __u8 f_code[2][2]; + __u8 intra_dc_precision; + __u8 picture_structure; + __u8 top_field_first; + __u8 frame_pred_frame_dct; + __u8 concealment_motion_vectors; + __u8 q_scale_type; + __u8 intra_vlc_format; + __u8 alternate_scan; + __u8 repeat_first_field; + __u8 progressive_frame; + __u8 pad; +}; + +struct v4l2_ctrl_mpeg2_slice_params { + __u32 bit_size; + __u32 data_bit_offset; + + struct v4l2_mpeg2_sequence sequence; + struct v4l2_mpeg2_picture picture; + + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */ + __u8 quantiser_scale_code; + + __u8 backward_ref_index; + __u8 forward_ref_index; + __u8 pad; +}; + +struct v4l2_ctrl_mpeg2_quantization { + /* ISO/IEC 13818-2, ITU-T Rec. 
H.262: Quant matrix extension */ + __u8 load_intra_quantiser_matrix; + __u8 load_non_intra_quantiser_matrix; + __u8 load_chroma_intra_quantiser_matrix; + __u8 load_chroma_non_intra_quantiser_matrix; + + __u8 intra_quantiser_matrix[64]; + __u8 non_intra_quantiser_matrix[64]; + __u8 chroma_intra_quantiser_matrix[64]; + __u8 chroma_non_intra_quantiser_matrix[64]; +}; + +#endif diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index 83ce0593b275..d63cf227b0ab 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -22,6 +22,12 @@ #include #include +/* + * Include the mpeg2 stateless codec compound control definitions. + * This will move to the public headers once this API is fully stable. + */ +#include + /* forward references */ struct file; struct v4l2_ctrl_handler; diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 998983a6e6b7..3dcfc6148f99 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -404,9 +404,6 @@ enum v4l2_mpeg_video_multi_slice_mode { #define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228) #define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229) -#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250) -#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251) - #define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300) #define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301) #define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302) @@ -1097,69 +1094,4 @@ enum v4l2_detect_md_mode { #define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3) #define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4) -#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1 -#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2 -#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3 -#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4 - -struct v4l2_mpeg2_sequence { - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */ - __u16 horizontal_size; - __u16 vertical_size; - __u32 vbv_buffer_size; - - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */ - __u8 profile_and_level_indication; - __u8 progressive_sequence; - __u8 chroma_format; - __u8 pad; -}; - -struct v4l2_mpeg2_picture { - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */ - __u8 picture_coding_type; - - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */ - __u8 f_code[2][2]; - __u8 intra_dc_precision; - __u8 picture_structure; - __u8 top_field_first; - __u8 frame_pred_frame_dct; - __u8 concealment_motion_vectors; - __u8 q_scale_type; - __u8 intra_vlc_format; - __u8 alternate_scan; - __u8 repeat_first_field; - __u8 progressive_frame; - __u8 pad; -}; - -struct v4l2_ctrl_mpeg2_slice_params { - __u32 bit_size; - __u32 data_bit_offset; - - struct v4l2_mpeg2_sequence sequence; - struct v4l2_mpeg2_picture picture; - - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */ - __u8 quantiser_scale_code; - - __u8 backward_ref_index; - __u8 forward_ref_index; - __u8 pad; -}; - -struct v4l2_ctrl_mpeg2_quantization { - /* ISO/IEC 13818-2, ITU-T Rec. 
H.262: Quant matrix extension */ - __u8 load_intra_quantiser_matrix; - __u8 load_non_intra_quantiser_matrix;
- __u8 load_chroma_intra_quantiser_matrix; - __u8 load_chroma_non_intra_quantiser_matrix; -
- __u8 intra_quantiser_matrix[64]; - __u8 non_intra_quantiser_matrix[64];
- __u8 chroma_intra_quantiser_matrix[64]; - __u8 chroma_non_intra_quantiser_matrix[64]; -}; - #endif
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index c8e8ff810190..2ba2ad0e23fb 100644
--- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h
@@ -1622,8 +1622,6 @@ struct v4l2_ext_control { __u8 __user *p_u8; __u16 __user *p_u16; __u32 __user *p_u32;
- struct v4l2_ctrl_mpeg2_slice_params __user *p_mpeg2_slice_params;
- struct v4l2_ctrl_mpeg2_quantization __user *p_mpeg2_quantization;
void __user *ptr; }; } __attribute__ ((packed));
@@ -1669,8 +1667,6 @@ enum v4l2_ctrl_type { V4L2_CTRL_TYPE_U8 = 0x0100, V4L2_CTRL_TYPE_U16 = 0x0101, V4L2_CTRL_TYPE_U32 = 0x0102,
- V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS = 0x0103, - V4L2_CTRL_TYPE_MPEG2_QUANTIZATION = 0x0104, };
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ -- cgit v1.2.3
From 704620afc70cf47abb9d6a1a57f3825d2bca49cf Mon Sep 17 00:00:00 2001
From: Mathias Payer Date: Wed, 5 Dec 2018 21:19:59 +0100
Subject: USB: check usb_get_extra_descriptor for proper size
When reading an extra descriptor, we need to properly check the minimum and maximum size allowed,
to guard against invalid data sent by a device.
Reported-by: Hui Peng Reported-by: Mathias Payer Co-developed-by: Linus Torvalds Signed-off-by: Hui Peng
Signed-off-by: Mathias Payer Signed-off-by: Linus Torvalds Cc: stable Signed-off-by: Greg Kroah-Hartman
--- drivers/usb/core/hub.c | 2 +- drivers/usb/core/usb.c | 6 +++--- drivers/usb/host/hwa-hc.c | 2 +-
include/linux/usb.h | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'include')
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 528664059a12..f76b2e0aba9d 100644
--- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c
@@ -2251,7 +2251,7 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
/* descriptor may appear anywhere in config */ err = __usb_get_extra_descriptor(udev->rawdescriptors[0],
le16_to_cpu(udev->config[0].desc.wTotalLength), - USB_DT_OTG, (void **) &desc);
+ USB_DT_OTG, (void **) &desc, sizeof(*desc)); if (err || !(desc->bmAttributes & USB_OTG_HNP)) return 0;
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 79d8bd7a612e..4ebfbd737905 100644
--- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c
@@ -832,14 +832,14 @@ EXPORT_SYMBOL_GPL(usb_get_current_frame_number); */
int __usb_get_extra_descriptor(char *buffer, unsigned size, - unsigned char type, void **ptr)
+ unsigned char type, void **ptr, size_t minsize) { struct usb_descriptor_header *header;
while (size >= sizeof(struct usb_descriptor_header)) { header = (struct usb_descriptor_header *)buffer;
- if (header->bLength < 2) { + if (header->bLength < 2 || header->bLength > size) {
printk(KERN_ERR "%s: bogus descriptor, type %d length %d\n", usbcore_name,
@@ -848,7 +848,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size, return -1; }
- if (header->bDescriptorType == type) { + if (header->bDescriptorType == type && header->bLength >= minsize) {
*ptr = header; return 0; }
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c index 684d6f074c3a..09a8ebd95588 100644
--- a/drivers/usb/host/hwa-hc.c +++ b/drivers/usb/host/hwa-hc.c @@ -640,7 +640,7 @@
static int hwahc_security_create(struct hwahc *hwahc) top = itr + itr_size;
result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
le16_to_cpu(usb_dev->actconfig->desc.wTotalLength), - USB_DT_SECURITY, (void **) &secd);
+ USB_DT_SECURITY, (void **) &secd, sizeof(*secd)); if (result == -1) {
dev_warn(dev, "BUG? WUSB host has no security descriptors\n"); return 0;
diff --git a/include/linux/usb.h b/include/linux/usb.h index 4cdd515a4385..5e49e82c4368 100644
--- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -407,11 +407,11 @@ struct usb_host_bos { };
int __usb_get_extra_descriptor(char *buffer, unsigned size, - unsigned char type, void **ptr);
+ unsigned char type, void **ptr, size_t min); #define usb_get_extra_descriptor(ifpoint, type, ptr) \
__usb_get_extra_descriptor((ifpoint)->extra, \ (ifpoint)->extralen, \ - type, (void **)ptr)
+ type, (void **)ptr, sizeof(**(ptr)))
/* ----------------------------------------------------------------------- */ -- cgit v1.2.3
From 2f0799a0ffc033bf3cc82d5032acc3ec633464c2 Mon Sep 17 00:00:00 2001
From: David Rientjes Date: Wed, 5 Dec 2018 15:45:54 -0800
Subject: mm, thp: restore node-local hugepage allocations
This is a full revert of ac5b2c18911f ("mm: thp: relax __GFP_THISNODE for MADV_HUGEPAGE mappings")
and a partial revert of 89c83fb539f9 ("mm, thp: consolidate THP gfp handling into
alloc_hugepage_direct_gfpmask").
By not setting __GFP_THISNODE, applications can allocate remote hugepages when the local node is
fragmented or low on memory when either the thp defrag setting is "always" or the vma has been
madvised with MADV_HUGEPAGE. Remote access to hugepages often has much higher latency than local
pages of the native page size. On Haswell, ac5b2c18911f was shown to cause a 13.9% access
regression for binaries that remap their text segment to be backed by transparent hugepages.
The intent of ac5b2c18911f is to address an issue where a local node is low on memory or fragmented
such that a hugepage cannot be allocated. In every scenario where this was described as a fix,
there is abundant and unfragmented remote memory available to allocate from, even with a greater
access latency. If remote memory is also low or fragmented, not setting __GFP_THISNODE was also
measured on Haswell to cause a 40% regression in allocation latency.
Restore __GFP_THISNODE for thp allocations.
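[ed: a hedged sketch, not from the patch, of the behaviour being restored; the
helper name is hypothetical, while the allocator call matches the 5-argument
alloc_pages_vma() used in the diff below:

	/*
	 * With __GFP_THISNODE the request is confined to the local node
	 * and fails rather than spilling to a remote node, so the caller
	 * can fall back to base pages instead of remote hugepages.
	 */
	static struct page *thp_alloc_local(struct vm_area_struct *vma,
					    unsigned long haddr)
	{
		return alloc_pages_vma(GFP_TRANSHUGE | __GFP_THISNODE,
				       HPAGE_PMD_ORDER, vma, haddr,
				       numa_node_id());
	}
]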
Fixes: ac5b2c18911f ("mm: thp: relax __GFP_THISNODE for MADV_HUGEPAGE mappings") Fixes: 89c83fb539f9 ("mm, thp: consolidate THP gfp handling into alloc_hugepage_direct_gfpmask") Cc: Andrea Arcangeli Cc: Mel Gorman Cc: Vlastimil Babka Cc: Michal Hocko Cc: Andrew Morton Signed-off-by: David Rientjes Signed-off-by: Linus Torvalds --- include/linux/mempolicy.h | 2 -- mm/huge_memory.c | 42 ++++++++++++++++-------------------------- mm/mempolicy.c | 2 +- 3 files changed, 17 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index bac395f1d00a..5228c62af416 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -139,8 +139,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, struct mempolicy *get_task_policy(struct task_struct *p); struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, unsigned long addr); -struct mempolicy *get_vma_policy(struct vm_area_struct *vma, - unsigned long addr); bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 622cced74fd9..f2d19e4fe854 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -632,37 +632,27 @@ release: static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr) { const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); - gfp_t this_node = 0; - -#ifdef CONFIG_NUMA - struct mempolicy *pol; - /* - * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not - * specified, to express a general desire to stay on the current - * node for optimistic allocation attempts. If the defrag mode - * and/or madvise hint requires the direct reclaim then we prefer - * to fallback to other node rather than node reclaim because that - * can lead to excessive reclaim even though there is free memory - * on other nodes. We expect that NUMA preferences are specified - * by memory policies. - */ - pol = get_vma_policy(vma, addr); - if (pol->mode != MPOL_BIND) - this_node = __GFP_THISNODE; - mpol_cond_put(pol); -#endif + const gfp_t gfp_mask = GFP_TRANSHUGE_LIGHT | __GFP_THISNODE; + /* Always do synchronous compaction */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) - return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); + return GFP_TRANSHUGE | __GFP_THISNODE | + (vma_madvised ? 0 : __GFP_NORETRY); + + /* Kick kcompactd and fail quickly */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) - return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node; + return gfp_mask | __GFP_KSWAPD_RECLAIM; + + /* Synchronous compaction if madvised, otherwise kick kcompactd */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) - return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : - __GFP_KSWAPD_RECLAIM | this_node); + return gfp_mask | (vma_madvised ? __GFP_DIRECT_RECLAIM : + __GFP_KSWAPD_RECLAIM); + + /* Only do synchronous compaction if madvised */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) - return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : - this_node); - return GFP_TRANSHUGE_LIGHT | this_node; + return gfp_mask | (vma_madvised ? __GFP_DIRECT_RECLAIM : 0); + + return gfp_mask; } /* Caller must hold page table lock. 
*/ diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 5837a067124d..69e278b469ef 100644
--- a/mm/mempolicy.c +++ b/mm/mempolicy.c
@@ -1662,7 +1662,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
* freeing by another task. It is the caller's responsibility to free the
* extra reference for shared policies. */
-struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, unsigned long addr) {
struct mempolicy *pol = __get_vma_policy(vma, addr); -- cgit v1.2.3
From afd0a8006e98b1890908f81746c94ca5dae29d7c Mon Sep 17 00:00:00 2001
From: Jakub Audykowicz Date: Tue, 4 Dec 2018 20:27:41 +0100
Subject: sctp: frag_point sanity check
If for some reason an association's fragmentation point is zero, sctp_datamsg_from_user will
endlessly try to divide a message into zero-sized chunks. This eventually causes a kernel panic
due to running out of memory.
Although this situation is quite unlikely, it has occurred before as reported. I propose to add
this simple last-ditch sanity check due to the severity of the potential consequences.
Signed-off-by: Jakub Audykowicz Acked-by: Neil Horman Acked-by: Marcelo Ricardo Leitner
Signed-off-by: David S. Miller
--- include/net/sctp/sctp.h | 5 +++++ net/sctp/chunk.c | 6 ++++++ net/sctp/socket.c | 3 +--
3 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include')
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index ab9242e51d9e..2abbc15824af 100644
--- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h
@@ -620,4 +620,9 @@ static inline bool sctp_transport_pmtu_check(struct sctp_transport *t) return false; }
+static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize) +{
+ return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize); +} + #endif /* __net_sctp_h__ */
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index ce8087846f05..d2048de86e7c 100644
--- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c
@@ -191,6 +191,12 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
* the packet */ max_data = asoc->frag_point; + if (unlikely(!max_data)) {
+ max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk), + sctp_datachk_len(&asoc->stream));
+ pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%Zu)",
+ __func__, asoc, max_data); + }
/* If the the peer requested that we authenticate DATA chunks
* we need to account for bundling of the AUTH chunks along with
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index bf618d1b41fd..b8cebd5a87e5 100644
--- a/net/sctp/socket.c +++ b/net/sctp/socket.c
@@ -3324,8 +3324,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
__u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) : sizeof(struct sctp_data_chunk);
- min_len = sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, - datasize);
+ min_len = sctp_min_frag_point(sp, datasize); max_len = SCTP_MAX_CHUNK_LEN - datasize;
if (val < min_len || val > max_len) -- cgit v1.2.3
From 55f3f7eab75c10d9b33d122670b5935ab64db50f Mon Sep 17 00:00:00 2001
From: Matthew Wilcox Date: Mon, 26 Nov 2018 16:08:43 -0500
Subject: XArray: Add xa_cmpxchg_irq and xa_cmpxchg_bh
These convenience wrappers match the other _irq and _bh wrappers we already have. It turns out
I'd already open-coded xa_cmpxchg_irq() in the shmem code, so convert that.
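[ed: a usage sketch under assumed names (xa, index and entry are hypothetical);
the new wrapper collapses the open-coded sequence the shmem hunk below removes:

	void *old;

	/* before: open-coded */
	xa_lock_irq(&xa);
	old = __xa_cmpxchg(&xa, index, entry, NULL, 0);
	xa_unlock_irq(&xa);

	/* after: equivalent single call */
	old = xa_cmpxchg_irq(&xa, index, entry, NULL, 0);
]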
Signed-off-by: Matthew Wilcox --- Documentation/core-api/xarray.rst | 5 +++- include/linux/xarray.h | 54 +++++++++++++++++++++++++++++++++++++++ mm/shmem.c | 4 +-- 3 files changed, 59 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index dbe96cb5558e..6a6d67acaf69 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -187,6 +187,8 @@ Takes xa_lock internally: * :c:func:`xa_erase_bh` * :c:func:`xa_erase_irq` * :c:func:`xa_cmpxchg` + * :c:func:`xa_cmpxchg_bh` + * :c:func:`xa_cmpxchg_irq` * :c:func:`xa_store_range` * :c:func:`xa_alloc` * :c:func:`xa_alloc_bh` @@ -263,7 +265,8 @@ using :c:func:`xa_lock_irqsave` in both the interrupt handler and process context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock` in the interrupt handler. Some of the more common patterns have helper functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`, -:c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`. +:c:func:`xa_erase_bh`, :c:func:`xa_erase_irq`, :c:func:`xa_cmpxchg_bh` +and :c:func:`xa_cmpxchg_irq`. Sometimes you need to protect access to the XArray with a mutex because that lock sits above another mutex in the locking hierarchy. That does diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 564892e19f8c..f492e21c4aa2 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -553,6 +553,60 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index, return curr; } +/** + * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @old: Old value to test against. + * @entry: New value to place in array. + * @gfp: Memory allocation flags. + * + * This function is like calling xa_cmpxchg() except it disables softirqs + * while holding the array lock. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. May sleep if the @gfp flags permit. + * Return: The old value at this index or xa_err() if an error happened. + */ +static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock_bh(xa); + curr = __xa_cmpxchg(xa, index, old, entry, gfp); + xa_unlock_bh(xa); + + return curr; +} + +/** + * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @old: Old value to test against. + * @entry: New value to place in array. + * @gfp: Memory allocation flags. + * + * This function is like calling xa_cmpxchg() except it disables interrupts + * while holding the array lock. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. May sleep if the @gfp flags permit. + * Return: The old value at this index or xa_err() if an error happened. + */ +static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock_irq(xa); + curr = __xa_cmpxchg(xa, index, old, entry, gfp); + xa_unlock_irq(xa); + + return curr; +} + /** * xa_insert() - Store this entry in the XArray unless another entry is * already present. 
diff --git a/mm/shmem.c b/mm/shmem.c index cddc72ac44d8..6adbdd349875 100644
--- a/mm/shmem.c +++ b/mm/shmem.c
@@ -661,9 +661,7 @@ static int shmem_free_swap(struct address_space *mapping, { void *old;
- xa_lock_irq(&mapping->i_pages); - old = __xa_cmpxchg(&mapping->i_pages, index, radswap, NULL, 0);
- xa_unlock_irq(&mapping->i_pages); + old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
if (old != radswap) return -ENOENT; free_swap_and_cache(radix_to_swp_entry(radswap)); -- cgit v1.2.3
From b7d624ab431227af376787148cd7d474088c03aa Mon Sep 17 00:00:00 2001
From: Guo Ren Date: Thu, 6 Dec 2018 10:07:40 +0800
Subject: asm-generic: unistd.h: fixup broken macro include.
The broken macros cause a glibc compile error. If there is no __NR3264_fstat*, we should also
remove the related definitions.
Reported-by: Marcin Juszkiewicz
Fixes: bf4b6a7d371e ("y2038: Remove stat64 family from default syscall set")
[arnd: Both Marcin and Guo provided this patch to fix up my clearly broken commit, I applied the
version with the better changelog.]
Signed-off-by: Guo Ren Signed-off-by: Mao Han Cc: Arnd Bergmann Signed-off-by: Arnd Bergmann
--- include/uapi/asm-generic/unistd.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include')
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 538546edbfbd..c7f3321fbe43 100644
--- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h
@@ -760,8 +760,10 @@ __SYSCALL(__NR_rseq, sys_rseq) #define __NR_ftruncate __NR3264_ftruncate
#define __NR_lseek __NR3264_lseek #define __NR_sendfile __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR_newfstatat __NR3264_fstatat #define __NR_fstat __NR3264_fstat +#endif
#define __NR_mmap __NR3264_mmap #define __NR_fadvise64 __NR3264_fadvise64 #ifdef __NR3264_stat
@@ -776,8 +778,10 @@ __SYSCALL(__NR_rseq, sys_rseq) #define __NR_ftruncate64 __NR3264_ftruncate
#define __NR_llseek __NR3264_lseek #define __NR_sendfile64 __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR_fstatat64 __NR3264_fstatat #define __NR_fstat64 __NR3264_fstat +#endif
#define __NR_mmap2 __NR3264_mmap #define __NR_fadvise64_64 __NR3264_fadvise64 #ifdef __NR3264_stat -- cgit v1.2.3
From c201e3808e0e4be9b98d192802085a9f491bd80c Mon Sep 17 00:00:00 2001
From: Peter Hutterer Date: Thu, 6 Dec 2018 09:03:36 +1000
Subject: Input: restore EV_ABS ABS_RESERVED
ABS_RESERVED was added in d9ca1c990a7 and accidentally removed as part of ffe0e7cf290f5c9 when the
high-resolution scrolling code was removed.
Signed-off-by: Peter Hutterer Reviewed-by: Martin Kepplinger Acked-by: Benjamin Tissoires
Acked-by: Dmitry Torokhov Signed-off-by: Benjamin Tissoires
--- include/uapi/linux/input-event-codes.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include')
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 3eb5a4c3d60a..ae366b87426a 100644
--- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h
@@ -752,6 +752,15 @@ #define ABS_MISC 0x28 +/* + * 0x2e is reserved and should not be used in input drivers.
+ * It was used by HID as ABS_MISC+6 and userspace needs to detect if
+ * the next ABS_* event is correct or is just ABS_MISC + n.
+ * We define here ABS_RESERVED so userspace can rely on it and detect
+ * the situation described above.
+ */ +#define ABS_RESERVED 0x2e + #define ABS_MT_SLOT 0x2f /* MT slot being modified */ #define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */ #define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */ -- cgit v1.2.3 From e6ac64d4c4d095085d7dd71cbd05704ac99829b2 Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Thu, 6 Dec 2018 19:30:37 +0100 Subject: neighbour: Avoid writing before skb->head in neigh_hh_output() While skb_push() makes the kernel panic if the skb headroom is less than the unaligned hardware header size, it will proceed normally in case we copy more than that because of alignment, and we'll silently corrupt adjacent slabs. In the case fixed by the previous patch, "ipv6: Check available headroom in ip6_xmit() even without options", we end up in neigh_hh_output() with 14 bytes headroom, 14 bytes hardware header and write 16 bytes, starting 2 bytes before the allocated buffer. Always check we're not writing before skb->head and, if the headroom is not enough, warn and drop the packet. v2: - instead of panicking with BUG_ON(), WARN_ON_ONCE() and drop the packet (Eric Dumazet) - if we avoid the panic, though, we need to explicitly check the headroom before the memcpy(), otherwise we'll have corrupted slabs on a running kernel, after we warn - use __skb_push() instead of skb_push(), as the headroom check is already implemented here explicitly (Eric Dumazet) Signed-off-by: Stefano Brivio Signed-off-by: David S. Miller --- include/net/neighbour.h | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/neighbour.h b/include/net/neighbour.h index f58b384aa6c9..665990c7dec8 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -454,6 +454,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) { + unsigned int hh_alen = 0; unsigned int seq; unsigned int hh_len; @@ -461,16 +462,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb seq = read_seqbegin(&hh->hh_lock); hh_len = hh->hh_len; if (likely(hh_len <= HH_DATA_MOD)) { - /* this is inlined by gcc */ - memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD); + hh_alen = HH_DATA_MOD; + + /* skb_push() would proceed silently if we have room for + * the unaligned size but not for the aligned size: + * check headroom explicitly. + */ + if (likely(skb_headroom(skb) >= HH_DATA_MOD)) { + /* this is inlined by gcc */ + memcpy(skb->data - HH_DATA_MOD, hh->hh_data, + HH_DATA_MOD); + } } else { - unsigned int hh_alen = HH_DATA_ALIGN(hh_len); + hh_alen = HH_DATA_ALIGN(hh_len); - memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); + if (likely(skb_headroom(skb) >= hh_alen)) { + memcpy(skb->data - hh_alen, hh->hh_data, + hh_alen); + } } } while (read_seqretry(&hh->hh_lock, seq)); - skb_push(skb, hh_len); + if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) { + kfree_skb(skb); + return NET_XMIT_DROP; + } + + __skb_push(skb, hh_len); return dev_queue_xmit(skb); } -- cgit v1.2.3 From 356ff8a9a78fb35d6482584d260c3754dcbdf669 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Fri, 7 Dec 2018 14:50:16 -0800 Subject: Revert "mm, thp: consolidate THP gfp handling into alloc_hugepage_direct_gfpmask" This reverts commit 89c83fb539f95491be80cdd5158e6f0ce329e317. This should have been done as part of 2f0799a0ffc0 ("mm, thp: restore node-local hugepage allocations"). 
The movement of the thp allocation policy from alloc_pages_vma() to alloc_hugepage_direct_gfpmask() was intended to only set __GFP_THISNODE for mempolicies that are not MPOL_BIND whereas the revert could set this regardless of mempolicy. While the check for MPOL_BIND between alloc_hugepage_direct_gfpmask() and alloc_pages_vma() was racy, that has since been removed since the revert. What is left is the possibility to use __GFP_THISNODE in policy_node() when it is unexpected because the special handling for hugepages in alloc_pages_vma() was removed as part of the consolidation. Secondly, prior to 89c83fb539f9, alloc_pages_vma() implemented a somewhat different policy for hugepage allocations, which were allocated through alloc_hugepage_vma(). For hugepage allocations, if the allocating process's node is in the set of allowed nodes, allocate with __GFP_THISNODE for that node (for MPOL_PREFERRED, use that node with __GFP_THISNODE instead). This was changed for shmem_alloc_hugepage() to allow fallback to other nodes in 89c83fb539f9 as it did for new_page() in mm/mempolicy.c which is functionally different behavior and removes the requirement to only allocate hugepages locally. So this commit does a full revert of 89c83fb539f9 instead of the partial revert that was done in 2f0799a0ffc0. The result is the same thp allocation policy for 4.20 that was in 4.19. Fixes: 89c83fb539f9 ("mm, thp: consolidate THP gfp handling into alloc_hugepage_direct_gfpmask") Fixes: 2f0799a0ffc0 ("mm, thp: restore node-local hugepage allocations") Signed-off-by: David Rientjes Acked-by: Vlastimil Babka Cc: Andrea Arcangeli Cc: Mel Gorman Cc: Michal Hocko Cc: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 12 ++++++++---- mm/huge_memory.c | 27 +++++++++++++-------------- mm/mempolicy.c | 32 +++++++++++++++++++++++++++++--- mm/shmem.c | 2 +- 4 files changed, 51 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 76f8db0b0e71..0705164f928c 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -510,18 +510,22 @@ alloc_pages(gfp_t gfp_mask, unsigned int order) } extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, struct vm_area_struct *vma, unsigned long addr, - int node); + int node, bool hugepage); +#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ + alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true) #else #define alloc_pages(gfp_mask, order) \ alloc_pages_node(numa_node_id(), gfp_mask, order) -#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\ +#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\ + alloc_pages(gfp_mask, order) +#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ alloc_pages(gfp_mask, order) #endif #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) #define alloc_page_vma(gfp_mask, vma, addr) \ - alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id()) + alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false) #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ - alloc_pages_vma(gfp_mask, 0, vma, addr, node) + alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index f2d19e4fe854..5da55b38b1b7 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -629,30 +629,30 @@ release: * available * never: never stall for any thp allocation */ -static inline gfp_t 
alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr) +static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) { const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); - const gfp_t gfp_mask = GFP_TRANSHUGE_LIGHT | __GFP_THISNODE; /* Always do synchronous compaction */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) - return GFP_TRANSHUGE | __GFP_THISNODE | - (vma_madvised ? 0 : __GFP_NORETRY); + return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); /* Kick kcompactd and fail quickly */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) - return gfp_mask | __GFP_KSWAPD_RECLAIM; + return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; /* Synchronous compaction if madvised, otherwise kick kcompactd */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) - return gfp_mask | (vma_madvised ? __GFP_DIRECT_RECLAIM : - __GFP_KSWAPD_RECLAIM); + return GFP_TRANSHUGE_LIGHT | + (vma_madvised ? __GFP_DIRECT_RECLAIM : + __GFP_KSWAPD_RECLAIM); /* Only do synchronous compaction if madvised */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) - return gfp_mask | (vma_madvised ? __GFP_DIRECT_RECLAIM : 0); + return GFP_TRANSHUGE_LIGHT | + (vma_madvised ? __GFP_DIRECT_RECLAIM : 0); - return gfp_mask; + return GFP_TRANSHUGE_LIGHT; } /* Caller must hold page table lock. */ @@ -724,8 +724,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) pte_free(vma->vm_mm, pgtable); return ret; } - gfp = alloc_hugepage_direct_gfpmask(vma, haddr); - page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id()); + gfp = alloc_hugepage_direct_gfpmask(vma); + page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); if (unlikely(!page)) { count_vm_event(THP_FAULT_FALLBACK); return VM_FAULT_FALLBACK; @@ -1295,9 +1295,8 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) alloc: if (transparent_hugepage_enabled(vma) && !transparent_hugepage_debug_cow()) { - huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr); - new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma, - haddr, numa_node_id()); + huge_gfp = alloc_hugepage_direct_gfpmask(vma); + new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); } else new_page = NULL; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 69e278b469ef..d4496d9d34f5 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1116,8 +1116,8 @@ static struct page *new_page(struct page *page, unsigned long start) } else if (PageTransHuge(page)) { struct page *thp; - thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma, - address, numa_node_id()); + thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, + HPAGE_PMD_ORDER); if (!thp) return NULL; prep_transhuge_page(thp); @@ -2011,6 +2011,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, * @vma: Pointer to VMA or NULL if not available. * @addr: Virtual Address of the allocation. Must be inside the VMA. * @node: Which node to prefer for allocation (modulo policy). + * @hugepage: for hugepages try only the preferred node if possible * * This function allocates a page from the kernel page pool and applies * a NUMA policy associated with the VMA or the current process. 
@@ -2021,7 +2022,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, */ struct page * alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, - unsigned long addr, int node) + unsigned long addr, int node, bool hugepage) { struct mempolicy *pol; struct page *page; @@ -2039,6 +2040,31 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, goto out; } + if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { + int hpage_node = node; + + /* + * For hugepage allocation and non-interleave policy which + * allows the current node (or other explicitly preferred + * node) we only try to allocate from the current/preferred + * node and don't fall back to other nodes, as the cost of + * remote accesses would likely offset THP benefits. + * + * If the policy is interleave, or does not allow the current + * node in its nodemask, we allocate the standard way. + */ + if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL)) + hpage_node = pol->v.preferred_node; + + nmask = policy_nodemask(gfp, pol); + if (!nmask || node_isset(hpage_node, *nmask)) { + mpol_cond_put(pol); + page = __alloc_pages_node(hpage_node, + gfp | __GFP_THISNODE, order); + goto out; + } + } + nmask = policy_nodemask(gfp, pol); preferred_nid = policy_node(gfp, pol, node); page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); diff --git a/mm/shmem.c b/mm/shmem.c index cddc72ac44d8..921f80488bb3 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1439,7 +1439,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp, shmem_pseudo_vma_init(&pvma, info, hindex); page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, - HPAGE_PMD_ORDER, &pvma, 0, numa_node_id()); + HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); shmem_pseudo_vma_destroy(&pvma); if (page) prep_transhuge_page(page); -- cgit v1.2.3 From d1402fc708e4c355813e49df6d15bc3466ba5114 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Fri, 14 Dec 2018 14:16:53 -0800 Subject: mm: introduce common STRUCT_PAGE_MAX_SHIFT define This define is used by arm64 to calculate the size of the vmemmap region. It is defined as the log2 of the upper bound on the size of a struct page. We move it into mm_types.h so it can be defined properly instead of set and checked with a build bug. This also allows us to use the same define for riscv. Link: http://lkml.kernel.org/r/20181107205433.3875-2-logang@deltatee.com Signed-off-by: Logan Gunthorpe Acked-by: Will Deacon Acked-by: Andrew Morton Acked-by: Ard Biesheuvel Acked-by: Catalin Marinas Cc: Arnd Bergmann Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/memory.h | 9 --------- arch/arm64/mm/init.c | 8 -------- include/asm-generic/fixmap.h | 1 + include/linux/mm_types.h | 5 +++++ 4 files changed, 6 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index b96442960aea..f0a5c9531e8b 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -34,15 +34,6 @@ */ #define PCI_IO_SIZE SZ_16M -/* - * Log2 of the upper bound of the size of a struct page. Used for sizing - * the vmemmap region only, does not affect actual memory footprint. - * We don't use sizeof(struct page) directly since taking its size here - * requires its definition to be available at this point in the inclusion - * chain, and it may not be a power of 2 in the first place. 
- */ -#define STRUCT_PAGE_MAX_SHIFT 6 - /* * VMEMMAP_SIZE - allows the whole linear region to be covered by
* a struct page array
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 9b432d9fcada..0340e45655c6 100644
--- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c
@@ -610,14 +610,6 @@ void __init mem_init(void) BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); #endif
-#ifdef CONFIG_SPARSEMEM_VMEMMAP - /* - * Make sure we chose the upper bound of sizeof(struct page)
- * correctly when sizing the VMEMMAP array. - */
- BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT)); -#endif -
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { extern int sysctl_overcommit_memory; /*
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h index 827e4d3bbc7a..8cc7b09c1bc7 100644
--- a/include/asm-generic/fixmap.h +++ b/include/asm-generic/fixmap.h
@@ -16,6 +16,7 @@ #define __ASM_GENERIC_FIXMAP_H #include <linux/bug.h> +#include <linux/mm_types.h>
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5ed8f6292a53..2c471a2c43fa 100644
--- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h
@@ -206,6 +206,11 @@ struct page { #endif } _struct_page_alignment;
+/* + * Used for sizing the vmemmap region on some architectures + */
+#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page))) +
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) -- cgit v1.2.3
From 9def36e0fa9a0d9c5393c039db59f1f2d3a388b3 Mon Sep 17 00:00:00 2001
From: Logan Gunthorpe Date: Fri, 14 Dec 2018 14:16:57 -0800
Subject: mm/sparse: add common helper to mark all memblocks present
Presently the arches arm64, arm and sh have a function which loops through each memblock and calls
memory_present(). riscv will require a similar function.
Introduce a common memblocks_present() function that can be used by all the arches. Subsequent
patches will clean up the arches that make use of this.
Link: http://lkml.kernel.org/r/20181107205433.3875-3-logang@deltatee.com
Signed-off-by: Logan Gunthorpe Acked-by: Andrew Morton Cc: Michal Hocko Cc: Vlastimil Babka
Cc: Oscar Salvador Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds
--- include/linux/mmzone.h | 6 ++++++ mm/sparse.c | 16 ++++++++++++++++
2 files changed, 22 insertions(+) (limited to 'include')
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 847705a6d0ec..db023a92f3a4 100644
--- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h
@@ -783,6 +783,12 @@ void memory_present(int nid, unsigned long start, unsigned long end);
static inline void memory_present(int nid, unsigned long start, unsigned long end) {} #endif
+#if defined(CONFIG_SPARSEMEM) +void memblocks_present(void); +#else
+static inline void memblocks_present(void) {} +#endif +
#ifdef CONFIG_HAVE_MEMORYLESS_NODES int local_memory_node(int node_id); #else
diff --git a/mm/sparse.c b/mm/sparse.c index 33307fc05c4d..3abc8cc50201 100644
--- a/mm/sparse.c +++ b/mm/sparse.c
@@ -239,6 +239,22 @@ void __init memory_present(int nid, unsigned long start, unsigned long end) } }
+/* + * Mark all memblocks as present using memory_present(). This is a
+ * convienence function that is useful for a number of arches
+ * to mark all of the systems memory as present during initialization.
+ */ +void __init memblocks_present(void) +{ + struct memblock_region *reg; + + for_each_memblock(memory, reg) { + memory_present(memblock_get_region_node(reg), + memblock_region_memory_base_pfn(reg), + memblock_region_memory_end_pfn(reg)); + } +} + /* * Subtle, we encode the real pfn into the mem_map such that * the identity pfn - section_mem_map will return the actual -- cgit v1.2.3
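[ed: a hedged sketch, not part of the patch, of the caller-side cleanup the
helper enables; the surrounding arch init function is hypothetical:

	/* before: per-arch loop, e.g. in an arch's paging_init() */
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(memblock_get_region_node(reg),
			       memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));

	/* after: one call */
	memblocks_present();
]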