Diffstat (limited to 'include')
-rw-r--r--  include/linux/bpf.h           |   1
-rw-r--r--  include/linux/bpf_verifier.h  |  40
-rw-r--r--  include/net/xdp_sock.h        |   1
-rw-r--r--  include/uapi/linux/bpf.h      | 188
4 files changed, 167 insertions, 63 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index a2132e09dc1c..f02367faa58d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -193,7 +193,6 @@ enum bpf_arg_type {
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
- ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */
ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
};
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 69f7a3449eda..7d8228d1c898 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -66,6 +66,46 @@ struct bpf_reg_state {
* same reference to the socket, to determine proper reference freeing.
*/
u32 id;
+ /* A PTR_TO_SOCKET or PTR_TO_TCP_SOCK reg could hold a ptr returned
+ * from one of the pointer-cast helpers, bpf_sk_fullsock() and
+ * bpf_tcp_sock().
+ *
+ * Consider the following where "sk" is a reference counted
+ * pointer returned from "sk = bpf_sk_lookup_tcp();":
+ *
+ * 1: sk = bpf_sk_lookup_tcp();
+ * 2: if (!sk) { return 0; }
+ * 3: fullsock = bpf_sk_fullsock(sk);
+ * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
+ * 5: tp = bpf_tcp_sock(fullsock);
+ * 6: if (!tp) { bpf_sk_release(sk); return 0; }
+ * 7: bpf_sk_release(sk);
+ * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain
+ *
+ * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
+ * the "tp" ptr must be invalidated as well. To make that possible,
+ * the regs holding "fullsock" and "tp" (and "sk" itself) need to
+ * remember the original refcounted ptr id (i.e. sk_reg->id) in
+ * ref_obj_id, such that the verifier can reset all regs whose
+ * ref_obj_id matches sk_reg->id.
+ *
+ * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
+ * sk_reg->id is then kept for NULL-marking purposes only;
+ * once NULL-marking is done, sk_reg->id can be reset to 0.
+ *
+ * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
+ * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
+ *
+ * After "tp = bpf_tcp_sock(fullsock);" at line 5,
+ * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
+ * which is the same as sk_reg->ref_obj_id.
+ *
+ * From the verifier's perspective, if sk, fullsock and tp
+ * are not NULL, they are the same ptr with different
+ * reg->type. In particular, bpf_sk_release(tp) is also
+ * allowed and has the same effect as bpf_sk_release(sk).
+ */
+ u32 ref_obj_id;
/* For scalar types (SCALAR_VALUE), this represents our knowledge of
* the actual value.
* For pointer types, this represents the variable part of the offset
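
To make the walk-through above concrete, here is a minimal sketch of the corrected program as a tc classifier in BPF C. Assumptions: libbpf's bpf_helpers.h provides SEC() and the helper declarations, <linux/pkt_cls.h> provides TC_ACT_OK, and the lookup tuple is left zeroed for brevity. Unlike line 8 of the comment, tp->snd_cwnd is read before bpf_sk_release(sk), which is exactly what ref_obj_id tracking enforces:

    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include "bpf_helpers.h"    /* SEC() and helper declarations (libbpf) */

    SEC("classifier")
    int read_snd_cwnd(struct __sk_buff *skb)
    {
        struct bpf_sock_tuple tuple = {};  /* would be filled from the packet */
        struct bpf_sock *sk, *fullsock;
        struct bpf_tcp_sock *tp;
        __u32 snd_cwnd = 0;

        sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                               BPF_F_CURRENT_NETNS, 0);
        if (!sk)
            return TC_ACT_OK;
        fullsock = bpf_sk_fullsock(sk);
        if (!fullsock) {
            bpf_sk_release(sk);
            return TC_ACT_OK;
        }
        tp = bpf_tcp_sock(fullsock);
        if (tp)
            snd_cwnd = tp->snd_cwnd;  /* read before the release */
        bpf_sk_release(sk);           /* also invalidates fullsock and tp */
        /* snd_cwnd would feed a map or a verdict here */
        return TC_ACT_OK;
    }
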
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 61cf7dbb6782..d074b6d60f8a 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -36,7 +36,6 @@ struct xdp_umem {
u32 headroom;
u32 chunk_size_nohr;
struct user_struct *user;
- struct pid *pid;
unsigned long address;
refcount_t users;
struct work_struct work;
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 3c38ac9a92a7..929c8e537a14 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -502,16 +502,6 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
- * Description
- * Push an element *value* in *map*. *flags* is one of:
- *
- * **BPF_EXIST**
- * If the queue/stack is full, the oldest element is removed to
- * make room for this.
- * Return
- * 0 on success, or a negative error in case of failure.
- *
* int bpf_probe_read(void *dst, u32 size, const void *src)
* Description
* For tracing programs, safely attempt to read *size* bytes from
@@ -1435,14 +1425,14 @@ union bpf_attr {
* u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
- * *skb*, but gets socket from **struct bpf_sock_addr** contex.
+ * *skb*, but gets socket from **struct bpf_sock_addr** context.
* Return
* An 8-byte long non-decreasing number.
*
* u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
- * *skb*, but gets socket from **struct bpf_sock_ops** contex.
+ * *skb*, but gets socket from **struct bpf_sock_ops** context.
* Return
* An 8-byte long non-decreasing number.
*
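
As a usage sketch (same bpf_helpers.h assumption as the other examples on this page; the program name is illustrative), the sock_addr variant gives a connect4 hook a stable per-socket identifier:

    SEC("cgroup/connect4")
    int tag_connect(struct bpf_sock_addr *ctx)
    {
        /* Stable, non-decreasing id of the socket issuing connect();
         * a real program would use it as a map key to correlate this
         * connect() with later events on the same socket.
         */
        __u64 cookie = bpf_get_socket_cookie(ctx);

        (void)cookie;    /* placeholder for the map update */
        return 1;        /* 1 == let the connect() proceed */
    }
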
@@ -2098,52 +2088,52 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * int bpf_rc_repeat(void *ctx)
* Description
* This helper is used in programs implementing IR decoding, to
- * report a successfully decoded key press with *scancode*,
- * *toggle* value in the given *protocol*. The scancode will be
- * translated to a keycode using the rc keymap, and reported as
- * an input key down event. After a period a key up event is
- * generated. This period can be extended by calling either
- * **bpf_rc_keydown**\ () again with the same values, or calling
- * **bpf_rc_repeat**\ ().
+ * report a successfully decoded repeat key message. This delays
+ * the generation of a key up event for previously generated
+ * key down event.
*
- * Some protocols include a toggle bit, in case the button was
- * released and pressed again between consecutive scancodes.
+ * Some IR protocols like NEC have a special IR message for
+ * repeating last button, for when a button is held down.
*
* The *ctx* should point to the lirc sample as passed into
* the program.
*
- * The *protocol* is the decoded protocol number (see
- * **enum rc_proto** for some predefined values).
- *
* This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
* Return
* 0
*
- * int bpf_rc_repeat(void *ctx)
+ * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
* Description
* This helper is used in programs implementing IR decoding, to
- * report a successfully decoded repeat key message. This delays
- * the generation of a key up event for previously generated
- * key down event.
+ * report a successfully decoded key press with *scancode*,
+ * *toggle* value in the given *protocol*. The scancode will be
+ * translated to a keycode using the rc keymap, and reported as
+ * an input key down event. After a period a key up event is
+ * generated. This period can be extended by calling either
+ * **bpf_rc_keydown**\ () again with the same values, or calling
+ * **bpf_rc_repeat**\ ().
*
- * Some IR protocols like NEC have a special IR message for
- * repeating last button, for when a button is held down.
+ * Some protocols include a toggle bit, in case the button was
+ * released and pressed again between consecutive scancodes.
*
* The *ctx* should point to the lirc sample as passed into
* the program.
*
+ * The *protocol* is the decoded protocol number (see
+ * **enum rc_proto** for some predefined values).
+ *
* This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
* Return
* 0
*
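
For orientation, a toy lirc_mode2 decoder (loosely modeled on the kernel selftests) shows where the two helpers sit. This is a sketch: the bit tests stand in for real pulse-distance decoding, and 64 is an arbitrary protocol number rather than a meaningful enum rc_proto value:

    #include <linux/lirc.h>    /* LIRC_IS_PULSE(), LIRC_VALUE() */

    SEC("lirc_mode2")
    int ir_decoder(unsigned int *sample)
    {
        if (LIRC_IS_PULSE(*sample)) {
            unsigned int duration = LIRC_VALUE(*sample);

            /* toy protocol: bits of the pulse length select the action */
            if (duration & 0x10000)
                bpf_rc_keydown(sample, 64 /* protocol */,
                               duration & 0xffff, 0 /* toggle */);
            if (duration & 0x20000)
                bpf_rc_repeat(sample);    /* extend the previous press */
        }
        return 0;
    }
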
- * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
+ * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
* Description
* Return the cgroup v2 id of the socket associated with the *skb*.
* This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@@ -2159,30 +2149,12 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
- * Description
- * Return id of cgroup v2 that is ancestor of cgroup associated
- * with the *skb* at the *ancestor_level*. The root cgroup is at
- * *ancestor_level* zero and each step down the hierarchy
- * increments the level. If *ancestor_level* == level of cgroup
- * associated with *skb*, then return value will be same as that
- * of **bpf_skb_cgroup_id**\ ().
- *
- * The helper is useful to implement policies based on cgroups
- * that are upper in hierarchy than immediate cgroup associated
- * with *skb*.
- *
- * The format of returned id and helper limitations are same as in
- * **bpf_skb_cgroup_id**\ ().
- * Return
- * The id is returned or 0 in case the id could not be retrieved.
- *
* u64 bpf_get_current_cgroup_id(void)
* Return
* A 64-bit integer containing the current cgroup id based
* on the cgroup within which the current task is running.
*
- * void* get_local_storage(void *map, u64 flags)
+ * void *bpf_get_local_storage(void *map, u64 flags)
* Description
* Get the pointer to the local storage area.
* The type and the size of the local storage are defined
@@ -2209,6 +2181,24 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ * Description
+ * Return id of cgroup v2 that is ancestor of cgroup associated
+ * with the *skb* at the *ancestor_level*. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* == level of cgroup
+ * associated with *skb*, then return value will be same as that
+ * of **bpf_skb_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are upper in hierarchy than immediate cgroup associated
+ * with *skb*.
+ *
+ * The format of returned id and helper limitations are same as in
+ * **bpf_skb_cgroup_id**\ ().
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
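
To see the two cgroup-id helpers side by side, a sketch of an ancestor-based tc policy follows. ALLOWED_ANCESTOR_ID is a made-up placeholder; a real program would receive the id from userspace, for example through a map:

    #define ALLOWED_ANCESTOR_ID 0x100000001ULL    /* placeholder */

    SEC("classifier")
    int cgroup_gate(struct __sk_buff *skb)
    {
        /* id of the cgroup v2 ancestor two levels below the root;
         * level 0 is the root cgroup itself.  bpf_skb_cgroup_id(skb)
         * would instead return the id of the socket's own cgroup.
         */
        if (bpf_skb_ancestor_cgroup_id(skb, 2) != ALLOWED_ANCESTOR_ID)
            return TC_ACT_SHOT;    /* drop traffic from other subtrees */
        return TC_ACT_OK;
    }
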
* struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for TCP socket matching *tuple*, optionally in a child
@@ -2289,6 +2279,16 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ * Description
+ * Push an element *value* in *map*. *flags* is one of:
+ *
+ * **BPF_EXIST**
+ * If the queue/stack is full, the oldest element is
+ * removed to make room for this.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
* int bpf_map_pop_elem(struct bpf_map *map, void *value)
* Description
* Pop an element from *map*.
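
Together with bpf_map_pop_elem() just above, the push helper gives a FIFO shared between programs. A sketch using the classic bpf_map_def definition style from bpf_helpers.h; the map name and attach points are illustrative:

    /* FIFO of u64 timestamps; queue maps take no key (key_size == 0). */
    struct bpf_map_def SEC("maps") events = {
        .type        = BPF_MAP_TYPE_QUEUE,
        .key_size    = 0,
        .value_size  = sizeof(__u64),
        .max_entries = 64,
    };

    SEC("tracepoint/syscalls/sys_enter_write")
    int producer(void *ctx)
    {
        __u64 now = bpf_ktime_get_ns();

        /* BPF_EXIST: evict the oldest element when the queue is full */
        bpf_map_push_elem(&events, &now, BPF_EXIST);
        return 0;
    }

    SEC("tracepoint/syscalls/sys_enter_read")
    int consumer(void *ctx)
    {
        __u64 oldest;

        /* returns 0 and fills "oldest" on success, -ENOENT when empty */
        bpf_map_pop_elem(&events, &oldest);
        return 0;
    }
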
@@ -2343,29 +2343,94 @@ union bpf_attr {
* Return
* 0
*
+ * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ * Description
+ * Acquire a spinlock represented by the pointer *lock*, which is
+ * stored as part of a map value. Taking the lock makes it
+ * safe to update the rest of the fields in that value. The
+ * spinlock can (and must) later be released with a call to
+ * **bpf_spin_unlock**\ (\ *lock*\ ).
+ *
+ * Spinlocks in BPF programs come with a number of restrictions
+ * and constraints:
+ *
+ * * **bpf_spin_lock** objects are only allowed inside maps of
+ * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
+ * list could be extended in the future).
+ * * BTF description of the map is mandatory.
+ * * The BPF program can take ONE lock at a time, since taking two
+ * or more could cause deadlocks.
+ * * Only one **struct bpf_spin_lock** is allowed per map element.
+ * * When the lock is taken, calls (either BPF to BPF or helpers)
+ * are not allowed.
+ * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
+ * allowed inside a spinlock-ed region.
+ * * The BPF program MUST call **bpf_spin_unlock**\ () to release
+ * the lock, on all execution paths, before it returns.
+ * * The BPF program can access **struct bpf_spin_lock** only via
+ * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
+ * helpers. Loading or storing data into the **struct
+ * bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
+ * * To use the **bpf_spin_lock**\ () helper, the BTF description
+ * of the map value must be a struct and have a **struct
+ * bpf_spin_lock** *anyname*\ **;** field at the top level.
+ * Nesting the lock inside another struct is not allowed.
+ * * The **struct bpf_spin_lock** *lock* field in a map value must
+ * be aligned on a multiple of 4 bytes in that value.
+ * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
+ * the **bpf_spin_lock** field to user space.
+ * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
+ * a BPF program, do not update the **bpf_spin_lock** field.
+ * * **bpf_spin_lock** cannot be on the stack or inside a
+ * networking packet (it can only be inside of a map value).
+ * * **bpf_spin_lock** is available to root only.
+ * * Tracing programs and socket filter programs cannot use
+ * **bpf_spin_lock**\ () due to insufficient preemption checks
+ * (but this may change in the future).
+ * * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
+ * Return
+ * 0
+ *
+ * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ * Description
+ * Release the *lock* previously locked by a call to
+ * **bpf_spin_lock**\ (\ *lock*\ ).
+ * Return
+ * 0
+ *
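
A sketch of the locking pattern under the constraints listed above. Because BTF for the map value is mandatory, this uses libbpf's BTF-style map definition, so the object must be built with BTF (e.g. clang -g -target bpf); struct and map names are illustrative:

    /* Map value embedding the mandatory top-level bpf_spin_lock. */
    struct counter {
        struct bpf_spin_lock lock;
        __u64 packets;
        __u64 bytes;
    };

    struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, struct counter);
    } stats SEC(".maps");

    SEC("classifier")
    int count(struct __sk_buff *skb)
    {
        __u32 key = 0;
        struct counter *cnt = bpf_map_lookup_elem(&stats, &key);

        if (!cnt)
            return TC_ACT_OK;
        bpf_spin_lock(&cnt->lock);    /* one lock at a time, no calls inside */
        cnt->packets++;
        cnt->bytes += skb->len;
        bpf_spin_unlock(&cnt->lock);  /* release on every path before return */
        return TC_ACT_OK;
    }
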
* struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
* Description
* This helper gets a **struct bpf_sock** pointer such
- * that all the fields in bpf_sock can be accessed.
+ * that all the fields in this **bpf_sock** can be accessed.
* Return
- * A **struct bpf_sock** pointer on success, or NULL in
+ * A **struct bpf_sock** pointer on success, or **NULL** in
* case of failure.
*
* struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
* Description
* This helper gets a **struct bpf_tcp_sock** pointer from a
* **struct bpf_sock** pointer.
- *
* Return
- * A **struct bpf_tcp_sock** pointer on success, or NULL in
+ * A **struct bpf_tcp_sock** pointer on success, or **NULL** in
* case of failure.
*
* int bpf_skb_ecn_set_ce(struct sk_buff *skb)
- * Description
- * Sets ECN of IP header to ce (congestion encountered) if
- * current value is ect (ECN capable). Works with IPv6 and IPv4.
- * Return
- * 1 if set, 0 if not set.
+ * Description
+ * Set ECN (Explicit Congestion Notification) field of IP header
+ * to **CE** (Congestion Encountered) if current value is **ECT**
+ * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
+ * and IPv4.
+ * Return
+ * 1 if the **CE** flag is set (either by the current helper call
+ * or because it was already present), 0 if it is not set.
+ *
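
The helper's typical call site, sketched as a cgroup_skb egress hook; the unconditional call stands in for a real congestion signal such as queue depth:

    SEC("cgroup_skb/egress")
    int mark_ce(struct __sk_buff *skb)
    {
        /* Sets CE on ECT packets; non-ECT packets are left untouched
         * (the helper then returns 0).
         */
        bpf_skb_ecn_set_ce(skb);
        return 1;    /* 1 == allow the packet */
    }
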
+ * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
+ * Description
+ * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
+ * **bpf_sk_release**\ () is unnecessary and not allowed.
+ * Return
+ * A **struct bpf_sock** pointer on success, or **NULL** in
+ * case of failure.
*/
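
Finally, a sketch of bpf_get_listener_sock() from a cgroup_skb program: walk from skb->sk to the TCP_LISTEN socket a connection was accepted from, with no release required. The port-based policy is a made-up placeholder:

    SEC("cgroup_skb/ingress")
    int inspect_listener(struct __sk_buff *skb)
    {
        struct bpf_sock *sk, *listener;

        sk = skb->sk;
        if (!sk)
            return 1;
        sk = bpf_sk_fullsock(sk);
        if (!sk)
            return 1;
        /* No bpf_sk_release() afterwards: the returned pointer is
         * not reference counted.
         */
        listener = bpf_get_listener_sock(sk);
        if (!listener)
            return 1;
        /* placeholder policy: drop ingress for connections accepted
         * by a listener on port 6379
         */
        return listener->src_port != 6379;
    }
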
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2465,7 +2530,8 @@ union bpf_attr {
FN(spin_unlock), \
FN(sk_fullsock), \
FN(tcp_sock), \
- FN(skb_ecn_set_ce),
+ FN(skb_ecn_set_ce), \
+ FN(get_listener_sock),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call