author     Daniel Borkmann <daniel@iogearbox.net>  2019-07-03 16:52:03 +0200
committer  Daniel Borkmann <daniel@iogearbox.net>  2019-07-03 16:52:03 +0200
commit     e5a3e259ef239f443951d401db10db7d426c9497 (patch)
tree       6ef3c235c14a2ed5352d166637965af44cd8e103 /tools/include
parent     d2f5bbbc350050895d9f33c2744a61f9e0af1caa (diff)
parent     d78e3f0614f866d4456f860dc5773a6176eb9937 (diff)
download   linux-e5a3e259ef239f443951d401db10db7d426c9497.tar.bz2
Merge branch 'bpf-tcp-rtt-hook'
Stanislav Fomichev says:

====================
Congestion control team would like to have a periodic callback to track some
TCP statistics. Let's add a sock_ops callback that can be selectively enabled
on a socket-by-socket basis and is executed for every RTT. BPF program
frequency can be further controlled by calling bpf_ktime_get_ns and bailing
out early.

I ran neper tcp_stream and tcp_rr tests with the sample program from the last
patch and didn't observe any noticeable performance difference.

v2:
* add a comment about second accept() in selftest (Yonghong Song)
* refer to tcp_bpf.readme in sample program (Yonghong Song)
====================

Suggested-by: Eric Dumazet <edumazet@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Priyaranjan Jha <priyarjha@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Yuchung Cheng <ycheng@google.com>
Acked-by: Yonghong Song <yhs@fb.com>
Acked-by: Lawrence Brakmo <brakmo@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
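As a quick illustration of the mechanism described in the cover letter (this sketch is not part of the merged series; the program name and the fields it reads are assumptions based on the existing sock_ops API), a BPF program could opt a socket into the new per-RTT callback like this:

/* SPDX-License-Identifier: GPL-2.0 */
/* Hypothetical sock_ops sketch; illustrative only, not from this series. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int rtt_sampler(struct bpf_sock_ops *ctx)
{
	switch (ctx->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		/* Opt this socket into the new per-RTT callback. */
		bpf_sock_ops_cb_flags_set(ctx, BPF_SOCK_OPS_RTT_CB_FLAG);
		break;
	case BPF_SOCK_OPS_RTT_CB:
		/* Runs once per RTT on opted-in sockets; sample stats such
		 * as ctx->srtt_us or ctx->snd_cwnd here, and compare
		 * bpf_ktime_get_ns() against a stored timestamp to bail out
		 * early if a lower callback frequency is desired.
		 */
		break;
	}
	return 1;
}

char _license[] SEC("license") = "GPL";

Like any other sock_ops program, this would be attached at the cgroup level (attach type BPF_CGROUP_SOCK_OPS), and only sockets that explicitly set BPF_SOCK_OPS_RTT_CB_FLAG pay for the per-RTT callback.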
Diffstat (limited to 'tools/include')
-rw-r--r--  tools/include/uapi/linux/bpf.h  12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index a396b516a2b2..cecf42c871d4 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1767,6 +1767,7 @@ union bpf_attr {
* * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
* * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
* * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
+ * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT)
*
* Therefore, this function can be used to clear a callback flag by
* setting the appropriate bit to zero. e.g. to disable the RTO
@@ -3069,6 +3070,12 @@ struct bpf_tcp_sock {
* sum(delta(snd_una)), or how many bytes
* were acked.
*/
+ __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
+ * total number of DSACK blocks received
+ */
+ __u32 delivered; /* Total data packets delivered incl. rexmits */
+ __u32 delivered_ce; /* Like the above but only ECE marked packets */
+ __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */
};
struct bpf_sock_tuple {
@@ -3311,7 +3318,8 @@ struct bpf_sock_ops {
#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
-#define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently
+#define BPF_SOCK_OPS_RTT_CB_FLAG (1<<3)
+#define BPF_SOCK_OPS_ALL_CB_FLAGS 0xF /* Mask of all currently
* supported cb flags
*/
@@ -3366,6 +3374,8 @@ enum {
BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after
* socket transition to LISTEN state.
*/
+ BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
+ */
};
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect