author     Linus Torvalds <torvalds@linux-foundation.org>  2020-03-31 17:29:33 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-03-31 17:29:33 -0700
commit     29d9f30d4ce6c7a38745a54a8cddface10013490 (patch)
tree       85649ba6a7b39203584d8db9365e03f64e62c136 /tools/bpf/bpftool
parent     56a451b780676bc1cdac011735fe2869fa2e9abf (diff)
parent     7f80ccfe996871ca69648efee74a60ae7ad0dcd9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from David Miller:
 "Highlights:

  1) Fix the iwlwifi regression, from Johannes Berg.
  2) Support BSS coloring and 802.11 encapsulation offloading in hardware, from John Crispin.
  3) Fix some potential Spectre issues in qtnfmac, from Sergey Matyukevich.
  4) Add TTL decrement action to openvswitch, from Matteo Croce.
  5) Allow parallelization through flow_action setup by not taking the RTNL mutex, from Vlad Buslov.
  6) A lot of zero-length array to flexible-array conversions, from Gustavo A. R. Silva.
  7) Align XDP statistics names across several drivers for consistency, from Lorenzo Bianconi.
  8) Add various pieces of infrastructure for offloading conntrack, and make use of it in mlx5 driver, from Paul Blakey.
  9) Allow using listening sockets in BPF sockmap, from Jakub Sitnicki.
 10) Lots of parallelization improvements during configuration changes in mlxsw driver, from Ido Schimmel.
 11) Add support to devlink for generic packet traps, which report packets dropped during ACL processing. And use them in mlxsw driver. From Jiri Pirko.
 12) Support bcmgenet on ACPI, from Jeremy Linton.
 13) Make BPF compatible with RT, from Thomas Gleixner, Alexei Starovoitov, and yours truly.
 14) Support XDP meta-data in virtio_net, from Yuya Kusakabe.
 15) Fix sysfs permissions when network devices change namespaces, from Christian Brauner.
 16) Add a flags element to ethtool_ops so that drivers can more simply indicate which coalescing parameters they actually support, and therefore the generic layer can validate the user's ethtool request. Use this in all drivers, from Jakub Kicinski.
 17) Offload FIFO qdisc in mlxsw, from Petr Machata.
 18) Support UDP sockets in sockmap, from Lorenz Bauer.
 19) Fix stretch ACK bugs in several TCP congestion control modules, from Pengcheng Yang.
 20) Support virtual functions in octeontx2 driver, from Tomasz Duszynski.
 21) Add region operations for devlink and use it in ice driver to dump NVM contents, from Jacob Keller.
 22) Add support for hw offload of MACSEC, from Antoine Tenart.
 23) Add support for BPF programs that can be attached to LSM hooks, from KP Singh.
 24) Support for multiple paths, path managers, and counters in MPTCP. From Peter Krystad, Paolo Abeni, Florian Westphal, Davide Caratti, and others.
 25) More progress on adding the netlink interface to ethtool, from Michal Kubecek"

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2121 commits)
  net: ipv6: rpl_iptunnel: Fix potential memory leak in rpl_do_srh_inline
  cxgb4/chcr: nic-tls stats in ethtool
  net: dsa: fix oops while probing Marvell DSA switches
  net/bpfilter: remove superfluous testing message
  net: macb: Fix handling of fixed-link node
  net: dsa: ksz: Select KSZ protocol tag
  netdevsim: dev: Fix memory leak in nsim_dev_take_snapshot_write
  net: stmmac: add EHL 2.5Gbps PCI info and PCI ID
  net: stmmac: add EHL PSE0 & PSE1 1Gbps PCI info and PCI ID
  net: stmmac: create dwmac-intel.c to contain all Intel platform
  net: dsa: bcm_sf2: Support specifying VLAN tag egress rule
  net: dsa: bcm_sf2: Add support for matching VLAN TCI
  net: dsa: bcm_sf2: Move writing of CFP_DATA(5) into slicing functions
  net: dsa: bcm_sf2: Check earlier for FLOW_EXT and FLOW_MAC_EXT
  net: dsa: bcm_sf2: Disable learning for ASP port
  net: dsa: b53: Deny enslaving port 7 for 7278 into a bridge
  net: dsa: b53: Prevent tagged VLAN on port 7 for 7278
  net: dsa: b53: Restore VLAN entries upon (re)configuration
  net: dsa: bcm_sf2: Fix overflow checks
  hv_netvsc: Remove unnecessary round_up for recv_completion_cnt
  ...
Diffstat (limited to 'tools/bpf/bpftool')
-rw-r--r--  tools/bpf/bpftool/.gitignore                                 2
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-feature.rst         19
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-prog.rst            22
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst     116
-rw-r--r--  tools/bpf/bpftool/Makefile                                   36
-rw-r--r--  tools/bpf/bpftool/bash-completion/bpftool                   110
-rw-r--r--  tools/bpf/bpftool/btf.c                                       5
-rw-r--r--  tools/bpf/bpftool/btf_dumper.c                              199
-rw-r--r--  tools/bpf/bpftool/common.c                                   40
-rw-r--r--  tools/bpf/bpftool/feature.c                                 283
-rw-r--r--  tools/bpf/bpftool/main.c                                     10
-rw-r--r--  tools/bpf/bpftool/main.h                                     12
-rw-r--r--  tools/bpf/bpftool/map.c                                       2
-rw-r--r--  tools/bpf/bpftool/prog.c                                    458
-rw-r--r--  tools/bpf/bpftool/skeleton/profiler.bpf.c                   119
-rw-r--r--  tools/bpf/bpftool/skeleton/profiler.h                        46
-rw-r--r--  tools/bpf/bpftool/struct_ops.c                              596
17 files changed, 1875 insertions, 200 deletions
diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore
index b13926432b84..8d6e8901ed2b 100644
--- a/tools/bpf/bpftool/.gitignore
+++ b/tools/bpf/bpftool/.gitignore
@@ -1,7 +1,9 @@
*.d
+/_bpftool
/bpftool
bpftool*.8
bpf-helpers.*
FEATURE-DUMP.bpftool
feature
libbpf
+profiler.skel.h
diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
index 4d08f35034a2..b04156cfd7a3 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
@@ -19,19 +19,24 @@ SYNOPSIS
FEATURE COMMANDS
================
-| **bpftool** **feature probe** [*COMPONENT*] [**macros** [**prefix** *PREFIX*]]
+| **bpftool** **feature probe** [*COMPONENT*] [**full**] [**macros** [**prefix** *PREFIX*]]
| **bpftool** **feature help**
|
| *COMPONENT* := { **kernel** | **dev** *NAME* }
DESCRIPTION
===========
- **bpftool feature probe** [**kernel**] [**macros** [**prefix** *PREFIX*]]
+ **bpftool feature probe** [**kernel**] [**full**] [**macros** [**prefix** *PREFIX*]]
Probe the running kernel and dump a number of eBPF-related
parameters, such as availability of the **bpf()** system call,
JIT status, eBPF program types availability, eBPF helper
functions availability, and more.
+ By default, bpftool **does not run probes** for
+ **bpf_probe_write_user**\ () and **bpf_trace_printk**\ ()
+ helpers, which print warnings to kernel logs. To enable them
+ and run all probes, the **full** keyword should be used.
+
If the **macros** keyword (but not the **-j** option) is
passed, a subset of the output is dumped as a list of
**#define** macros that are ready to be included in a C
@@ -44,16 +49,12 @@ DESCRIPTION
Keyword **kernel** can be omitted. If no probe target is
specified, probing the kernel is the default behaviour.
- Note that when probed, some eBPF helpers (e.g.
- **bpf_trace_printk**\ () or **bpf_probe_write_user**\ ()) may
- print warnings to kernel logs.
-
- **bpftool feature probe dev** *NAME* [**macros** [**prefix** *PREFIX*]]
+ **bpftool feature probe dev** *NAME* [**full**] [**macros** [**prefix** *PREFIX*]]
Probe network device for supported eBPF features and dump
results to the console.
- The two keywords **macros** and **prefix** have the same
- role as when probing the kernel.
+ The keywords **full**, **macros** and **prefix** have the
+ same role as when probing the kernel.
**bpftool feature help**
Print short help message.
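
For context, a minimal sketch of how the emitted macros might be consumed from a C source. The header file name and the BPFTOOL_ prefix are illustrative assumptions; the HAVE_PROG_TYPE_HELPER() pattern matches the macros that feature.c generates later in this patch.

    /* Hypothetical consumer of:
     *   bpftool feature probe full macros prefix BPFTOOL_ > bpftool_features.h
     */
    #include "bpftool_features.h"

    #if BPFTOOL_HAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)
    /* bpf_redirect() is reported as usable from XDP programs */
    #define USE_XDP_REDIRECT 1
    #else
    #define USE_XDP_REDIRECT 0
    #endif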
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 64ddf8a4c518..9f19404f470e 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -30,6 +30,7 @@ PROG COMMANDS
| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog tracelog**
| **bpftool** **prog run** *PROG* **data_in** *FILE* [**data_out** *FILE* [**data_size_out** *L*]] [**ctx_in** *FILE* [**ctx_out** *FILE* [**ctx_size_out** *M*]]] [**repeat** *N*]
+| **bpftool** **prog profile** *PROG* [**duration** *DURATION*] *METRICs*
| **bpftool** **prog help**
|
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
@@ -42,11 +43,15 @@ PROG COMMANDS
| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** |
| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** |
| **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl** |
-| **cgroup/getsockopt** | **cgroup/setsockopt**
+| **cgroup/getsockopt** | **cgroup/setsockopt** |
+| **struct_ops** | **fentry** | **fexit** | **freplace**
| }
| *ATTACH_TYPE* := {
| **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector**
| }
+| *METRIC* := {
+| **cycles** | **instructions** | **l1d_loads** | **llc_misses**
+| }
DESCRIPTION
@@ -188,6 +193,12 @@ DESCRIPTION
not all of them can take the **ctx_in**/**ctx_out**
arguments. bpftool does not perform checks on program types.
+ **bpftool prog profile** *PROG* [**duration** *DURATION*] *METRICs*
+ Profile *METRICs* for bpf program *PROG* for *DURATION*
+ seconds or until the user hits Ctrl-C. *DURATION* is optional.
+ If *DURATION* is not specified, the profiling will run up to
+ UINT_MAX seconds.
+
**bpftool prog help**
Print short help message.
@@ -310,6 +321,15 @@ EXAMPLES
**# rm /sys/fs/bpf/xdp1**
+|
+| **# bpftool prog profile id 337 duration 10 cycles instructions llc_misses**
+
+::
+ 51397 run_cnt
+ 40176203 cycles (83.05%)
+ 42518139 instructions # 1.06 insns per cycle (83.39%)
+ 123 llc_misses # 2.89 LLC misses per million insns (83.15%)
+
SEE ALSO
========
**bpf**\ (2),
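
For reference, the ratio columns in the profile example above follow the ratio_metric/ratio_mul scheme this patch adds to prog.c (instructions relative to cycles, LLC misses per million instructions). A small standalone sketch using the numbers from the example output:

    #include <stdio.h>

    int main(void)
    {
        double cycles = 40176203, insns = 42518139, llc_misses = 123;

        /* instructions are reported relative to cycles (ratio_mul 1.0) */
        printf("%.2f insns per cycle\n", insns * 1.0 / cycles);
        /* llc_misses are reported per million instructions (ratio_mul 1e6) */
        printf("%.2f LLC misses per million insns\n",
               llc_misses * 1e6 / insns);
        return 0; /* prints 1.06 and 2.89, matching the example */
    }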
diff --git a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
new file mode 100644
index 000000000000..f045cc89dd6d
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
@@ -0,0 +1,116 @@
+==================
+bpftool-struct_ops
+==================
+-------------------------------------------------------------------------------
+tool to register/unregister/introspect BPF struct_ops
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **struct_ops** *COMMAND*
+
+ *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }
+
+ *COMMANDS* :=
+ { **show** | **list** | **dump** | **register** | **unregister** | **help** }
+
+STRUCT_OPS COMMANDS
+===================
+
+| **bpftool** **struct_ops { show | list }** [*STRUCT_OPS_MAP*]
+| **bpftool** **struct_ops dump** [*STRUCT_OPS_MAP*]
+| **bpftool** **struct_ops register** *OBJ*
+| **bpftool** **struct_ops unregister** *STRUCT_OPS_MAP*
+| **bpftool** **struct_ops help**
+|
+| *STRUCT_OPS_MAP* := { **id** *STRUCT_OPS_MAP_ID* | **name** *STRUCT_OPS_MAP_NAME* }
+| *OBJ* := /a/file/of/bpf_struct_ops.o
+
+
+DESCRIPTION
+===========
+ **bpftool struct_ops { show | list }** [*STRUCT_OPS_MAP*]
+ Show brief information about the struct_ops in the system.
+ If *STRUCT_OPS_MAP* is specified, it shows information only
+ for the given struct_ops. Otherwise, it lists all struct_ops
+ currently existing in the system.
+
+ Output will start with struct_ops map ID, followed by its map
+ name and its struct_ops's kernel type.
+
+ **bpftool struct_ops dump** [*STRUCT_OPS_MAP*]
+ Dump detailed information about the struct_ops in the system.
+ If *STRUCT_OPS_MAP* is specified, it dumps information only
+ for the given struct_ops. Otherwise, it dumps all struct_ops
+ currently existing in the system.
+
+ **bpftool struct_ops register** *OBJ*
+ Register bpf struct_ops from *OBJ*. All struct_ops under
+ the ELF section ".struct_ops" will be registered to
+ their respective kernel subsystems.
+
+ **bpftool struct_ops unregister** *STRUCT_OPS_MAP*
+ Unregister the *STRUCT_OPS_MAP* from the kernel subsystem.
+
+ **bpftool struct_ops help**
+ Print short help message.
+
+OPTIONS
+=======
+ -h, --help
+ Print short generic help message (similar to **bpftool help**).
+
+ -V, --version
+ Print version number (similar to **bpftool version**).
+
+ -j, --json
+ Generate JSON output. For commands that cannot produce JSON, this
+ option has no effect.
+
+ -p, --pretty
+ Generate human-readable JSON output. Implies **-j**.
+
+ -d, --debug
+ Print all logs available, even debug-level information. This
+ includes logs from libbpf as well as from the verifier, when
+ attempting to load programs.
+
+EXAMPLES
+========
+**# bpftool struct_ops show**
+
+::
+
+ 100: dctcp tcp_congestion_ops
+ 105: cubic tcp_congestion_ops
+
+**# bpftool struct_ops unregister id 105**
+
+::
+
+ Unregistered tcp_congestion_ops cubic id 105
+
+**# bpftool struct_ops register bpf_cubic.o**
+
+::
+
+ Registered tcp_congestion_ops cubic id 110
+
+
+SEE ALSO
+========
+ **bpf**\ (2),
+ **bpf-helpers**\ (7),
+ **bpftool**\ (8),
+ **bpftool-prog**\ (8),
+ **bpftool-map**\ (8),
+ **bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
+ **bpftool-net**\ (8),
+ **bpftool-perf**\ (8),
+ **bpftool-btf**\ (8),
+ **bpftool-gen**\ (8)
+
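
As a rough illustration of what **register** amounts to in libbpf terms: a hedged sketch, not the implementation this patch adds in struct_ops.c below; error handling is trimmed and the object path is an example.

    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    /* Sketch of "bpftool struct_ops register bpf_cubic.o": load the
     * object, then attach every struct_ops map found in it. */
    static int register_struct_ops(const char *path)
    {
        struct bpf_object *obj;
        struct bpf_map *map;

        obj = bpf_object__open_file(path, NULL);
        if (libbpf_get_error(obj))
            return -1;
        if (bpf_object__load(obj))
            return -1;

        bpf_object__for_each_map(map, obj) {
            struct bpf_link *link;

            if (bpf_map__def(map)->type != BPF_MAP_TYPE_STRUCT_OPS)
                continue;
            /* attaching a struct_ops map registers it with its kernel
             * subsystem (e.g. tcp_congestion_ops) */
            link = bpf_map__attach_struct_ops(map);
            if (libbpf_get_error(link))
                return -1;
        }
        return 0;
    }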
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index c4e810335810..f584d1fdfc64 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -59,10 +59,12 @@ LIBS = $(LIBBPF) -lelf -lz
INSTALL ?= install
RM ?= rm -f
+CLANG ?= clang
FEATURE_USER = .bpftool
-FEATURE_TESTS = libbfd disassembler-four-args reallocarray zlib
-FEATURE_DISPLAY = libbfd disassembler-four-args zlib
+FEATURE_TESTS = libbfd disassembler-four-args reallocarray zlib \
+ clang-bpf-global-var
+FEATURE_DISPLAY = libbfd disassembler-four-args zlib clang-bpf-global-var
check_feat := 1
NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall
@@ -110,14 +112,39 @@ SRCS += $(BFD_SRCS)
endif
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
+_OBJS = $(filter-out $(OUTPUT)prog.o,$(OBJS)) $(OUTPUT)_prog.o
+
+ifeq ($(feature-clang-bpf-global-var),1)
+ __OBJS = $(OBJS)
+else
+ __OBJS = $(_OBJS)
+endif
+
+$(OUTPUT)_prog.o: prog.c
+ $(QUIET_CC)$(COMPILE.c) -MMD -DBPFTOOL_WITHOUT_SKELETONS -o $@ $<
+
+$(OUTPUT)_bpftool: $(_OBJS) $(LIBBPF)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(_OBJS) $(LIBS)
+
+skeleton/profiler.bpf.o: skeleton/profiler.bpf.c $(LIBBPF)
+ $(QUIET_CLANG)$(CLANG) \
+ -I$(srctree)/tools/include/uapi/ \
+ -I$(LIBBPF_PATH) -I$(srctree)/tools/lib \
+ -g -O2 -target bpf -c $< -o $@
+
+profiler.skel.h: $(OUTPUT)_bpftool skeleton/profiler.bpf.o
+ $(QUIET_GEN)$(OUTPUT)./_bpftool gen skeleton skeleton/profiler.bpf.o > $@
+
+$(OUTPUT)prog.o: prog.c profiler.skel.h
+ $(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
$(OUTPUT)feature.o: | zdep
-$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
- $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS)
+$(OUTPUT)bpftool: $(__OBJS) $(LIBBPF)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(__OBJS) $(LIBS)
$(OUTPUT)%.o: %.c
$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
@@ -125,6 +152,7 @@ $(OUTPUT)%.o: %.c
clean: $(LIBBPF)-clean
$(call QUIET_CLEAN, bpftool)
$(Q)$(RM) -- $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
+ $(Q)$(RM) -- $(OUTPUT)_bpftool profiler.skel.h skeleton/profiler.bpf.o
$(Q)$(RM) -r -- $(OUTPUT)libbpf/
$(call QUIET_CLEAN, core-gen)
$(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool
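
The net effect of the bootstrap above: a skeleton-less _bpftool is built first, used to generate profiler.skel.h from profiler.bpf.o, and the final bpftool is then compiled against that header. The rough shape of the generated API is inferred from how do_profile() consumes it later in this patch; the real header also embeds the object bytes and per-program handles.

    struct profiler_bpf {
        struct bpf_object *obj;
        struct {
            struct bpf_map *events;
            struct bpf_map *fentry_readings;
            struct bpf_map *accum_readings;
            struct bpf_map *counts;
        } maps;
        struct {
            __u32 num_cpu;
            __u32 num_metric;
        } *rodata;
        /* ... programs, links, embedded object data ... */
    };

    struct profiler_bpf *profiler_bpf__open(void);
    int profiler_bpf__load(struct profiler_bpf *obj);
    int profiler_bpf__attach(struct profiler_bpf *obj);
    void profiler_bpf__destroy(struct profiler_bpf *obj);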
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 754d8395e451..45ee99b159e2 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -337,6 +337,7 @@ _bpftool()
local PROG_TYPE='id pinned tag name'
local MAP_TYPE='id pinned name'
+ local METRIC_TYPE='cycles instructions l1d_loads llc_misses'
case $command in
show|list)
[[ $prev != "$command" ]] && return 0
@@ -388,7 +389,7 @@ _bpftool()
_bpftool_get_prog_ids
;;
name)
- _bpftool_get_map_names
+ _bpftool_get_prog_names
;;
pinned)
_filedir
@@ -469,7 +470,8 @@ _bpftool()
cgroup/recvmsg4 cgroup/recvmsg6 \
cgroup/post_bind4 cgroup/post_bind6 \
cgroup/sysctl cgroup/getsockopt \
- cgroup/setsockopt" -- \
+ cgroup/setsockopt struct_ops \
+ fentry fexit freplace" -- \
"$cur" ) )
return 0
;;
@@ -497,9 +499,51 @@ _bpftool()
tracelog)
return 0
;;
+ profile)
+ case $cword in
+ 3)
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ 4)
+ case $prev in
+ id)
+ _bpftool_get_prog_ids
+ ;;
+ name)
+ _bpftool_get_prog_names
+ ;;
+ pinned)
+ _filedir
+ ;;
+ esac
+ return 0
+ ;;
+ 5)
+ COMPREPLY=( $( compgen -W "$METRIC_TYPE duration" -- "$cur" ) )
+ return 0
+ ;;
+ 6)
+ case $prev in
+ duration)
+ return 0
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "$METRIC_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ esac
+ return 0
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "$METRIC_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ esac
+ ;;
run)
- if [[ ${#words[@]} -lt 5 ]]; then
- _filedir
+ if [[ ${#words[@]} -eq 4 ]]; then
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
return 0
fi
case $prev in
@@ -507,6 +551,10 @@ _bpftool()
_bpftool_get_prog_ids
return 0
;;
+ name)
+ _bpftool_get_prog_names
+ return 0
+ ;;
data_in|data_out|ctx_in|ctx_out)
_filedir
return 0
@@ -524,7 +572,35 @@ _bpftool()
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'dump help pin attach detach \
- load loadall show list tracelog run' -- "$cur" ) )
+ load loadall show list tracelog run profile' -- "$cur" ) )
+ ;;
+ esac
+ ;;
+ struct_ops)
+ local STRUCT_OPS_TYPE='id name'
+ case $command in
+ show|list|dump|unregister)
+ case $prev in
+ $command)
+ COMPREPLY=( $( compgen -W "$STRUCT_OPS_TYPE" -- "$cur" ) )
+ ;;
+ id)
+ _bpftool_get_map_ids_for_type struct_ops
+ ;;
+ name)
+ _bpftool_get_map_names_for_type struct_ops
+ ;;
+ esac
+ return 0
+ ;;
+ register)
+ _filedir
+ return 0
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'register unregister show list dump help' \
+ -- "$cur" ) )
;;
esac
;;
@@ -712,11 +788,17 @@ _bpftool()
esac
;;
pin)
- if [[ $prev == "$command" ]]; then
- COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
- else
- _filedir
- fi
+ case $prev in
+ $command)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ ;;
+ id)
+ _bpftool_get_map_ids
+ ;;
+ name)
+ _bpftool_get_map_names
+ ;;
+ esac
return 0
;;
event_pipe)
@@ -843,7 +925,7 @@ _bpftool()
case $command in
skeleton)
_filedir
- ;;
+ ;;
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'skeleton help' -- "$cur" ) )
@@ -943,6 +1025,9 @@ _bpftool()
id)
_bpftool_get_prog_ids
;;
+ name)
+ _bpftool_get_prog_names
+ ;;
pinned)
_filedir
;;
@@ -983,11 +1068,12 @@ _bpftool()
probe)
[[ $prev == "prefix" ]] && return 0
if _bpftool_search_list 'macros'; then
- COMPREPLY+=( $( compgen -W 'prefix' -- "$cur" ) )
+ _bpftool_once_attr 'prefix'
else
COMPREPLY+=( $( compgen -W 'macros' -- "$cur" ) )
fi
_bpftool_one_of_list 'kernel dev'
+ _bpftool_once_attr 'full'
return 0
;;
*)
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index b3745ed711ba..bcaf55b59498 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -389,6 +389,9 @@ static int dump_btf_c(const struct btf *btf,
if (IS_ERR(d))
return PTR_ERR(d);
+ printf("#ifndef __VMLINUX_H__\n");
+ printf("#define __VMLINUX_H__\n");
+ printf("\n");
printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
printf("#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)\n");
printf("#endif\n\n");
@@ -412,6 +415,8 @@ static int dump_btf_c(const struct btf *btf,
printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
printf("#pragma clang attribute pop\n");
printf("#endif\n");
+ printf("\n");
+ printf("#endif /* __VMLINUX_H__ */\n");
done:
btf_dump__free(d);
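
With this change, a C dump such as the commonly used "bpftool btf dump file /sys/kernel/btf/vmlinux format c" gains an include guard, so the generated vmlinux.h can be included more than once. Reconstructed from the printf calls above, the emitted wrapper looks like this (type definitions elided):

    #ifndef __VMLINUX_H__
    #define __VMLINUX_H__

    #ifndef BPF_NO_PRESERVE_ACCESS_INDEX
    #pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)
    #endif

    /* ... dumped type definitions ... */

    #ifndef BPF_NO_PRESERVE_ACCESS_INDEX
    #pragma clang attribute pop
    #endif

    #endif /* __VMLINUX_H__ */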
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 01cc52b834fa..497807bec675 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -4,11 +4,13 @@
#include <ctype.h>
#include <stdio.h> /* for (FILE *) used by json_writer */
#include <string.h>
+#include <unistd.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <bpf/btf.h>
+#include <bpf/bpf.h>
#include "json_writer.h"
#include "main.h"
@@ -22,13 +24,102 @@
static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
__u8 bit_offset, const void *data);
-static void btf_dumper_ptr(const void *data, json_writer_t *jw,
- bool is_plain_text)
+static int btf_dump_func(const struct btf *btf, char *func_sig,
+ const struct btf_type *func_proto,
+ const struct btf_type *func, int pos, int size);
+
+static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
+ const struct btf_type *func_proto,
+ __u32 prog_id)
{
- if (is_plain_text)
- jsonw_printf(jw, "%p", *(void **)data);
+ struct bpf_prog_info_linear *prog_info = NULL;
+ const struct btf_type *func_type;
+ const char *prog_name = NULL;
+ struct bpf_func_info *finfo;
+ struct btf *prog_btf = NULL;
+ struct bpf_prog_info *info;
+ int prog_fd, func_sig_len;
+ char prog_str[1024];
+
+ /* Get the ptr's func_proto */
+ func_sig_len = btf_dump_func(d->btf, prog_str, func_proto, NULL, 0,
+ sizeof(prog_str));
+ if (func_sig_len == -1)
+ return -1;
+
+ if (!prog_id)
+ goto print;
+
+ /* Get the bpf_prog's name. Obtain from func_info. */
+ prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ if (prog_fd == -1)
+ goto print;
+
+ prog_info = bpf_program__get_prog_info_linear(prog_fd,
+ 1UL << BPF_PROG_INFO_FUNC_INFO);
+ close(prog_fd);
+ if (IS_ERR(prog_info)) {
+ prog_info = NULL;
+ goto print;
+ }
+ info = &prog_info->info;
+
+ if (!info->btf_id || !info->nr_func_info ||
+ btf__get_from_id(info->btf_id, &prog_btf))
+ goto print;
+ finfo = (struct bpf_func_info *)info->func_info;
+ func_type = btf__type_by_id(prog_btf, finfo->type_id);
+ if (!func_type || !btf_is_func(func_type))
+ goto print;
+
+ prog_name = btf__name_by_offset(prog_btf, func_type->name_off);
+
+print:
+ if (!prog_id)
+ snprintf(&prog_str[func_sig_len],
+ sizeof(prog_str) - func_sig_len, " 0");
+ else if (prog_name)
+ snprintf(&prog_str[func_sig_len],
+ sizeof(prog_str) - func_sig_len,
+ " %s/prog_id:%u", prog_name, prog_id);
else
- jsonw_printf(jw, "%lu", *(unsigned long *)data);
+ snprintf(&prog_str[func_sig_len],
+ sizeof(prog_str) - func_sig_len,
+ " <unknown_prog_name>/prog_id:%u", prog_id);
+
+ prog_str[sizeof(prog_str) - 1] = '\0';
+ jsonw_string(d->jw, prog_str);
+ btf__free(prog_btf);
+ free(prog_info);
+ return 0;
+}
+
+static void btf_dumper_ptr(const struct btf_dumper *d,
+ const struct btf_type *t,
+ const void *data)
+{
+ unsigned long value = *(unsigned long *)data;
+ const struct btf_type *ptr_type;
+ __s32 ptr_type_id;
+
+ if (!d->prog_id_as_func_ptr || value > UINT32_MAX)
+ goto print_ptr_value;
+
+ ptr_type_id = btf__resolve_type(d->btf, t->type);
+ if (ptr_type_id < 0)
+ goto print_ptr_value;
+ ptr_type = btf__type_by_id(d->btf, ptr_type_id);
+ if (!ptr_type || !btf_is_func_proto(ptr_type))
+ goto print_ptr_value;
+
+ if (!dump_prog_id_as_func_ptr(d, ptr_type, value))
+ return;
+
+print_ptr_value:
+ if (d->is_plain_text)
+ jsonw_printf(d->jw, "%p", (void *)value);
+ else
+ jsonw_printf(d->jw, "%lu", value);
}
static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
@@ -43,9 +134,78 @@ static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
return btf_dumper_do_type(d, actual_type_id, bit_offset, data);
}
-static void btf_dumper_enum(const void *data, json_writer_t *jw)
+static int btf_dumper_enum(const struct btf_dumper *d,
+ const struct btf_type *t,
+ const void *data)
+{
+ const struct btf_enum *enums = btf_enum(t);
+ __s64 value;
+ __u16 i;
+
+ switch (t->size) {
+ case 8:
+ value = *(__s64 *)data;
+ break;
+ case 4:
+ value = *(__s32 *)data;
+ break;
+ case 2:
+ value = *(__s16 *)data;
+ break;
+ case 1:
+ value = *(__s8 *)data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < btf_vlen(t); i++) {
+ if (value == enums[i].val) {
+ jsonw_string(d->jw,
+ btf__name_by_offset(d->btf,
+ enums[i].name_off));
+ return 0;
+ }
+ }
+
+ jsonw_int(d->jw, value);
+ return 0;
+}
+
+static bool is_str_array(const struct btf *btf, const struct btf_array *arr,
+ const char *s)
{
- jsonw_printf(jw, "%d", *(int *)data);
+ const struct btf_type *elem_type;
+ const char *end_s;
+
+ if (!arr->nelems)
+ return false;
+
+ elem_type = btf__type_by_id(btf, arr->type);
+ /* Do not skip typedefs here: a typedef to char does not
+ * count as a string for now.
+ */
+ while (elem_type && btf_is_mod(elem_type))
+ elem_type = btf__type_by_id(btf, elem_type->type);
+
+ if (!elem_type || !btf_is_int(elem_type) || elem_type->size != 1)
+ return false;
+
+ if (btf_int_encoding(elem_type) != BTF_INT_CHAR &&
+ strcmp("char", btf__name_by_offset(btf, elem_type->name_off)))
+ return false;
+
+ end_s = s + arr->nelems;
+ while (s < end_s) {
+ if (!*s)
+ return true;
+ if (*s <= 0x1f || *s >= 0x7f)
+ return false;
+ s++;
+ }
+
+ /* '\0' is not found */
+ return false;
}
static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
@@ -57,6 +217,11 @@ static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
int ret = 0;
__u32 i;
+ if (is_str_array(d->btf, arr, data)) {
+ jsonw_string(d->jw, data);
+ return 0;
+ }
+
elem_size = btf__resolve_size(d->btf, arr->type);
if (elem_size < 0)
return elem_size;
@@ -366,10 +531,9 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
case BTF_KIND_ARRAY:
return btf_dumper_array(d, type_id, data);
case BTF_KIND_ENUM:
- btf_dumper_enum(data, d->jw);
- return 0;
+ return btf_dumper_enum(d, t, data);
case BTF_KIND_PTR:
- btf_dumper_ptr(data, d->jw, d->is_plain_text);
+ btf_dumper_ptr(d, t, data);
return 0;
case BTF_KIND_UNKN:
jsonw_printf(d->jw, "(unknown)");
@@ -414,10 +578,6 @@ int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
return -1; \
} while (0)
-static int btf_dump_func(const struct btf *btf, char *func_sig,
- const struct btf_type *func_proto,
- const struct btf_type *func, int pos, int size);
-
static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
char *func_sig, int pos, int size)
{
@@ -526,8 +686,15 @@ static int btf_dump_func(const struct btf *btf, char *func_sig,
BTF_PRINT_ARG(", ");
if (arg->type) {
BTF_PRINT_TYPE(arg->type);
- BTF_PRINT_ARG("%s",
- btf__name_by_offset(btf, arg->name_off));
+ if (arg->name_off)
+ BTF_PRINT_ARG("%s",
+ btf__name_by_offset(btf, arg->name_off));
+ else if (pos && func_sig[pos - 1] == ' ')
+ /* Remove unnecessary space for
+ * FUNC_PROTO that does not have
+ * arg->name_off
+ */
+ func_sig[--pos] = '\0';
} else {
BTF_PRINT_ARG("...");
}
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index b75b8ec5469c..f2223dbdfb0a 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -211,39 +211,14 @@ int do_pin_fd(int fd, const char *name)
return err;
}
-int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
+int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***))
{
- unsigned int id;
- char *endptr;
int err;
int fd;
- if (argc < 3) {
- p_err("too few arguments, id ID and FILE path is required");
- return -1;
- } else if (argc > 3) {
- p_err("too many arguments");
- return -1;
- }
-
- if (!is_prefix(*argv, "id")) {
- p_err("expected 'id' got %s", *argv);
- return -1;
- }
- NEXT_ARG();
-
- id = strtoul(*argv, &endptr, 0);
- if (*endptr) {
- p_err("can't parse %s as ID", *argv);
- return -1;
- }
- NEXT_ARG();
-
- fd = get_fd_by_id(id);
- if (fd < 0) {
- p_err("can't open object by id (%u): %s", id, strerror(errno));
- return -1;
- }
+ fd = get_fd(&argc, &argv);
+ if (fd < 0)
+ return fd;
err = do_pin_fd(fd, *argv);
@@ -597,3 +572,10 @@ int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what)
return 0;
}
+
+int __printf(2, 0)
+print_all_levels(__maybe_unused enum libbpf_print_level level,
+ const char *format, va_list args)
+{
+ return vfprintf(stderr, format, args);
+}
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 941873d778d8..88718ee6a438 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -112,18 +112,12 @@ print_start_section(const char *json_title, const char *plain_title,
}
}
-static void
-print_end_then_start_section(const char *json_title, const char *plain_title,
- const char *define_comment,
- const char *define_prefix)
+static void print_end_section(void)
{
if (json_output)
jsonw_end_object(json_wtr);
else
printf("\n");
-
- print_start_section(json_title, plain_title, define_comment,
- define_prefix);
}
/* Probing functions */
@@ -520,13 +514,38 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
}
static void
+probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
+ const char *define_prefix, unsigned int id,
+ const char *ptype_name, __u32 ifindex)
+{
+ bool res;
+
+ if (!supported_type)
+ res = false;
+ else
+ res = bpf_probe_helper(id, prog_type, ifindex);
+
+ if (json_output) {
+ if (res)
+ jsonw_string(json_wtr, helper_name[id]);
+ } else if (define_prefix) {
+ printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n",
+ define_prefix, ptype_name, helper_name[id],
+ res ? "1" : "0");
+ } else {
+ if (res)
+ printf("\n\t- %s", helper_name[id]);
+ }
+}
+
+static void
probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
- const char *define_prefix, __u32 ifindex)
+ const char *define_prefix, bool full_mode,
+ __u32 ifindex)
{
const char *ptype_name = prog_type_name[prog_type];
char feat_name[128];
unsigned int id;
- bool res;
if (ifindex)
/* Only test helpers for offload-able program types */
@@ -548,21 +567,19 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
}
for (id = 1; id < ARRAY_SIZE(helper_name); id++) {
- if (!supported_type)
- res = false;
- else
- res = bpf_probe_helper(id, prog_type, ifindex);
-
- if (json_output) {
- if (res)
- jsonw_string(json_wtr, helper_name[id]);
- } else if (define_prefix) {
- printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n",
- define_prefix, ptype_name, helper_name[id],
- res ? "1" : "0");
- } else {
- if (res)
- printf("\n\t- %s", helper_name[id]);
+ /* Skip helper functions which emit dmesg messages when not in
+ * the full mode.
+ */
+ switch (id) {
+ case BPF_FUNC_trace_printk:
+ case BPF_FUNC_probe_write_user:
+ if (!full_mode)
+ continue;
+ /* fallthrough */
+ default:
+ probe_helper_for_progtype(prog_type, supported_type,
+ define_prefix, id, ptype_name,
+ ifindex);
}
}
@@ -584,13 +601,132 @@ probe_large_insn_limit(const char *define_prefix, __u32 ifindex)
res, define_prefix);
}
+static void
+section_system_config(enum probe_component target, const char *define_prefix)
+{
+ switch (target) {
+ case COMPONENT_KERNEL:
+ case COMPONENT_UNSPEC:
+ if (define_prefix)
+ break;
+
+ print_start_section("system_config",
+ "Scanning system configuration...",
+ NULL, /* define_comment never used here */
+ NULL); /* define_prefix always NULL here */
+ if (check_procfs()) {
+ probe_unprivileged_disabled();
+ probe_jit_enable();
+ probe_jit_harden();
+ probe_jit_kallsyms();
+ probe_jit_limit();
+ } else {
+ p_info("/* procfs not mounted, skipping related probes */");
+ }
+ probe_kernel_image_config();
+ print_end_section();
+ break;
+ default:
+ break;
+ }
+}
+
+static bool section_syscall_config(const char *define_prefix)
+{
+ bool res;
+
+ print_start_section("syscall_config",
+ "Scanning system call availability...",
+ "/*** System call availability ***/",
+ define_prefix);
+ res = probe_bpf_syscall(define_prefix);
+ print_end_section();
+
+ return res;
+}
+
+static void
+section_program_types(bool *supported_types, const char *define_prefix,
+ __u32 ifindex)
+{
+ unsigned int i;
+
+ print_start_section("program_types",
+ "Scanning eBPF program types...",
+ "/*** eBPF program types ***/",
+ define_prefix);
+
+ for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+ probe_prog_type(i, supported_types, define_prefix, ifindex);
+
+ print_end_section();
+}
+
+static void section_map_types(const char *define_prefix, __u32 ifindex)
+{
+ unsigned int i;
+
+ print_start_section("map_types",
+ "Scanning eBPF map types...",
+ "/*** eBPF map types ***/",
+ define_prefix);
+
+ for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++)
+ probe_map_type(i, define_prefix, ifindex);
+
+ print_end_section();
+}
+
+static void
+section_helpers(bool *supported_types, const char *define_prefix,
+ bool full_mode, __u32 ifindex)
+{
+ unsigned int i;
+
+ print_start_section("helpers",
+ "Scanning eBPF helper functions...",
+ "/*** eBPF helper functions ***/",
+ define_prefix);
+
+ if (define_prefix)
+ printf("/*\n"
+ " * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n"
+ " * to determine if <helper_name> is available for <prog_type_name>,\n"
+ " * e.g.\n"
+ " * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n"
+ " * // do stuff with this helper\n"
+ " * #elif\n"
+ " * // use a workaround\n"
+ " * #endif\n"
+ " */\n"
+ "#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n"
+ " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
+ define_prefix, define_prefix, define_prefix,
+ define_prefix);
+ for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+ probe_helpers_for_progtype(i, supported_types[i],
+ define_prefix, full_mode, ifindex);
+
+ print_end_section();
+}
+
+static void section_misc(const char *define_prefix, __u32 ifindex)
+{
+ print_start_section("misc",
+ "Scanning miscellaneous eBPF features...",
+ "/*** eBPF misc features ***/",
+ define_prefix);
+ probe_large_insn_limit(define_prefix, ifindex);
+ print_end_section();
+}
+
static int do_probe(int argc, char **argv)
{
enum probe_component target = COMPONENT_UNSPEC;
const char *define_prefix = NULL;
bool supported_types[128] = {};
+ bool full_mode = false;
__u32 ifindex = 0;
- unsigned int i;
char *ifname;
/* Detection assumes user has sufficient privileges (CAP_SYS_ADMIN).
@@ -629,6 +765,9 @@ static int do_probe(int argc, char **argv)
strerror(errno));
return -1;
}
+ } else if (is_prefix(*argv, "full")) {
+ full_mode = true;
+ NEXT_ARG();
} else if (is_prefix(*argv, "macros") && !define_prefix) {
define_prefix = "";
NEXT_ARG();
@@ -658,97 +797,19 @@ static int do_probe(int argc, char **argv)
jsonw_start_object(json_wtr);
}
- switch (target) {
- case COMPONENT_KERNEL:
- case COMPONENT_UNSPEC:
- if (define_prefix)
- break;
-
- print_start_section("system_config",
- "Scanning system configuration...",
- NULL, /* define_comment never used here */
- NULL); /* define_prefix always NULL here */
- if (check_procfs()) {
- probe_unprivileged_disabled();
- probe_jit_enable();
- probe_jit_harden();
- probe_jit_kallsyms();
- probe_jit_limit();
- } else {
- p_info("/* procfs not mounted, skipping related probes */");
- }
- probe_kernel_image_config();
- if (json_output)
- jsonw_end_object(json_wtr);
- else
- printf("\n");
- break;
- default:
- break;
- }
-
- print_start_section("syscall_config",
- "Scanning system call availability...",
- "/*** System call availability ***/",
- define_prefix);
-
- if (!probe_bpf_syscall(define_prefix))
+ section_system_config(target, define_prefix);
+ if (!section_syscall_config(define_prefix))
/* bpf() syscall unavailable, don't probe other BPF features */
goto exit_close_json;
-
- print_end_then_start_section("program_types",
- "Scanning eBPF program types...",
- "/*** eBPF program types ***/",
- define_prefix);
-
- for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
- probe_prog_type(i, supported_types, define_prefix, ifindex);
-
- print_end_then_start_section("map_types",
- "Scanning eBPF map types...",
- "/*** eBPF map types ***/",
- define_prefix);
-
- for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++)
- probe_map_type(i, define_prefix, ifindex);
-
- print_end_then_start_section("helpers",
- "Scanning eBPF helper functions...",
- "/*** eBPF helper functions ***/",
- define_prefix);
-
- if (define_prefix)
- printf("/*\n"
- " * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n"
- " * to determine if <helper_name> is available for <prog_type_name>,\n"
- " * e.g.\n"
- " * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n"
- " * // do stuff with this helper\n"
- " * #elif\n"
- " * // use a workaround\n"
- " * #endif\n"
- " */\n"
- "#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n"
- " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
- define_prefix, define_prefix, define_prefix,
- define_prefix);
- for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
- probe_helpers_for_progtype(i, supported_types[i],
- define_prefix, ifindex);
-
- print_end_then_start_section("misc",
- "Scanning miscellaneous eBPF features...",
- "/*** eBPF misc features ***/",
- define_prefix);
- probe_large_insn_limit(define_prefix, ifindex);
+ section_program_types(supported_types, define_prefix, ifindex);
+ section_map_types(define_prefix, ifindex);
+ section_helpers(supported_types, define_prefix, full_mode, ifindex);
+ section_misc(define_prefix, ifindex);
exit_close_json:
- if (json_output) {
- /* End current "section" of probes */
- jsonw_end_object(json_wtr);
+ if (json_output)
/* End root object */
jsonw_end_object(json_wtr);
- }
return 0;
}
@@ -761,7 +822,7 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %s %s probe [COMPONENT] [macros [prefix PREFIX]]\n"
+ "Usage: %s %s probe [COMPONENT] [full] [macros [prefix PREFIX]]\n"
" %s %s help\n"
"\n"
" COMPONENT := { kernel | dev NAME }\n"
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 6d41bbfc6459..466c269eabdd 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -58,7 +58,7 @@ static int do_help(int argc, char **argv)
" %s batch file FILE\n"
" %s version\n"
"\n"
- " OBJECT := { prog | map | cgroup | perf | net | feature | btf | gen }\n"
+ " OBJECT := { prog | map | cgroup | perf | net | feature | btf | gen | struct_ops }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, bin_name, bin_name);
@@ -79,13 +79,6 @@ static int do_version(int argc, char **argv)
return 0;
}
-static int __printf(2, 0)
-print_all_levels(__maybe_unused enum libbpf_print_level level,
- const char *format, va_list args)
-{
- return vfprintf(stderr, format, args);
-}
-
int cmd_select(const struct cmd *cmds, int argc, char **argv,
int (*help)(int argc, char **argv))
{
@@ -228,6 +221,7 @@ static const struct cmd cmds[] = {
{ "feature", do_feature },
{ "btf", do_btf },
{ "gen", do_gen },
+ { "struct_ops", do_struct_ops },
{ "version", do_version },
{ 0 }
};
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 4e75b58d3989..86f14ce26fd7 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -14,6 +14,8 @@
#include <linux/hashtable.h>
#include <tools/libc_compat.h>
+#include <bpf/libbpf.h>
+
#include "json_writer.h"
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
@@ -76,6 +78,9 @@ static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
[BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
+ [BPF_PROG_TYPE_TRACING] = "tracing",
+ [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
+ [BPF_PROG_TYPE_EXT] = "ext",
};
extern const char * const map_type_name[];
@@ -143,7 +148,7 @@ char *get_fdinfo(int fd, const char *key);
int open_obj_pinned(char *path, bool quiet);
int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
int mount_bpffs_for_pin(const char *name);
-int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32));
+int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***));
int do_pin_fd(int fd, const char *name);
int do_prog(int argc, char **arg);
@@ -156,6 +161,7 @@ int do_tracelog(int argc, char **arg);
int do_feature(int argc, char **argv);
int do_btf(int argc, char **argv);
int do_gen(int argc, char **argv);
+int do_struct_ops(int argc, char **argv);
int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
int prog_parse_fd(int *argc, char ***argv);
@@ -200,6 +206,7 @@ struct btf_dumper {
const struct btf *btf;
json_writer_t *jw;
bool is_plain_text;
+ bool prog_id_as_func_ptr;
};
/* btf_dumper_type - print data along with type information
@@ -226,4 +233,7 @@ struct tcmsg;
int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb);
int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind,
const char *devname, int ifindex);
+
+int print_all_levels(__maybe_unused enum libbpf_print_level level,
+ const char *format, va_list args);
#endif
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index e6c85680b34d..693a632f6813 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -1384,7 +1384,7 @@ static int do_pin(int argc, char **argv)
{
int err;
- err = do_pin_any(argc, argv, bpf_map_get_fd_by_id);
+ err = do_pin_any(argc, argv, map_parse_fd);
if (!err && json_output)
jsonw_null(json_wtr);
return err;
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index b352ab041160..f6a5974a7b0a 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -4,6 +4,7 @@
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
+#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
@@ -11,10 +12,13 @@
#include <time.h>
#include <unistd.h>
#include <net/if.h>
+#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <sys/syscall.h>
#include <linux/err.h>
+#include <linux/perf_event.h>
#include <linux/sizes.h>
#include <bpf/bpf.h>
@@ -809,7 +813,7 @@ static int do_pin(int argc, char **argv)
{
int err;
- err = do_pin_any(argc, argv, bpf_prog_get_fd_by_id);
+ err = do_pin_any(argc, argv, prog_parse_fd);
if (!err && json_output)
jsonw_null(json_wtr);
return err;
@@ -1243,6 +1247,25 @@ free_data_in:
return err;
}
+static int
+get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
+ enum bpf_attach_type *expected_attach_type)
+{
+ libbpf_print_fn_t print_backup;
+ int ret;
+
+ ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
+ if (!ret)
+ return ret;
+
+ /* libbpf_prog_type_by_name() failed, let's re-run with debug level */
+ print_backup = libbpf_set_print(print_all_levels);
+ ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
+ libbpf_set_print(print_backup);
+
+ return ret;
+}
+
static int load_with_options(int argc, char **argv, bool first_prog_only)
{
enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
@@ -1292,8 +1315,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
strcat(type, *argv);
strcat(type, "/");
- err = libbpf_prog_type_by_name(type, &common_prog_type,
- &expected_attach_type);
+ err = get_prog_type_by_name(type, &common_prog_type,
+ &expected_attach_type);
free(type);
if (err < 0)
goto err_free_reuse_maps;
@@ -1392,8 +1415,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
if (prog_type == BPF_PROG_TYPE_UNSPEC) {
const char *sec_name = bpf_program__title(pos, false);
- err = libbpf_prog_type_by_name(sec_name, &prog_type,
- &expected_attach_type);
+ err = get_prog_type_by_name(sec_name, &prog_type,
+ &expected_attach_type);
if (err < 0)
goto err_close_obj;
}
@@ -1537,6 +1560,422 @@ static int do_loadall(int argc, char **argv)
return load_with_options(argc, argv, false);
}
+#ifdef BPFTOOL_WITHOUT_SKELETONS
+
+static int do_profile(int argc, char **argv)
+{
+ p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
+ return 0;
+}
+
+#else /* BPFTOOL_WITHOUT_SKELETONS */
+
+#include "profiler.skel.h"
+
+struct profile_metric {
+ const char *name;
+ struct bpf_perf_event_value val;
+ struct perf_event_attr attr;
+ bool selected;
+
+ /* calculate ratios like instructions per cycle */
+ const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
+ const char *ratio_desc;
+ const float ratio_mul;
+} metrics[] = {
+ {
+ .name = "cycles",
+ .attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .exclude_user = 1,
+ },
+ },
+ {
+ .name = "instructions",
+ .attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_INSTRUCTIONS,
+ .exclude_user = 1,
+ },
+ .ratio_metric = 1,
+ .ratio_desc = "insns per cycle",
+ .ratio_mul = 1.0,
+ },
+ {
+ .name = "l1d_loads",
+ .attr = {
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_L1D |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ .exclude_user = 1,
+ },
+ },
+ {
+ .name = "llc_misses",
+ .attr = {
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_LL |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ .exclude_user = 1
+ },
+ .ratio_metric = 2,
+ .ratio_desc = "LLC misses per million insns",
+ .ratio_mul = 1e6,
+ },
+};
+
+static __u64 profile_total_count;
+
+#define MAX_NUM_PROFILE_METRICS 4
+
+static int profile_parse_metrics(int argc, char **argv)
+{
+ unsigned int metric_cnt;
+ int selected_cnt = 0;
+ unsigned int i;
+
+ metric_cnt = sizeof(metrics) / sizeof(struct profile_metric);
+
+ while (argc > 0) {
+ for (i = 0; i < metric_cnt; i++) {
+ if (is_prefix(argv[0], metrics[i].name)) {
+ if (!metrics[i].selected)
+ selected_cnt++;
+ metrics[i].selected = true;
+ break;
+ }
+ }
+ if (i == metric_cnt) {
+ p_err("unknown metric %s", argv[0]);
+ return -1;
+ }
+ NEXT_ARG();
+ }
+ if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
+ p_err("too many (%d) metrics, please specify no more than %d metrics at at time",
+ selected_cnt, MAX_NUM_PROFILE_METRICS);
+ return -1;
+ }
+ return selected_cnt;
+}
+
+static void profile_read_values(struct profiler_bpf *obj)
+{
+ __u32 m, cpu, num_cpu = obj->rodata->num_cpu;
+ int reading_map_fd, count_map_fd;
+ __u64 counts[num_cpu];
+ __u32 key = 0;
+ int err;
+
+ reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
+ count_map_fd = bpf_map__fd(obj->maps.counts);
+ if (reading_map_fd < 0 || count_map_fd < 0) {
+ p_err("failed to get fd for map");
+ return;
+ }
+
+ err = bpf_map_lookup_elem(count_map_fd, &key, counts);
+ if (err) {
+ p_err("failed to read count_map: %s", strerror(errno));
+ return;
+ }
+
+ profile_total_count = 0;
+ for (cpu = 0; cpu < num_cpu; cpu++)
+ profile_total_count += counts[cpu];
+
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ struct bpf_perf_event_value values[num_cpu];
+
+ if (!metrics[m].selected)
+ continue;
+
+ err = bpf_map_lookup_elem(reading_map_fd, &key, values);
+ if (err) {
+ p_err("failed to read reading_map: %s",
+ strerror(errno));
+ return;
+ }
+ for (cpu = 0; cpu < num_cpu; cpu++) {
+ metrics[m].val.counter += values[cpu].counter;
+ metrics[m].val.enabled += values[cpu].enabled;
+ metrics[m].val.running += values[cpu].running;
+ }
+ key++;
+ }
+}
+
+static void profile_print_readings_json(void)
+{
+ __u32 m;
+
+ jsonw_start_array(json_wtr);
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ if (!metrics[m].selected)
+ continue;
+ jsonw_start_object(json_wtr);
+ jsonw_string_field(json_wtr, "metric", metrics[m].name);
+ jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
+ jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
+ jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
+ jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
+
+ jsonw_end_object(json_wtr);
+ }
+ jsonw_end_array(json_wtr);
+}
+
+static void profile_print_readings_plain(void)
+{
+ __u32 m;
+
+ printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ struct bpf_perf_event_value *val = &metrics[m].val;
+ int r;
+
+ if (!metrics[m].selected)
+ continue;
+ printf("%18llu %-20s", val->counter, metrics[m].name);
+
+ r = metrics[m].ratio_metric - 1;
+ if (r >= 0 && metrics[r].selected &&
+ metrics[r].val.counter > 0) {
+ printf("# %8.2f %-30s",
+ val->counter * metrics[m].ratio_mul /
+ metrics[r].val.counter,
+ metrics[m].ratio_desc);
+ } else {
+ printf("%-41s", "");
+ }
+
+ if (val->enabled > val->running)
+ printf("(%4.2f%%)",
+ val->running * 100.0 / val->enabled);
+ printf("\n");
+ }
+}
+
+static void profile_print_readings(void)
+{
+ if (json_output)
+ profile_print_readings_json();
+ else
+ profile_print_readings_plain();
+}
+
+static char *profile_target_name(int tgt_fd)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_func_info *func_info;
+ const struct btf_type *t;
+ char *name = NULL;
+ struct btf *btf;
+
+ info_linear = bpf_program__get_prog_info_linear(
+ tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ p_err("failed to get info_linear for prog FD %d", tgt_fd);
+ return NULL;
+ }
+
+ if (info_linear->info.btf_id == 0 ||
+ btf__get_from_id(info_linear->info.btf_id, &btf)) {
+ p_err("prog FD %d doesn't have valid btf", tgt_fd);
+ goto out;
+ }
+
+ func_info = (struct bpf_func_info *)(info_linear->info.func_info);
+ t = btf__type_by_id(btf, func_info[0].type_id);
+ if (!t) {
+ p_err("btf %d doesn't have type %d",
+ info_linear->info.btf_id, func_info[0].type_id);
+ goto out;
+ }
+ name = strdup(btf__name_by_offset(btf, t->name_off));
+out:
+ free(info_linear);
+ return name;
+}
+
+static struct profiler_bpf *profile_obj;
+static int profile_tgt_fd = -1;
+static char *profile_tgt_name;
+static int *profile_perf_events;
+static int profile_perf_event_cnt;
+
+static void profile_close_perf_events(struct profiler_bpf *obj)
+{
+ int i;
+
+ for (i = profile_perf_event_cnt - 1; i >= 0; i--)
+ close(profile_perf_events[i]);
+
+ free(profile_perf_events);
+ profile_perf_event_cnt = 0;
+}
+
+static int profile_open_perf_events(struct profiler_bpf *obj)
+{
+ unsigned int cpu, m;
+ int map_fd, pmu_fd;
+
+ profile_perf_events = calloc(
+ sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
+ if (!profile_perf_events) {
+ p_err("failed to allocate memory for perf_event array: %s",
+ strerror(errno));
+ return -1;
+ }
+ map_fd = bpf_map__fd(obj->maps.events);
+ if (map_fd < 0) {
+ p_err("failed to get fd for events map");
+ return -1;
+ }
+
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ if (!metrics[m].selected)
+ continue;
+ for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
+ pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
+ -1/*pid*/, cpu, -1/*group_fd*/, 0);
+ if (pmu_fd < 0 ||
+ bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
+ &pmu_fd, BPF_ANY) ||
+ ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
+ p_err("failed to create event %s on cpu %d",
+ metrics[m].name, cpu);
+ return -1;
+ }
+ profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
+ }
+ }
+ return 0;
+}
+
+static void profile_print_and_cleanup(void)
+{
+ profile_close_perf_events(profile_obj);
+ profile_read_values(profile_obj);
+ profile_print_readings();
+ profiler_bpf__destroy(profile_obj);
+
+ close(profile_tgt_fd);
+ free(profile_tgt_name);
+}
+
+static void int_exit(int signo)
+{
+ profile_print_and_cleanup();
+ exit(0);
+}
+
+static int do_profile(int argc, char **argv)
+{
+ int num_metric, num_cpu, err = -1;
+ struct bpf_program *prog;
+ unsigned long duration;
+ char *endptr;
+
+ /* we at least need two args for the prog and one metric */
+ if (!REQ_ARGS(3))
+ return -EINVAL;
+
+ /* parse target fd */
+ profile_tgt_fd = prog_parse_fd(&argc, &argv);
+ if (profile_tgt_fd < 0) {
+ p_err("failed to parse fd");
+ return -1;
+ }
+
+ /* parse profiling optional duration */
+ if (argc > 2 && is_prefix(argv[0], "duration")) {
+ NEXT_ARG();
+ duration = strtoul(*argv, &endptr, 0);
+ if (*endptr)
+ usage();
+ NEXT_ARG();
+ } else {
+ duration = UINT_MAX;
+ }
+
+ num_metric = profile_parse_metrics(argc, argv);
+ if (num_metric <= 0)
+ goto out;
+
+ num_cpu = libbpf_num_possible_cpus();
+ if (num_cpu <= 0) {
+ p_err("failed to identify number of CPUs");
+ goto out;
+ }
+
+ profile_obj = profiler_bpf__open();
+ if (!profile_obj) {
+ p_err("failed to open and/or load BPF object");
+ goto out;
+ }
+
+ profile_obj->rodata->num_cpu = num_cpu;
+ profile_obj->rodata->num_metric = num_metric;
+
+ /* adjust map sizes */
+ bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu);
+ bpf_map__resize(profile_obj->maps.fentry_readings, num_metric);
+ bpf_map__resize(profile_obj->maps.accum_readings, num_metric);
+ bpf_map__resize(profile_obj->maps.counts, 1);
+
+ /* change target name */
+ profile_tgt_name = profile_target_name(profile_tgt_fd);
+ if (!profile_tgt_name)
+ goto out;
+
+ bpf_object__for_each_program(prog, profile_obj->obj) {
+ err = bpf_program__set_attach_target(prog, profile_tgt_fd,
+ profile_tgt_name);
+ if (err) {
+ p_err("failed to set attach target\n");
+ goto out;
+ }
+ }
+
+ set_max_rlimit();
+ err = profiler_bpf__load(profile_obj);
+ if (err) {
+ p_err("failed to load profile_obj");
+ goto out;
+ }
+
+ err = profile_open_perf_events(profile_obj);
+ if (err)
+ goto out;
+
+ err = profiler_bpf__attach(profile_obj);
+ if (err) {
+ p_err("failed to attach profile_obj");
+ goto out;
+ }
+ signal(SIGINT, int_exit);
+
+ sleep(duration);
+ profile_print_and_cleanup();
+ return 0;
+
+out:
+ profile_close_perf_events(profile_obj);
+ if (profile_obj)
+ profiler_bpf__destroy(profile_obj);
+ close(profile_tgt_fd);
+ free(profile_tgt_name);
+ return err;
+}
+
+#endif /* BPFTOOL_WITHOUT_SKELETONS */
+
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -1560,6 +1999,7 @@ static int do_help(int argc, char **argv)
" [data_out FILE [data_size_out L]] \\\n"
" [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
" [repeat N]\n"
+ " %s %s profile PROG [duration DURATION] METRICs\n"
" %s %s tracelog\n"
" %s %s help\n"
"\n"
@@ -1573,16 +2013,17 @@ static int do_help(int argc, char **argv)
" cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
" cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
" cgroup/sendmsg4 | cgroup/sendmsg6 | cgroup/recvmsg4 |\n"
- " cgroup/recvmsg6 | cgroup/getsockopt |\n"
- " cgroup/setsockopt }\n"
+ " cgroup/recvmsg6 | cgroup/getsockopt | cgroup/setsockopt |\n"
+ " struct_ops | fentry | fexit | freplace }\n"
" ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
" flow_dissector }\n"
+ " METRIC := { cycles | instructions | l1d_loads | llc_misses }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2]);
+ bin_name, argv[-2], bin_name, argv[-2]);
return 0;
}
@@ -1599,6 +2040,7 @@ static const struct cmd cmds[] = {
{ "detach", do_detach },
{ "tracelog", do_tracelog },
{ "run", do_run },
+ { "profile", do_profile },
{ 0 }
};
diff --git a/tools/bpf/bpftool/skeleton/profiler.bpf.c b/tools/bpf/bpftool/skeleton/profiler.bpf.c
new file mode 100644
index 000000000000..20034c12f7c5
--- /dev/null
+++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+#include "profiler.h"
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* map of perf event fds, num_cpu * num_metric entries */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(int));
+} events SEC(".maps");
+
+/* readings at fentry */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+} fentry_readings SEC(".maps");
+
+/* accumulated readings */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+} accum_readings SEC(".maps");
+
+/* sample counts, one per cpu */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u64));
+} counts SEC(".maps");
+
+const volatile __u32 num_cpu = 1;
+const volatile __u32 num_metric = 1;
+#define MAX_NUM_MATRICS 4
+
+SEC("fentry/XXX")
+int BPF_PROG(fentry_XXX)
+{
+ struct bpf_perf_event_value *ptrs[MAX_NUM_MATRICS];
+ u32 key = bpf_get_smp_processor_id();
+ u32 i;
+
+ /* look up before reading, to reduce error */
+ for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+ u32 flag = i;
+
+ ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag);
+ if (!ptrs[i])
+ return 0;
+ }
+
+ for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+ struct bpf_perf_event_value reading;
+ int err;
+
+ err = bpf_perf_event_read_value(&events, key, &reading,
+ sizeof(reading));
+ if (err)
+ return 0;
+ *(ptrs[i]) = reading;
+ key += num_cpu;
+ }
+
+ return 0;
+}
+
+static inline void
+fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
+{
+ struct bpf_perf_event_value *before, diff, *accum;
+
+ before = bpf_map_lookup_elem(&fentry_readings, &id);
+ /* only account samples with a valid fentry_reading */
+ if (before && before->counter) {
+ struct bpf_perf_event_value *accum;
+
+ diff.counter = after->counter - before->counter;
+ diff.enabled = after->enabled - before->enabled;
+ diff.running = after->running - before->running;
+
+ accum = bpf_map_lookup_elem(&accum_readings, &id);
+ if (accum) {
+ accum->counter += diff.counter;
+ accum->enabled += diff.enabled;
+ accum->running += diff.running;
+ }
+ }
+}
+
+SEC("fexit/XXX")
+int BPF_PROG(fexit_XXX)
+{
+ struct bpf_perf_event_value readings[MAX_NUM_METRICS];
+ u32 cpu = bpf_get_smp_processor_id();
+ u32 i, one = 1, zero = 0;
+ int err;
+ u64 *count;
+
+ /* read all events before updating the maps, to reduce error */
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
+ err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
+ readings + i, sizeof(*readings));
+ if (err)
+ return 0;
+ }
+ count = bpf_map_lookup_elem(&counts, &zero);
+ if (count) {
+ *count += 1;
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++)
+ fexit_update_maps(i, &readings[i]);
+ }
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
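
The fentry/fexit programs above read the counter for metric i on the current CPU from the "events" perf-event array at index cpu + i * num_cpu. Purely as an illustration (not part of the patch), user space could set up that layout along these lines; events_fd, attrs[], num_metric and num_cpu are assumed to come from the loaded skeleton and from command-line parsing. In the tool itself this is driven through the skeleton generated from profiler.bpf.c.

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <bpf/bpf.h>

/* Open one counter per metric per CPU and store the fds in the "events"
 * map, matching the cpu + metric * num_cpu indexing used by the
 * fentry/fexit programs above.
 */
static int populate_events_map(int events_fd, struct perf_event_attr *attrs,
			       unsigned int num_metric, unsigned int num_cpu)
{
	unsigned int metric, cpu;

	for (metric = 0; metric < num_metric; metric++) {
		for (cpu = 0; cpu < num_cpu; cpu++) {
			unsigned int key = cpu + metric * num_cpu;
			int fd;

			/* pid == -1: count everything running on this CPU */
			fd = syscall(__NR_perf_event_open, &attrs[metric],
				     -1, (int)cpu, -1, 0);
			if (fd < 0)
				return -1;
			if (bpf_map_update_elem(events_fd, &key, &fd, BPF_ANY))
				return -1;
		}
	}
	return 0;
}
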
diff --git a/tools/bpf/bpftool/skeleton/profiler.h b/tools/bpf/bpftool/skeleton/profiler.h
new file mode 100644
index 000000000000..1f767e9510f7
--- /dev/null
+++ b/tools/bpf/bpftool/skeleton/profiler.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __PROFILER_H
+#define __PROFILER_H
+
+/* useful typedefs from vmlinux.h */
+
+typedef signed char __s8;
+typedef unsigned char __u8;
+typedef short int __s16;
+typedef short unsigned int __u16;
+typedef int __s32;
+typedef unsigned int __u32;
+typedef long long int __s64;
+typedef long long unsigned int __u64;
+
+typedef __s8 s8;
+typedef __u8 u8;
+typedef __s16 s16;
+typedef __u16 u16;
+typedef __s32 s32;
+typedef __u32 u32;
+typedef __s64 s64;
+typedef __u64 u64;
+
+enum {
+ false = 0,
+ true = 1,
+};
+
+#ifdef __CHECKER__
+#define __bitwise__ __attribute__((bitwise))
+#else
+#define __bitwise__
+#endif
+
+typedef __u16 __bitwise__ __le16;
+typedef __u16 __bitwise__ __be16;
+typedef __u32 __bitwise__ __le32;
+typedef __u32 __bitwise__ __be32;
+typedef __u64 __bitwise__ __le64;
+typedef __u64 __bitwise__ __be64;
+
+typedef __u16 __bitwise__ __sum16;
+typedef __u32 __bitwise__ __wsum;
+
+#endif /* __PROFILER_H */
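
Once profiling stops, the per-CPU accum_readings and counts maps filled in by profiler.bpf.c hold one accumulated reading and one run count per possible CPU. A minimal user-space aggregation sketch, assuming accum_fd, counts_fd and ncpu are obtained elsewhere (e.g. from the loaded skeleton and libbpf_num_possible_cpus()); lookups on per-CPU arrays return one value per possible CPU:

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Sum one metric's accumulated readings, and the shared run count,
 * across all possible CPUs.
 */
static int dump_metric(int accum_fd, int counts_fd, __u32 metric, int ncpu)
{
	struct bpf_perf_event_value values[ncpu];
	__u64 run_cnts[ncpu];
	__u64 counter = 0, enabled = 0, running = 0, runs = 0;
	__u32 zero = 0;
	int cpu;

	if (bpf_map_lookup_elem(accum_fd, &metric, values) ||
	    bpf_map_lookup_elem(counts_fd, &zero, run_cnts))
		return -1;

	for (cpu = 0; cpu < ncpu; cpu++) {
		counter += values[cpu].counter;
		enabled += values[cpu].enabled;
		running += values[cpu].running;
		runs += run_cnts[cpu];
	}

	/* enabled/running can be used to scale the counter if the PMU
	 * was multiplexed while the programs were attached.
	 */
	printf("metric %u: %llu events over %llu runs (running %llu / enabled %llu ns)\n",
	       metric, (unsigned long long)counter, (unsigned long long)runs,
	       (unsigned long long)running, (unsigned long long)enabled);
	return 0;
}
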
diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c
new file mode 100644
index 000000000000..2a7befbd11ad
--- /dev/null
+++ b/tools/bpf/bpftool/struct_ops.c
@@ -0,0 +1,596 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2020 Facebook */
+
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <linux/err.h>
+
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+
+#include "json_writer.h"
+#include "main.h"
+
+#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
+
+static const struct btf_type *map_info_type;
+static __u32 map_info_alloc_len;
+static struct btf *btf_vmlinux;
+static __s32 map_info_type_id;
+
+struct res {
+ unsigned int nr_maps;
+ unsigned int nr_errs;
+};
+
+static const struct btf *get_btf_vmlinux(void)
+{
+ if (btf_vmlinux)
+ return btf_vmlinux;
+
+ btf_vmlinux = libbpf_find_kernel_btf();
+ if (IS_ERR(btf_vmlinux))
+ p_err("struct_ops requires kernel CONFIG_DEBUG_INFO_BTF=y");
+
+ return btf_vmlinux;
+}
+
+static const char *get_kern_struct_ops_name(const struct bpf_map_info *info)
+{
+ const struct btf *kern_btf;
+ const struct btf_type *t;
+ const char *st_ops_name;
+
+ kern_btf = get_btf_vmlinux();
+ if (IS_ERR(kern_btf))
+ return "<btf_vmlinux_not_found>";
+
+ t = btf__type_by_id(kern_btf, info->btf_vmlinux_value_type_id);
+ st_ops_name = btf__name_by_offset(kern_btf, t->name_off);
+ st_ops_name += strlen(STRUCT_OPS_VALUE_PREFIX);
+
+ return st_ops_name;
+}
+
+static __s32 get_map_info_type_id(void)
+{
+ const struct btf *kern_btf;
+
+ if (map_info_type_id)
+ return map_info_type_id;
+
+ kern_btf = get_btf_vmlinux();
+ if (IS_ERR(kern_btf)) {
+ map_info_type_id = PTR_ERR(kern_btf);
+ return map_info_type_id;
+ }
+
+ map_info_type_id = btf__find_by_name_kind(kern_btf, "bpf_map_info",
+ BTF_KIND_STRUCT);
+ if (map_info_type_id < 0) {
+ p_err("can't find bpf_map_info from btf_vmlinux");
+ return map_info_type_id;
+ }
+ map_info_type = btf__type_by_id(kern_btf, map_info_type_id);
+
+ /* Ensure map_info_alloc() has at least what bpftool needs */
+ map_info_alloc_len = map_info_type->size;
+ if (map_info_alloc_len < sizeof(struct bpf_map_info))
+ map_info_alloc_len = sizeof(struct bpf_map_info);
+
+ return map_info_type_id;
+}
+
+/* If a subcmd needs to print the bpf_map_info, it should
+ * always call map_info_alloc() to allocate the
+ * bpf_map_info object instead of allocating it
+ * on the stack.
+ *
+ * map_info_alloc() takes the running kernel's btf into
+ * account, i.e. it allocates at least the running
+ * kernel's sizeof(struct bpf_map_info).
+ *
+ * This enables the "struct_ops" cmd to print the latest
+ * "struct bpf_map_info" known to the running kernel.
+ *
+ * [ Recall that "struct_ops" requires the kernel's btf to
+ *   be available ]
+ */
+static struct bpf_map_info *map_info_alloc(__u32 *alloc_len)
+{
+ struct bpf_map_info *info;
+
+ if (get_map_info_type_id() < 0)
+ return NULL;
+
+ info = calloc(1, map_info_alloc_len);
+ if (!info)
+ p_err("mem alloc failed");
+ else
+ *alloc_len = map_info_alloc_len;
+
+ return info;
+}
+
+/* Iterate over all struct_ops maps in the system.
+ * The fd is returned in "*res_fd" and the map_info in "*info".
+ * In the very first iteration, info->id must be 0.
+ * An optional map "*name" filter can be specified.
+ * The filter could be made more flexible in the future,
+ * e.g. filter by kernel-struct-ops-name, regex-name, glob-name, etc.
+ *
+ * Return value:
+ *     1: A struct_ops map was found.  It is returned in "*res_fd" and
+ *        "*info".  The caller can keep calling get_next.
+ *     0: No struct_ops map is returned.
+ *        All struct_ops maps have been found.
+ *    -1: Error; the caller should abort the iteration.
+ */
+static int get_next_struct_ops_map(const char *name, int *res_fd,
+ struct bpf_map_info *info, __u32 info_len)
+{
+ __u32 id = info->id;
+ int err, fd;
+
+ while (true) {
+ err = bpf_map_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT)
+ return 0;
+ p_err("can't get next map: %s", strerror(errno));
+ return -1;
+ }
+
+ fd = bpf_map_get_fd_by_id(id);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ continue;
+ p_err("can't get map by id (%u): %s",
+ id, strerror(errno));
+ return -1;
+ }
+
+ err = bpf_obj_get_info_by_fd(fd, info, &info_len);
+ if (err) {
+ p_err("can't get map info: %s", strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ if (info->type == BPF_MAP_TYPE_STRUCT_OPS &&
+ (!name || !strcmp(name, info->name))) {
+ *res_fd = fd;
+ return 1;
+ }
+ close(fd);
+ }
+}
+
+static int cmd_retval(const struct res *res, bool must_have_one_map)
+{
+ if (res->nr_errs || (!res->nr_maps && must_have_one_map))
+ return -1;
+
+ return 0;
+}
+
+/* "data" is the work_func private storage */
+typedef int (*work_func)(int fd, const struct bpf_map_info *info, void *data,
+ struct json_writer *wtr);
+
+/* Find all struct_ops maps in the system.
+ * Filter them by "name" (if specified).
+ * Then call "func(fd, info, data, wtr)" on each struct_ops map found.
+ */
+static struct res do_search(const char *name, work_func func, void *data,
+ struct json_writer *wtr)
+{
+ struct bpf_map_info *info;
+ struct res res = {};
+ __u32 info_len;
+ int fd, err;
+
+ info = map_info_alloc(&info_len);
+ if (!info) {
+ res.nr_errs++;
+ return res;
+ }
+
+ if (wtr)
+ jsonw_start_array(wtr);
+ while ((err = get_next_struct_ops_map(name, &fd, info, info_len)) == 1) {
+ res.nr_maps++;
+ err = func(fd, info, data, wtr);
+ if (err)
+ res.nr_errs++;
+ close(fd);
+ }
+ if (wtr)
+ jsonw_end_array(wtr);
+
+ if (err)
+ res.nr_errs++;
+
+ if (!wtr && name && !res.nr_errs && !res.nr_maps)
+ /* We are not printing an empty [] here, so we need
+ * to say explicitly that nothing was found for
+ * "name".
+ */
+ p_err("no struct_ops found for %s", name);
+ else if (!wtr && json_output && !res.nr_errs)
+ /* The "func()" above is not writing any json (i.e. the
+ * !wtr test here).
+ *
+ * However, "-j" is enabled and there are no errors here,
+ * so call json_null() to follow the convention of the
+ * other cmds.
+ */
+ jsonw_null(json_wtr);
+
+ free(info);
+ return res;
+}
+
+static struct res do_one_id(const char *id_str, work_func func, void *data,
+ struct json_writer *wtr)
+{
+ struct bpf_map_info *info;
+ struct res res = {};
+ unsigned long id;
+ __u32 info_len;
+ char *endptr;
+ int fd;
+
+ id = strtoul(id_str, &endptr, 0);
+ if (*endptr || !id || id > UINT32_MAX) {
+ p_err("invalid id %s", id_str);
+ res.nr_errs++;
+ return res;
+ }
+
+ fd = bpf_map_get_fd_by_id(id);
+ if (fd == -1) {
+ p_err("can't get map by id (%lu): %s", id, strerror(errno));
+ res.nr_errs++;
+ return res;
+ }
+
+ info = map_info_alloc(&info_len);
+ if (!info) {
+ res.nr_errs++;
+ goto done;
+ }
+
+ if (bpf_obj_get_info_by_fd(fd, info, &info_len)) {
+ p_err("can't get map info: %s", strerror(errno));
+ res.nr_errs++;
+ goto done;
+ }
+
+ if (info->type != BPF_MAP_TYPE_STRUCT_OPS) {
+ p_err("%s id %u is not a struct_ops map", info->name, info->id);
+ res.nr_errs++;
+ goto done;
+ }
+
+ res.nr_maps++;
+
+ if (func(fd, info, data, wtr))
+ res.nr_errs++;
+ else if (!wtr && json_output)
+ /* The "func()" above is not writing any json (i.e. the
+ * !wtr test here).
+ *
+ * However, "-j" is enabled and there are no errors here,
+ * so call json_null() to follow the convention of the
+ * other cmds.
+ */
+ jsonw_null(json_wtr);
+
+done:
+ free(info);
+ close(fd);
+
+ return res;
+}
+
+static struct res do_work_on_struct_ops(const char *search_type,
+ const char *search_term,
+ work_func func, void *data,
+ struct json_writer *wtr)
+{
+ if (search_type) {
+ if (is_prefix(search_type, "id"))
+ return do_one_id(search_term, func, data, wtr);
+ else if (!is_prefix(search_type, "name"))
+ usage();
+ }
+
+ return do_search(search_term, func, data, wtr);
+}
+
+static int __do_show(int fd, const struct bpf_map_info *info, void *data,
+ struct json_writer *wtr)
+{
+ if (wtr) {
+ jsonw_start_object(wtr);
+ jsonw_uint_field(wtr, "id", info->id);
+ jsonw_string_field(wtr, "name", info->name);
+ jsonw_string_field(wtr, "kernel_struct_ops",
+ get_kern_struct_ops_name(info));
+ jsonw_end_object(wtr);
+ } else {
+ printf("%u: %-15s %-32s\n", info->id, info->name,
+ get_kern_struct_ops_name(info));
+ }
+
+ return 0;
+}
+
+static int do_show(int argc, char **argv)
+{
+ const char *search_type = NULL, *search_term = NULL;
+ struct res res;
+
+ if (argc && argc != 2)
+ usage();
+
+ if (argc == 2) {
+ search_type = GET_ARG();
+ search_term = GET_ARG();
+ }
+
+ res = do_work_on_struct_ops(search_type, search_term, __do_show,
+ NULL, json_wtr);
+
+ return cmd_retval(&res, !!search_term);
+}
+
+static int __do_dump(int fd, const struct bpf_map_info *info, void *data,
+ struct json_writer *wtr)
+{
+ struct btf_dumper *d = (struct btf_dumper *)data;
+ const struct btf_type *struct_ops_type;
+ const struct btf *kern_btf = d->btf;
+ const char *struct_ops_name;
+ int zero = 0;
+ void *value;
+
+ /* note: d->jw == wtr */
+
+ /* The kernel supporting BPF_MAP_TYPE_STRUCT_OPS must have
+ * btf_vmlinux_value_type_id.
+ */
+ struct_ops_type = btf__type_by_id(kern_btf,
+ info->btf_vmlinux_value_type_id);
+ struct_ops_name = btf__name_by_offset(kern_btf,
+ struct_ops_type->name_off);
+ value = calloc(1, info->value_size);
+ if (!value) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+
+ if (bpf_map_lookup_elem(fd, &zero, value)) {
+ p_err("can't lookup struct_ops map %s id %u",
+ info->name, info->id);
+ free(value);
+ return -1;
+ }
+
+ jsonw_start_object(wtr);
+ jsonw_name(wtr, "bpf_map_info");
+ btf_dumper_type(d, map_info_type_id, (void *)info);
+ jsonw_end_object(wtr);
+
+ jsonw_start_object(wtr);
+ jsonw_name(wtr, struct_ops_name);
+ btf_dumper_type(d, info->btf_vmlinux_value_type_id, value);
+ jsonw_end_object(wtr);
+
+ free(value);
+
+ return 0;
+}
+
+static int do_dump(int argc, char **argv)
+{
+ const char *search_type = NULL, *search_term = NULL;
+ json_writer_t *wtr = json_wtr;
+ const struct btf *kern_btf;
+ struct btf_dumper d = {};
+ struct res res;
+
+ if (argc && argc != 2)
+ usage();
+
+ if (argc == 2) {
+ search_type = GET_ARG();
+ search_term = GET_ARG();
+ }
+
+ kern_btf = get_btf_vmlinux();
+ if (IS_ERR(kern_btf))
+ return -1;
+
+ if (!json_output) {
+ wtr = jsonw_new(stdout);
+ if (!wtr) {
+ p_err("can't create json writer");
+ return -1;
+ }
+ jsonw_pretty(wtr, true);
+ }
+
+ d.btf = kern_btf;
+ d.jw = wtr;
+ d.is_plain_text = !json_output;
+ d.prog_id_as_func_ptr = true;
+
+ res = do_work_on_struct_ops(search_type, search_term, __do_dump, &d,
+ wtr);
+
+ if (!json_output)
+ jsonw_destroy(&wtr);
+
+ return cmd_retval(&res, !!search_term);
+}
+
+static int __do_unregister(int fd, const struct bpf_map_info *info, void *data,
+ struct json_writer *wtr)
+{
+ int zero = 0;
+
+ if (bpf_map_delete_elem(fd, &zero)) {
+ p_err("can't unload %s %s id %u: %s",
+ get_kern_struct_ops_name(info), info->name,
+ info->id, strerror(errno));
+ return -1;
+ }
+
+ p_info("Unregistered %s %s id %u",
+ get_kern_struct_ops_name(info), info->name,
+ info->id);
+
+ return 0;
+}
+
+static int do_unregister(int argc, char **argv)
+{
+ const char *search_type, *search_term;
+ struct res res;
+
+ if (argc != 2)
+ usage();
+
+ search_type = GET_ARG();
+ search_term = GET_ARG();
+
+ res = do_work_on_struct_ops(search_type, search_term,
+ __do_unregister, NULL, NULL);
+
+ return cmd_retval(&res, true);
+}
+
+static int do_register(int argc, char **argv)
+{
+ const struct bpf_map_def *def;
+ struct bpf_map_info info = {};
+ __u32 info_len = sizeof(info);
+ int nr_errs = 0, nr_maps = 0;
+ struct bpf_object *obj;
+ struct bpf_link *link;
+ struct bpf_map *map;
+ const char *file;
+
+ if (argc != 1)
+ usage();
+
+ file = GET_ARG();
+
+ obj = bpf_object__open(file);
+ if (IS_ERR_OR_NULL(obj))
+ return -1;
+
+ set_max_rlimit();
+
+ if (bpf_object__load(obj)) {
+ bpf_object__close(obj);
+ return -1;
+ }
+
+ bpf_object__for_each_map(map, obj) {
+ def = bpf_map__def(map);
+ if (def->type != BPF_MAP_TYPE_STRUCT_OPS)
+ continue;
+
+ link = bpf_map__attach_struct_ops(map);
+ if (IS_ERR(link)) {
+ p_err("can't register struct_ops %s: %s",
+ bpf_map__name(map),
+ strerror(-PTR_ERR(link)));
+ nr_errs++;
+ continue;
+ }
+ nr_maps++;
+
+ bpf_link__disconnect(link);
+ bpf_link__destroy(link);
+
+ if (!bpf_obj_get_info_by_fd(bpf_map__fd(map), &info,
+ &info_len))
+ p_info("Registered %s %s id %u",
+ get_kern_struct_ops_name(&info),
+ bpf_map__name(map),
+ info.id);
+ else
+ /* Not p_err. The struct_ops was attached
+ * successfully.
+ */
+ p_info("Registered %s but can't find id: %s",
+ bpf_map__name(map), strerror(errno));
+ }
+
+ bpf_object__close(obj);
+
+ if (nr_errs)
+ return -1;
+
+ if (!nr_maps) {
+ p_err("no struct_ops found in %s", file);
+ return -1;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ return 0;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %s %s { show | list } [STRUCT_OPS_MAP]\n"
+ " %s %s dump [STRUCT_OPS_MAP]\n"
+ " %s %s register OBJ\n"
+ " %s %s unregister STRUCT_OPS_MAP\n"
+ " %s %s help\n"
+ "\n"
+ " OPTIONS := { {-j|--json} [{-p|--pretty}] }\n"
+ " STRUCT_OPS_MAP := [ id STRUCT_OPS_MAP_ID | name STRUCT_OPS_MAP_NAME ]\n",
+ bin_name, argv[-2], bin_name, argv[-2],
+ bin_name, argv[-2], bin_name, argv[-2],
+ bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "register", do_register },
+ { "unregister", do_unregister },
+ { "dump", do_dump },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_struct_ops(int argc, char **argv)
+{
+ int err;
+
+ err = cmd_select(cmds, argc, argv, do_help);
+
+ btf__free(btf_vmlinux);
+ return err;
+}