author     David S. Miller <davem@davemloft.net>  2018-11-26 13:08:17 -0800
committer  David S. Miller <davem@davemloft.net>  2018-11-26 13:08:17 -0800
commit     4afe60a97ba6ffacc4d030b13653dc64099fea26 (patch)
tree       282370f57c3681e87154ec0d503e459a5aaec093 /tools/testing/selftests/bpf/test_maps.c
parent     4bffc669d6248d655aeb985a0e51bfaaf21c8b40 (diff)
parent     ffac28f95a98a87db0850801cd98771a08bb1dec (diff)
download   linux-4afe60a97ba6ffacc4d030b13653dc64099fea26.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2018-11-26

The following pull-request contains BPF updates for your *net-next*
tree. The main changes are:

1) Extend BTF to support function call types and improve the BPF
   symbol handling with this info for kallsyms and bpftool program
   dump to make debugging easier, from Martin and Yonghong.

2) Optimize LPM lookups by making longest_prefix_match() handle
   multiple bytes at a time, from Eric.

3) Adds support for loading and attaching flow dissector BPF progs
   from bpftool, from Stanislav.

4) Extend the sk_lookup() helper to be supported from XDP, from Nitin.

5) Enable verifier to support narrow context loads with offset > 0
   to adapt to LLVM code generation (currently only offset of 0 was
   supported). Add test cases as well, from Andrey.

6) Simplify passing device functions for offloaded BPF progs by
   adding callbacks to bpf_prog_offload_ops instead of ndo_bpf. Also
   convert nfp and netdevsim to make use of them, from Quentin.

7) Add support for sock_ops based BPF programs to send events to the
   perf ring-buffer through perf_event_output helper, from Sowmini
   and Daniel.

8) Add read / write support for skb->tstamp from tc BPF and cg BPF
   programs to allow for supporting rate-limiting in EDT qdiscs like
   fq from BPF side, from Vlad.

9) Extend libbpf API to support map in map types and add test cases
   for it as well to BPF kselftests, from Nikita.

10) Account the maximum packet offset accessed by a BPF program in
    the verifier and use it for optimizing nfp JIT, from Jiong.

11) Fix error handling regarding kprobe_events in BPF sample loader,
    from Daniel T.

12) Add support for queue and stack map type in bpftool, from David.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
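As a concrete illustration of item 8 above, here is a hedged sketch (not code from the series itself) of a tc BPF program that reads skb->tstamp and pushes the departure time 1 ms into the future, the kind of earliest-departure-time marking an EDT-aware qdisc such as fq can act on. It assumes clang's BPF target, a selftests-style "bpf_helpers.h" providing SEC(), and an arbitrary illustrative delay value.

/* Hedged sketch: tc (BPF_PROG_TYPE_SCHED_CLS) program exercising the
 * new read/write access to skb->tstamp described in item 8 above.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("classifier")
int edt_delay(struct __sk_buff *skb)
{
	__u64 tstamp = skb->tstamp;                /* read access from tc */

	if (tstamp)                                /* only delay stamped skbs */
		skb->tstamp = tstamp + 1000000ULL; /* write access: +1 ms */

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";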
Diffstat (limited to 'tools/testing/selftests/bpf/test_maps.c')
-rw-r--r--  tools/testing/selftests/bpf/test_maps.c  154
1 file changed, 145 insertions, 9 deletions
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 4db2116e52be..9c79ee017df3 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -258,24 +258,36 @@ static void test_hashmap_percpu(int task, void *data)
close(fd);
}
-static void test_hashmap_walk(int task, void *data)
+static int helper_fill_hashmap(int max_entries)
{
- int fd, i, max_entries = 1000;
- long long key, value, next_key;
- bool next_key_valid = true;
+ int i, fd, ret;
+ long long key, value;
fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
max_entries, map_flags);
- if (fd < 0) {
- printf("Failed to create hashmap '%s'!\n", strerror(errno));
- exit(1);
- }
+ CHECK(fd < 0,
+ "failed to create hashmap",
+ "err: %s, flags: 0x%x\n", strerror(errno), map_flags);
for (i = 0; i < max_entries; i++) {
key = i; value = key;
- assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
+ ret = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+ CHECK(ret != 0,
+ "can't update hashmap",
+ "err: %s\n", strerror(ret));
}
+ return fd;
+}
+
+static void test_hashmap_walk(int task, void *data)
+{
+ int fd, i, max_entries = 1000;
+ long long key, value, next_key;
+ bool next_key_valid = true;
+
+ fd = helper_fill_hashmap(max_entries);
+
for (i = 0; bpf_map_get_next_key(fd, !i ? NULL : &key,
&next_key) == 0; i++) {
key = next_key;
@@ -306,6 +318,39 @@ static void test_hashmap_walk(int task, void *data)
close(fd);
}
+static void test_hashmap_zero_seed(void)
+{
+ int i, first, second, old_flags;
+ long long key, next_first, next_second;
+
+ old_flags = map_flags;
+ map_flags |= BPF_F_ZERO_SEED;
+
+ first = helper_fill_hashmap(3);
+ second = helper_fill_hashmap(3);
+
+ for (i = 0; ; i++) {
+ void *key_ptr = !i ? NULL : &key;
+
+ if (bpf_map_get_next_key(first, key_ptr, &next_first) != 0)
+ break;
+
+ CHECK(bpf_map_get_next_key(second, key_ptr, &next_second) != 0,
+ "next_key for second map must succeed",
+ "key_ptr: %p", key_ptr);
+ CHECK(next_first != next_second,
+ "keys must match",
+ "i: %d first: %lld second: %lld\n", i,
+ next_first, next_second);
+
+ key = next_first;
+ }
+
+ map_flags = old_flags;
+ close(first);
+ close(second);
+}
+
static void test_arraymap(int task, void *data)
{
int key, next_key, fd;
@@ -1080,6 +1125,94 @@ out_sockmap:
exit(1);
}
+#define MAPINMAP_PROG "./test_map_in_map.o"
+static void test_map_in_map(void)
+{
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+ int mim_fd, fd, err;
+ int pos = 0;
+
+ obj = bpf_object__open(MAPINMAP_PROG);
+
+ fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int), sizeof(int),
+ 2, 0);
+ if (fd < 0) {
+ printf("Failed to create hashmap '%s'!\n", strerror(errno));
+ exit(1);
+ }
+
+ map = bpf_object__find_map_by_name(obj, "mim_array");
+ if (IS_ERR(map)) {
+ printf("Failed to load array of maps from test prog\n");
+ goto out_map_in_map;
+ }
+ err = bpf_map__set_inner_map_fd(map, fd);
+ if (err) {
+ printf("Failed to set inner_map_fd for array of maps\n");
+ goto out_map_in_map;
+ }
+
+ map = bpf_object__find_map_by_name(obj, "mim_hash");
+ if (IS_ERR(map)) {
+ printf("Failed to load hash of maps from test prog\n");
+ goto out_map_in_map;
+ }
+ err = bpf_map__set_inner_map_fd(map, fd);
+ if (err) {
+ printf("Failed to set inner_map_fd for hash of maps\n");
+ goto out_map_in_map;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ bpf_program__set_xdp(prog);
+ }
+ bpf_object__load(obj);
+
+ map = bpf_object__find_map_by_name(obj, "mim_array");
+ if (IS_ERR(map)) {
+ printf("Failed to load array of maps from test prog\n");
+ goto out_map_in_map;
+ }
+ mim_fd = bpf_map__fd(map);
+ if (mim_fd < 0) {
+ printf("Failed to get descriptor for array of maps\n");
+ goto out_map_in_map;
+ }
+
+ err = bpf_map_update_elem(mim_fd, &pos, &fd, 0);
+ if (err) {
+ printf("Failed to update array of maps\n");
+ goto out_map_in_map;
+ }
+
+ map = bpf_object__find_map_by_name(obj, "mim_hash");
+ if (IS_ERR(map)) {
+ printf("Failed to load hash of maps from test prog\n");
+ goto out_map_in_map;
+ }
+ mim_fd = bpf_map__fd(map);
+ if (mim_fd < 0) {
+ printf("Failed to get descriptor for hash of maps\n");
+ goto out_map_in_map;
+ }
+
+ err = bpf_map_update_elem(mim_fd, &pos, &fd, 0);
+ if (err) {
+ printf("Failed to update hash of maps\n");
+ goto out_map_in_map;
+ }
+
+ close(fd);
+ bpf_object__close(obj);
+ return;
+
+out_map_in_map:
+ close(fd);
+ exit(1);
+}
+
#define MAP_SIZE (32 * 1024)
static void test_map_large(void)
@@ -1534,6 +1667,7 @@ static void run_all_tests(void)
test_hashmap(0, NULL);
test_hashmap_percpu(0, NULL);
test_hashmap_walk(0, NULL);
+ test_hashmap_zero_seed();
test_arraymap(0, NULL);
test_arraymap_percpu(0, NULL);
@@ -1554,6 +1688,8 @@ static void run_all_tests(void)
test_queuemap(0, NULL);
test_stackmap(0, NULL);
+
+ test_map_in_map();
}
int main(void)
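As a companion to the test_map_in_map() loader in the diff above, here is a hedged sketch of what the BPF side referenced as MAPINMAP_PROG could look like; the actual test_map_in_map.c in the tree may differ. It assumes the struct bpf_map_def SEC("maps") convention and "bpf_helpers.h" of selftests from that era, reuses the mim_array/mim_hash names the userspace code looks up, and keeps the inner-map value size at sizeof(__u32) as map-in-map requires.

/* Hedged sketch of the BPF side of MAPINMAP_PROG (assumed, not the
 * in-tree source): an array-of-maps and a hash-of-maps whose inner
 * map is installed by the userspace test above, plus an XDP program
 * that does the two-step lookup through the outer map.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") mim_array = {
	.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
	.key_size = sizeof(int),
	.value_size = sizeof(__u32),	/* must be 4 bytes for map-in-map */
	.max_entries = 1,
};

struct bpf_map_def SEC("maps") mim_hash = {
	.type = BPF_MAP_TYPE_HASH_OF_MAPS,
	.key_size = sizeof(int),
	.value_size = sizeof(__u32),
	.max_entries = 1,
};

SEC("xdp_mimtest")
int xdp_mimtest0(struct xdp_md *ctx)
{
	int key = 0;
	void *inner_map;

	/* first lookup resolves the inner map, second one its element */
	inner_map = bpf_map_lookup_elem(&mim_array, &key);
	if (!inner_map)
		return XDP_PASS;
	bpf_map_lookup_elem(inner_map, &key);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";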