summaryrefslogtreecommitdiffstats
path: root/tools/testing/selftests/bpf/progs/core_kern.c
blob: 13499cc15c7d0c0694ff9865d4aa5a5e5a3514a9 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

#define ATTR __always_inline
#include "test_jhash.h"

/* 256-slot u32 -> u32 array; randmap() below picks between array1 and
 * array2 at random and scribbles a random value into a random slot.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 256);
} array1 SEC(".maps");

/* Second 256-slot u32 -> u32 array, identical layout to array1; used as
 * the alternate target of randmap()'s random map selection.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 256);
} array2 SEC(".maps");

/* Store a pseudo-random value into a random slot of either array1 or
 * array2 (chosen by a random bit).  Mixes v and dev->mtu into the
 * stored value so both arguments are actually used.
 *
 * __noinline keeps this as a real BPF subprogram call from each caller.
 * Always returns 0.
 */
static __noinline int randmap(int v, const struct net_device *dev)
{
	int idx = bpf_get_prandom_u32() & 0xff;
	struct bpf_map *m;
	int *slot;

	m = (bpf_get_prandom_u32() & 1) ? (struct bpf_map *)&array2
					: (struct bpf_map *)&array1;

	slot = bpf_map_lookup_elem(m, &idx);
	if (slot)
		*slot = bpf_get_prandom_u32() + v + dev->mtu;

	return 0;
}

/* BTF-typed tracepoint program on xdp_devmap_xmit: feeds the source
 * device's ifindex (a CO-RE field read) into randmap().
 */
SEC("tp_btf/xdp_devmap_xmit")
int BPF_PROG(tp_xdp_devmap_xmit_multi, const struct net_device
	     *from_dev, const struct net_device *to_dev, int sent, int drops,
	     int err)
{
	return randmap(from_dev->ifindex, from_dev);
}

/* fentry program on eth_type_trans: combines dev->ifindex and skb->len
 * (both CO-RE field reads) and passes the sum to randmap().
 */
SEC("fentry/eth_type_trans")
int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb,
	     struct net_device *dev, unsigned short protocol)
{
	return randmap(dev->ifindex + skb->len, dev);
}

/* fexit counterpart of fentry_eth_type_trans above: same computation,
 * attached at function exit instead of entry.
 */
SEC("fexit/eth_type_trans")
int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb,
	     struct net_device *dev, unsigned short protocol)
{
	return randmap(dev->ifindex + skb->len, dev);
}

/* Read-only global, never assigned in this file; volatile stops the
 * compiler from folding it to 0, so the `if (never)` branch below is
 * emitted as (runtime-dead) code that the loader must still relocate.
 */
volatile const int never;

/* Deliberately misspelled tag: its CO-RE relocation cannot resolve
 * against vmlinux BTF, exercising unresolvable-relo handling when the
 * access sits in dead code.
 */
struct __sk_bUfF /* it will not exist in vmlinux */ {
	int len;
} __attribute__((preserve_access_index));

/* Mirrors a type declared by the bpf_testmod test module; its CO-RE
 * relocation resolves only when that module is loaded.
 */
struct bpf_testmod_test_read_ctx /* it exists in bpf_testmod */ {
	size_t len;
} __attribute__((preserve_access_index));

/* TC classifier that jhash()es 90 overlapping 14-byte windows of the
 * packet into ctx->tc_index, manually unrolled via the C/C30 macros to
 * stress the verifier.  The `if (never)` arm embeds CO-RE relocations
 * (one unresolvable, one module-dependent) inside runtime-dead code.
 * Always returns 0 (ret is set but never used).
 */
SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	void *ptr;
	int ret = 0, nh_off, i = 0;

	nh_off = 14;

	/* pragma unroll doesn't work on large loops */
	/* One hash step; `break` exits the do/while(0), stopping this
	 * step when the window would run past data_end.
	 */
#define C do { \
	ptr = data + i; \
	if (ptr + nh_off > data_end) \
		break; \
	ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
	if (never) { \
		/* below is a dead code with unresolvable CO-RE relo */ \
		i += ((struct __sk_bUfF *)ctx)->len; \
		/* this CO-RE relo may or may not resolve
		 * depending on whether bpf_testmod is loaded.
		 */ \
		i += ((struct bpf_testmod_test_read_ctx *)ctx)->len; \
	} \
	} while (0);
	/* 30 steps per C30; used three times below for 90 in total. */
#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
	C30;C30;C30; /* 90 calls */
	return 0;
}

/* Program license string, read from the "license" section at load time. */
char LICENSE[] SEC("license") = "GPL";