Codebase list suricata / 45315ae
remove obsolete patches Sascha Steinbiss 4 years ago
3 changed file(s) with 0 addition(s) and 2398 deletion(s). Raw diff Collapse all Expand all
+0
-2289
debian/patches/add-missing-ebpf-programs.patch less more
0 From: Hilko Bengen <bengen@debian.org>
1 Date: Wed, 10 Jul 2019 13:34:31 +0200
2 Subject: Add missing ebpf/*.[ch]
3
4 ---
5 ebpf/bpf_helpers.h | 262 ++++++++++
6 ebpf/bypass_filter.c | 225 +++++++++
7 ebpf/filter.c | 58 +++
8 ebpf/hash_func01.h | 55 +++
9 ebpf/include/linux/bpf.h | 1027 +++++++++++++++++++++++++++++++++++++++
10 ebpf/include/linux/bpf_common.h | 57 +++
11 ebpf/lb.c | 109 +++++
12 ebpf/vlan_filter.c | 40 ++
13 ebpf/xdp_filter.c | 377 ++++++++++++++
14 9 files changed, 2210 insertions(+)
15 create mode 100644 ebpf/bpf_helpers.h
16 create mode 100644 ebpf/bypass_filter.c
17 create mode 100644 ebpf/filter.c
18 create mode 100644 ebpf/hash_func01.h
19 create mode 100644 ebpf/include/linux/bpf.h
20 create mode 100644 ebpf/include/linux/bpf_common.h
21 create mode 100644 ebpf/lb.c
22 create mode 100644 ebpf/vlan_filter.c
23 create mode 100644 ebpf/xdp_filter.c
24
25 diff --git a/ebpf/bpf_helpers.h b/ebpf/bpf_helpers.h
26 new file mode 100644
27 index 0000000..33cb00e
28 --- /dev/null
29 +++ b/ebpf/bpf_helpers.h
30 @@ -0,0 +1,262 @@
31 +/* SPDX-License-Identifier: GPL-2.0 */
32 +#ifndef __BPF_HELPERS_H
33 +#define __BPF_HELPERS_H
34 +
35 +/* helper macro to place programs, maps, license in
36 + * different sections in elf_bpf file. Section names
37 + * are interpreted by elf_bpf loader
38 + */
39 +#define SEC(NAME) __attribute__((section(NAME), used))
40 +
41 +/* helper functions called from eBPF programs written in C */
42 +static void *(*bpf_map_lookup_elem)(void *map, void *key) =
43 + (void *) BPF_FUNC_map_lookup_elem;
44 +static int (*bpf_map_update_elem)(void *map, void *key, void *value,
45 + unsigned long long flags) =
46 + (void *) BPF_FUNC_map_update_elem;
47 +static int (*bpf_map_delete_elem)(void *map, void *key) =
48 + (void *) BPF_FUNC_map_delete_elem;
49 +static int (*bpf_probe_read)(void *dst, int size, void *unsafe_ptr) =
50 + (void *) BPF_FUNC_probe_read;
51 +static unsigned long long (*bpf_ktime_get_ns)(void) =
52 + (void *) BPF_FUNC_ktime_get_ns;
53 +static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
54 + (void *) BPF_FUNC_trace_printk;
55 +static void (*bpf_tail_call)(void *ctx, void *map, int index) =
56 + (void *) BPF_FUNC_tail_call;
57 +static unsigned long long (*bpf_get_smp_processor_id)(void) =
58 + (void *) BPF_FUNC_get_smp_processor_id;
59 +static unsigned long long (*bpf_get_current_pid_tgid)(void) =
60 + (void *) BPF_FUNC_get_current_pid_tgid;
61 +static unsigned long long (*bpf_get_current_uid_gid)(void) =
62 + (void *) BPF_FUNC_get_current_uid_gid;
63 +static int (*bpf_get_current_comm)(void *buf, int buf_size) =
64 + (void *) BPF_FUNC_get_current_comm;
65 +static unsigned long long (*bpf_perf_event_read)(void *map,
66 + unsigned long long flags) =
67 + (void *) BPF_FUNC_perf_event_read;
68 +static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
69 + (void *) BPF_FUNC_clone_redirect;
70 +static int (*bpf_redirect)(int ifindex, int flags) =
71 + (void *) BPF_FUNC_redirect;
72 +static int (*bpf_redirect_map)(void *map, int key, int flags) =
73 + (void *) BPF_FUNC_redirect_map;
74 +static int (*bpf_perf_event_output)(void *ctx, void *map,
75 + unsigned long long flags, void *data,
76 + int size) =
77 + (void *) BPF_FUNC_perf_event_output;
78 +static int (*bpf_get_stackid)(void *ctx, void *map, int flags) =
79 + (void *) BPF_FUNC_get_stackid;
80 +static int (*bpf_probe_write_user)(void *dst, void *src, int size) =
81 + (void *) BPF_FUNC_probe_write_user;
82 +static int (*bpf_current_task_under_cgroup)(void *map, int index) =
83 + (void *) BPF_FUNC_current_task_under_cgroup;
84 +static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size, int flags) =
85 + (void *) BPF_FUNC_skb_get_tunnel_key;
86 +static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size, int flags) =
87 + (void *) BPF_FUNC_skb_set_tunnel_key;
88 +static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) =
89 + (void *) BPF_FUNC_skb_get_tunnel_opt;
90 +static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) =
91 + (void *) BPF_FUNC_skb_set_tunnel_opt;
92 +static unsigned long long (*bpf_get_prandom_u32)(void) =
93 + (void *) BPF_FUNC_get_prandom_u32;
94 +static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
95 + (void *) BPF_FUNC_xdp_adjust_head;
96 +static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) =
97 + (void *) BPF_FUNC_xdp_adjust_meta;
98 +static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
99 + int optlen) =
100 + (void *) BPF_FUNC_setsockopt;
101 +static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval,
102 + int optlen) =
103 + (void *) BPF_FUNC_getsockopt;
104 +static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
105 + (void *) BPF_FUNC_sk_redirect_map;
106 +static int (*bpf_sock_map_update)(void *map, void *key, void *value,
107 + unsigned long long flags) =
108 + (void *) BPF_FUNC_sock_map_update;
109 +static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags,
110 + void *buf, unsigned int buf_size) =
111 + (void *) BPF_FUNC_perf_event_read_value;
112 +static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
113 + unsigned int buf_size) =
114 + (void *) BPF_FUNC_perf_prog_read_value;
115 +static int (*bpf_override_return)(void *ctx, unsigned long rc) =
116 + (void *) BPF_FUNC_override_return;
117 +
118 +/* llvm builtin functions that eBPF C program may use to
119 + * emit BPF_LD_ABS and BPF_LD_IND instructions
120 + */
121 +struct sk_buff;
122 +unsigned long long load_byte(void *skb,
123 + unsigned long long off) asm("llvm.bpf.load.byte");
124 +unsigned long long load_half(void *skb,
125 + unsigned long long off) asm("llvm.bpf.load.half");
126 +unsigned long long load_word(void *skb,
127 + unsigned long long off) asm("llvm.bpf.load.word");
128 +
129 +/* a helper structure used by eBPF C program
130 + * to describe map attributes to elf_bpf loader
131 + */
132 +struct bpf_map_def {
133 + unsigned int type;
134 + unsigned int key_size;
135 + unsigned int value_size;
136 + unsigned int max_entries;
137 + unsigned int map_flags;
138 + unsigned int inner_map_idx;
139 + unsigned int numa_node;
140 +};
141 +
142 +static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
143 + (void *) BPF_FUNC_skb_load_bytes;
144 +static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
145 + (void *) BPF_FUNC_skb_store_bytes;
146 +static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
147 + (void *) BPF_FUNC_l3_csum_replace;
148 +static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
149 + (void *) BPF_FUNC_l4_csum_replace;
150 +static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
151 + (void *) BPF_FUNC_skb_under_cgroup;
152 +static int (*bpf_skb_change_head)(void *, int len, int flags) =
153 + (void *) BPF_FUNC_skb_change_head;
154 +
155 +/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
156 +#if defined(__TARGET_ARCH_x86)
157 + #define bpf_target_x86
158 + #define bpf_target_defined
159 +#elif defined(__TARGET_ARCH_s930x)
160 + #define bpf_target_s930x
161 + #define bpf_target_defined
162 +#elif defined(__TARGET_ARCH_arm64)
163 + #define bpf_target_arm64
164 + #define bpf_target_defined
165 +#elif defined(__TARGET_ARCH_mips)
166 + #define bpf_target_mips
167 + #define bpf_target_defined
168 +#elif defined(__TARGET_ARCH_powerpc)
169 + #define bpf_target_powerpc
170 + #define bpf_target_defined
171 +#elif defined(__TARGET_ARCH_sparc)
172 + #define bpf_target_sparc
173 + #define bpf_target_defined
174 +#else
175 + #undef bpf_target_defined
176 +#endif
177 +
178 +/* Fall back to what the compiler says */
179 +#ifndef bpf_target_defined
180 +#if defined(__x86_64__)
181 + #define bpf_target_x86
182 +#elif defined(__s390x__)
183 + #define bpf_target_s930x
184 +#elif defined(__aarch64__)
185 + #define bpf_target_arm64
186 +#elif defined(__mips__)
187 + #define bpf_target_mips
188 +#elif defined(__powerpc__)
189 + #define bpf_target_powerpc
190 +#elif defined(__sparc__)
191 + #define bpf_target_sparc
192 +#endif
193 +#endif
194 +
195 +#if defined(bpf_target_x86)
196 +
197 +#define PT_REGS_PARM1(x) ((x)->di)
198 +#define PT_REGS_PARM2(x) ((x)->si)
199 +#define PT_REGS_PARM3(x) ((x)->dx)
200 +#define PT_REGS_PARM4(x) ((x)->cx)
201 +#define PT_REGS_PARM5(x) ((x)->r8)
202 +#define PT_REGS_RET(x) ((x)->sp)
203 +#define PT_REGS_FP(x) ((x)->bp)
204 +#define PT_REGS_RC(x) ((x)->ax)
205 +#define PT_REGS_SP(x) ((x)->sp)
206 +#define PT_REGS_IP(x) ((x)->ip)
207 +
208 +#elif defined(bpf_target_s390x)
209 +
210 +#define PT_REGS_PARM1(x) ((x)->gprs[2])
211 +#define PT_REGS_PARM2(x) ((x)->gprs[3])
212 +#define PT_REGS_PARM3(x) ((x)->gprs[4])
213 +#define PT_REGS_PARM4(x) ((x)->gprs[5])
214 +#define PT_REGS_PARM5(x) ((x)->gprs[6])
215 +#define PT_REGS_RET(x) ((x)->gprs[14])
216 +#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
217 +#define PT_REGS_RC(x) ((x)->gprs[2])
218 +#define PT_REGS_SP(x) ((x)->gprs[15])
219 +#define PT_REGS_IP(x) ((x)->psw.addr)
220 +
221 +#elif defined(bpf_target_arm64)
222 +
223 +#define PT_REGS_PARM1(x) ((x)->regs[0])
224 +#define PT_REGS_PARM2(x) ((x)->regs[1])
225 +#define PT_REGS_PARM3(x) ((x)->regs[2])
226 +#define PT_REGS_PARM4(x) ((x)->regs[3])
227 +#define PT_REGS_PARM5(x) ((x)->regs[4])
228 +#define PT_REGS_RET(x) ((x)->regs[30])
229 +#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
230 +#define PT_REGS_RC(x) ((x)->regs[0])
231 +#define PT_REGS_SP(x) ((x)->sp)
232 +#define PT_REGS_IP(x) ((x)->pc)
233 +
234 +#elif defined(bpf_target_mips)
235 +
236 +#define PT_REGS_PARM1(x) ((x)->regs[4])
237 +#define PT_REGS_PARM2(x) ((x)->regs[5])
238 +#define PT_REGS_PARM3(x) ((x)->regs[6])
239 +#define PT_REGS_PARM4(x) ((x)->regs[7])
240 +#define PT_REGS_PARM5(x) ((x)->regs[8])
241 +#define PT_REGS_RET(x) ((x)->regs[31])
242 +#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
243 +#define PT_REGS_RC(x) ((x)->regs[1])
244 +#define PT_REGS_SP(x) ((x)->regs[29])
245 +#define PT_REGS_IP(x) ((x)->cp0_epc)
246 +
247 +#elif defined(bpf_target_powerpc)
248 +
249 +#define PT_REGS_PARM1(x) ((x)->gpr[3])
250 +#define PT_REGS_PARM2(x) ((x)->gpr[4])
251 +#define PT_REGS_PARM3(x) ((x)->gpr[5])
252 +#define PT_REGS_PARM4(x) ((x)->gpr[6])
253 +#define PT_REGS_PARM5(x) ((x)->gpr[7])
254 +#define PT_REGS_RC(x) ((x)->gpr[3])
255 +#define PT_REGS_SP(x) ((x)->sp)
256 +#define PT_REGS_IP(x) ((x)->nip)
257 +
258 +#elif defined(bpf_target_sparc)
259 +
260 +#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
261 +#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
262 +#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
263 +#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
264 +#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
265 +#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
266 +#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
267 +#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
268 +
269 +/* Should this also be a bpf_target check for the sparc case? */
270 +#if defined(__arch64__)
271 +#define PT_REGS_IP(x) ((x)->tpc)
272 +#else
273 +#define PT_REGS_IP(x) ((x)->pc)
274 +#endif
275 +
276 +#endif
277 +
278 +#ifdef bpf_target_powerpc
279 +#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
280 +#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
281 +#elif bpf_target_sparc
282 +#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
283 +#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
284 +#else
285 +#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ \
286 + bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
287 +#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ \
288 + bpf_probe_read(&(ip), sizeof(ip), \
289 + (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
290 +#endif
291 +
292 +#endif
293 diff --git a/ebpf/bypass_filter.c b/ebpf/bypass_filter.c
294 new file mode 100644
295 index 0000000..be81032
296 --- /dev/null
297 +++ b/ebpf/bypass_filter.c
298 @@ -0,0 +1,225 @@
299 +/* Copyright (C) 2018 Open Information Security Foundation
300 + *
301 + * You can copy, redistribute or modify this Program under the terms of
302 + * the GNU General Public License version 2 as published by the Free
303 + * Software Foundation.
304 + *
305 + * This program is distributed in the hope that it will be useful,
306 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
307 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
308 + * GNU General Public License for more details.
309 + *
310 + * You should have received a copy of the GNU General Public License
311 + * version 2 along with this program; if not, write to the Free Software
312 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
313 + * 02110-1301, USA.
314 + */
315 +
316 +#include <stddef.h>
317 +#include <linux/bpf.h>
318 +
319 +#include <linux/if_ether.h>
320 +#include <linux/in.h>
321 +#include <linux/ip.h>
322 +#include <linux/in6.h>
323 +#include <linux/ipv6.h>
324 +#include <linux/filter.h>
325 +
326 +#include "bpf_helpers.h"
327 +
328 +#define LINUX_VERSION_CODE 263682
329 +
330 +struct flowv4_keys {
331 + __be32 src;
332 + __be32 dst;
333 + union {
334 + __be32 ports;
335 + __be16 port16[2];
336 + };
337 + __u32 ip_proto;
338 +} __attribute__((__aligned__(8)));
339 +
340 +struct flowv6_keys {
341 + __be32 src[4];
342 + __be32 dst[4];
343 + union {
344 + __be32 ports;
345 + __be16 port16[2];
346 + };
347 + __u32 ip_proto;
348 +} __attribute__((__aligned__(8)));
349 +
350 +struct pair {
351 + __u64 time;
352 + __u64 packets;
353 + __u64 bytes;
354 +} __attribute__((__aligned__(8)));
355 +
356 +struct bpf_map_def SEC("maps") flow_table_v4 = {
357 + .type = BPF_MAP_TYPE_PERCPU_HASH,
358 + .key_size = sizeof(struct flowv4_keys),
359 + .value_size = sizeof(struct pair),
360 + .max_entries = 32768,
361 +};
362 +
363 +struct bpf_map_def SEC("maps") flow_table_v6 = {
364 + .type = BPF_MAP_TYPE_PERCPU_HASH,
365 + .key_size = sizeof(struct flowv6_keys),
366 + .value_size = sizeof(struct pair),
367 + .max_entries = 32768,
368 +};
369 +
370 +/**
371 + * IPv4 filter
372 + *
373 + * \return 0 to drop packet out and -1 to accept it
374 + */
375 +static __always_inline int ipv4_filter(struct __sk_buff *skb)
376 +{
377 + __u32 nhoff, verlen;
378 + struct flowv4_keys tuple;
379 + struct pair *value;
380 + __u16 port;
381 +
382 + nhoff = skb->cb[0];
383 +
384 + tuple.ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol));
385 + /* only support TCP and UDP for now */
386 + switch (tuple.ip_proto) {
387 + case IPPROTO_TCP:
388 + case IPPROTO_UDP:
389 + break;
390 + default:
391 + return -1;
392 + }
393 +
394 + tuple.src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
395 + tuple.dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
396 +
397 + verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
398 + nhoff += (verlen & 0xF) << 2;
399 + tuple.ports = load_word(skb, nhoff);
400 + port = tuple.port16[1];
401 + tuple.port16[1] = tuple.port16[0];
402 + tuple.port16[0] = port;
403 +
404 +#if 0
405 + if ((tuple.port16[0] == 22) || (tuple.port16[1] == 22))
406 + {
407 + __u16 sp = tuple.port16[0];
408 + //__u16 dp = tuple.port16[1];
409 + char fmt[] = "Parsed SSH flow: %u %d -> %u\n";
410 + bpf_trace_printk(fmt, sizeof(fmt), tuple.src, sp, tuple.dst);
411 + }
412 +#endif
413 + /* Test if src is in hash */
414 + value = bpf_map_lookup_elem(&flow_table_v4, &tuple);
415 + if (value) {
416 +#if 0
417 + {
418 + __u16 sp = tuple.port16[0];
419 + //__u16 dp = tuple.port16[1];
420 + char bfmt[] = "Found flow: %u %d -> %u\n";
421 + bpf_trace_printk(bfmt, sizeof(bfmt), tuple.src, sp, tuple.dst);
422 + }
423 +#endif
424 + value->packets++;
425 + value->bytes += skb->len;
426 + value->time = bpf_ktime_get_ns();
427 + return 0;
428 + }
429 + return -1;
430 +}
431 +
432 +/**
433 + * IPv6 filter
434 + *
435 + * \return 0 to drop packet out and -1 to accept it
436 + */
437 +static __always_inline int ipv6_filter(struct __sk_buff *skb)
438 +{
439 + __u32 nhoff;
440 + __u8 nhdr;
441 + struct flowv6_keys tuple;
442 + struct pair *value;
443 + __u16 port;
444 +
445 + nhoff = skb->cb[0];
446 +
447 + /* get next header */
448 + nhdr = load_byte(skb, nhoff + offsetof(struct ipv6hdr, nexthdr));
449 +
450 + /* only support direct TCP and UDP for now */
451 + switch (nhdr) {
452 + case IPPROTO_TCP:
453 + case IPPROTO_UDP:
454 + break;
455 + default:
456 + return -1;
457 + }
458 +
459 + tuple.src[0] = load_word(skb, nhoff + offsetof(struct ipv6hdr, saddr));
460 + tuple.src[1] = load_word(skb, nhoff + offsetof(struct ipv6hdr, saddr) + 4);
461 + tuple.src[2] = load_word(skb, nhoff + offsetof(struct ipv6hdr, saddr) + 8);
462 + tuple.src[3] = load_word(skb, nhoff + offsetof(struct ipv6hdr, saddr) + 12);
463 + tuple.dst[0] = load_word(skb, nhoff + offsetof(struct ipv6hdr, daddr));
464 + tuple.dst[1] = load_word(skb, nhoff + offsetof(struct ipv6hdr, daddr) + 4);
465 + tuple.dst[2] = load_word(skb, nhoff + offsetof(struct ipv6hdr, daddr) + 8);
466 + tuple.dst[3] = load_word(skb, nhoff + offsetof(struct ipv6hdr, daddr) + 12);
467 +
468 + /* Parse TCP */
469 + tuple.ports = load_word(skb, nhoff + 40 /* IPV6_HEADER_LEN */);
470 + port = tuple.port16[1];
471 + tuple.port16[1] = tuple.port16[0];
472 + tuple.port16[0] = port;
473 + tuple.ip_proto = nhdr;
474 +
475 + //char fmt[] = "Now Got IPv6 port %u and %u\n";
476 + //bpf_trace_printk(fmt, sizeof(fmt), tuple.port16[0], tuple.port16[1]);
477 + /* Test if src is in hash */
478 + value = bpf_map_lookup_elem(&flow_table_v6, &tuple);
479 + if (value) {
480 + //char fmt[] = "Got a match IPv6: %u and %u\n";
481 + //bpf_trace_printk(fmt, sizeof(fmt), tuple.port16[0], tuple.port16[1]);
482 + value->packets++;
483 + value->bytes += skb->len;
484 + value->time = bpf_ktime_get_ns();
485 + return 0;
486 + }
487 + return -1;
488 +}
489 +
490 +/**
491 + * filter function
492 + *
493 + * It is loaded in kernel by Suricata that uses the section name specified
494 + * by the SEC call to find it in the Elf binary object and load it.
495 + *
496 + * \return 0 to drop packet out and -1 to accept it
497 + */
498 +int SEC("filter") hashfilter(struct __sk_buff *skb) {
499 + __u32 nhoff = BPF_LL_OFF + ETH_HLEN;
500 +
501 + skb->cb[0] = nhoff;
502 + switch (skb->protocol) {
503 + case __constant_htons(ETH_P_IP):
504 + return ipv4_filter(skb);
505 + case __constant_htons(ETH_P_IPV6):
506 + return ipv6_filter(skb);
507 + default:
508 +#if 0
509 + {
510 + char fmt[] = "Got proto %u\n";
511 + bpf_trace_printk(fmt, sizeof(fmt), h_proto);
512 + break;
513 + }
514 +#else
515 + break;
516 +#endif
517 + }
518 + return -1;
519 +}
520 +
521 +char __license[] SEC("license") = "GPL";
522 +
523 +__u32 __version SEC("version") = LINUX_VERSION_CODE;
524 diff --git a/ebpf/filter.c b/ebpf/filter.c
525 new file mode 100644
526 index 0000000..4fe95d4
527 --- /dev/null
528 +++ b/ebpf/filter.c
529 @@ -0,0 +1,58 @@
530 +/* Copyright (C) 2018 Open Information Security Foundation
531 + *
532 + * You can copy, redistribute or modify this Program under the terms of
533 + * the GNU General Public License version 2 as published by the Free
534 + * Software Foundation.
535 + *
536 + * This program is distributed in the hope that it will be useful,
537 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
538 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
539 + * GNU General Public License for more details.
540 + *
541 + * You should have received a copy of the GNU General Public License
542 + * version 2 along with this program; if not, write to the Free Software
543 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
544 + * 02110-1301, USA.
545 + */
546 +
547 +#include <stddef.h>
548 +#include <linux/bpf.h>
549 +
550 +#include <linux/if_ether.h>
551 +#include <linux/in.h>
552 +#include <linux/ip.h>
553 +#include <linux/in6.h>
554 +#include <linux/ipv6.h>
555 +#include <linux/filter.h>
556 +
557 +#include "bpf_helpers.h"
558 +
559 +#define LINUX_VERSION_CODE 263682
560 +
561 +int SEC("filter") hashfilter(struct __sk_buff *skb) {
562 + __u32 nhoff = BPF_LL_OFF + ETH_HLEN;
563 +
564 + skb->cb[0] = nhoff;
565 + switch (skb->protocol) {
566 + case __constant_htons(ETH_P_IP):
567 + return -1;
568 + case __constant_htons(ETH_P_IPV6):
569 + return 0;
570 + default:
571 +#if 0
572 + {
573 + char fmt[] = "Got proto %u\n";
574 + bpf_trace_printk(fmt, sizeof(fmt), h_proto);
575 + break;
576 + }
577 +#else
578 + break;
579 +#endif
580 + }
581 + return -1;
582 +}
583 +
584 +
585 +char __license[] SEC("license") = "GPL";
586 +
587 +__u32 __version SEC("version") = LINUX_VERSION_CODE;
588 diff --git a/ebpf/hash_func01.h b/ebpf/hash_func01.h
589 new file mode 100644
590 index 0000000..3825581
591 --- /dev/null
592 +++ b/ebpf/hash_func01.h
593 @@ -0,0 +1,55 @@
594 +/* SPDX-License-Identifier: LGPL-2.1
595 + *
596 + * Based on Paul Hsieh's (LGPG 2.1) hash function
597 + * From: http://www.azillionmonkeys.com/qed/hash.html
598 + */
599 +
600 +#define get16bits(d) (*((const __u16 *) (d)))
601 +
602 +static __always_inline
603 +__u32 SuperFastHash (const char *data, int len, __u32 initval) {
604 + __u32 hash = initval;
605 + __u32 tmp;
606 + int rem;
607 +
608 + if (len <= 0 || data == NULL) return 0;
609 +
610 + rem = len & 3;
611 + len >>= 2;
612 +
613 + /* Main loop */
614 +#pragma clang loop unroll(full)
615 + for (;len > 0; len--) {
616 + hash += get16bits (data);
617 + tmp = (get16bits (data+2) << 11) ^ hash;
618 + hash = (hash << 16) ^ tmp;
619 + data += 2*sizeof (__u16);
620 + hash += hash >> 11;
621 + }
622 +
623 + /* Handle end cases */
624 + switch (rem) {
625 + case 3: hash += get16bits (data);
626 + hash ^= hash << 16;
627 + hash ^= ((signed char)data[sizeof (__u16)]) << 18;
628 + hash += hash >> 11;
629 + break;
630 + case 2: hash += get16bits (data);
631 + hash ^= hash << 11;
632 + hash += hash >> 17;
633 + break;
634 + case 1: hash += (signed char)*data;
635 + hash ^= hash << 10;
636 + hash += hash >> 1;
637 + }
638 +
639 + /* Force "avalanching" of final 127 bits */
640 + hash ^= hash << 3;
641 + hash += hash >> 5;
642 + hash ^= hash << 4;
643 + hash += hash >> 17;
644 + hash ^= hash << 25;
645 + hash += hash >> 6;
646 +
647 + return hash;
648 +}
649 diff --git a/ebpf/include/linux/bpf.h b/ebpf/include/linux/bpf.h
650 new file mode 100644
651 index 0000000..c308487
652 --- /dev/null
653 +++ b/ebpf/include/linux/bpf.h
654 @@ -0,0 +1,1027 @@
655 +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
656 +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
657 + *
658 + * This program is free software; you can redistribute it and/or
659 + * modify it under the terms of version 2 of the GNU General Public
660 + * License as published by the Free Software Foundation.
661 + */
662 +#ifndef __LINUX_BPF_H__
663 +#define __LINUX_BPF_H__
664 +
665 +#include <linux/types.h>
666 +#include <linux/bpf_common.h>
667 +
668 +/* Extended instruction set based on top of classic BPF */
669 +
670 +/* instruction classes */
671 +#define BPF_ALU64 0x07 /* alu mode in double word width */
672 +
673 +/* ld/ldx fields */
674 +#define BPF_DW 0x18 /* double word (u64) */
675 +#define BPF_XADD 0xc0 /* exclusive add */
676 +
677 +/* alu/jmp fields */
678 +#define BPF_MOV 0xb0 /* mov reg to reg */
679 +#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
680 +
681 +/* change endianness of a register */
682 +#define BPF_END 0xd0 /* flags for endianness conversion: */
683 +#define BPF_TO_LE 0x00 /* convert to little-endian */
684 +#define BPF_TO_BE 0x08 /* convert to big-endian */
685 +#define BPF_FROM_LE BPF_TO_LE
686 +#define BPF_FROM_BE BPF_TO_BE
687 +
688 +/* jmp encodings */
689 +#define BPF_JNE 0x50 /* jump != */
690 +#define BPF_JLT 0xa0 /* LT is unsigned, '<' */
691 +#define BPF_JLE 0xb0 /* LE is unsigned, '<=' */
692 +#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
693 +#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
694 +#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
695 +#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
696 +#define BPF_CALL 0x80 /* function call */
697 +#define BPF_EXIT 0x90 /* function return */
698 +
699 +/* Register numbers */
700 +enum {
701 + BPF_REG_0 = 0,
702 + BPF_REG_1,
703 + BPF_REG_2,
704 + BPF_REG_3,
705 + BPF_REG_4,
706 + BPF_REG_5,
707 + BPF_REG_6,
708 + BPF_REG_7,
709 + BPF_REG_8,
710 + BPF_REG_9,
711 + BPF_REG_10,
712 + __MAX_BPF_REG,
713 +};
714 +
715 +/* BPF has 10 general purpose 64-bit registers and stack frame. */
716 +#define MAX_BPF_REG __MAX_BPF_REG
717 +
718 +struct bpf_insn {
719 + __u8 code; /* opcode */
720 + __u8 dst_reg:4; /* dest register */
721 + __u8 src_reg:4; /* source register */
722 + __s16 off; /* signed offset */
723 + __s32 imm; /* signed immediate constant */
724 +};
725 +
726 +/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
727 +struct bpf_lpm_trie_key {
728 + __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
729 + __u8 data[0]; /* Arbitrary size */
730 +};
731 +
732 +/* BPF syscall commands, see bpf(2) man-page for details. */
733 +enum bpf_cmd {
734 + BPF_MAP_CREATE,
735 + BPF_MAP_LOOKUP_ELEM,
736 + BPF_MAP_UPDATE_ELEM,
737 + BPF_MAP_DELETE_ELEM,
738 + BPF_MAP_GET_NEXT_KEY,
739 + BPF_PROG_LOAD,
740 + BPF_OBJ_PIN,
741 + BPF_OBJ_GET,
742 + BPF_PROG_ATTACH,
743 + BPF_PROG_DETACH,
744 + BPF_PROG_TEST_RUN,
745 + BPF_PROG_GET_NEXT_ID,
746 + BPF_MAP_GET_NEXT_ID,
747 + BPF_PROG_GET_FD_BY_ID,
748 + BPF_MAP_GET_FD_BY_ID,
749 + BPF_OBJ_GET_INFO_BY_FD,
750 + BPF_PROG_QUERY,
751 +};
752 +
753 +enum bpf_map_type {
754 + BPF_MAP_TYPE_UNSPEC,
755 + BPF_MAP_TYPE_HASH,
756 + BPF_MAP_TYPE_ARRAY,
757 + BPF_MAP_TYPE_PROG_ARRAY,
758 + BPF_MAP_TYPE_PERF_EVENT_ARRAY,
759 + BPF_MAP_TYPE_PERCPU_HASH,
760 + BPF_MAP_TYPE_PERCPU_ARRAY,
761 + BPF_MAP_TYPE_STACK_TRACE,
762 + BPF_MAP_TYPE_CGROUP_ARRAY,
763 + BPF_MAP_TYPE_LRU_HASH,
764 + BPF_MAP_TYPE_LRU_PERCPU_HASH,
765 + BPF_MAP_TYPE_LPM_TRIE,
766 + BPF_MAP_TYPE_ARRAY_OF_MAPS,
767 + BPF_MAP_TYPE_HASH_OF_MAPS,
768 + BPF_MAP_TYPE_DEVMAP,
769 + BPF_MAP_TYPE_SOCKMAP,
770 + BPF_MAP_TYPE_CPUMAP,
771 +};
772 +
773 +enum bpf_prog_type {
774 + BPF_PROG_TYPE_UNSPEC,
775 + BPF_PROG_TYPE_SOCKET_FILTER,
776 + BPF_PROG_TYPE_KPROBE,
777 + BPF_PROG_TYPE_SCHED_CLS,
778 + BPF_PROG_TYPE_SCHED_ACT,
779 + BPF_PROG_TYPE_TRACEPOINT,
780 + BPF_PROG_TYPE_XDP,
781 + BPF_PROG_TYPE_PERF_EVENT,
782 + BPF_PROG_TYPE_CGROUP_SKB,
783 + BPF_PROG_TYPE_CGROUP_SOCK,
784 + BPF_PROG_TYPE_LWT_IN,
785 + BPF_PROG_TYPE_LWT_OUT,
786 + BPF_PROG_TYPE_LWT_XMIT,
787 + BPF_PROG_TYPE_SOCK_OPS,
788 + BPF_PROG_TYPE_SK_SKB,
789 + BPF_PROG_TYPE_CGROUP_DEVICE,
790 +};
791 +
792 +enum bpf_attach_type {
793 + BPF_CGROUP_INET_INGRESS,
794 + BPF_CGROUP_INET_EGRESS,
795 + BPF_CGROUP_INET_SOCK_CREATE,
796 + BPF_CGROUP_SOCK_OPS,
797 + BPF_SK_SKB_STREAM_PARSER,
798 + BPF_SK_SKB_STREAM_VERDICT,
799 + BPF_CGROUP_DEVICE,
800 + __MAX_BPF_ATTACH_TYPE
801 +};
802 +
803 +#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
804 +
805 +/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
806 + *
807 + * NONE(default): No further bpf programs allowed in the subtree.
808 + *
809 + * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
810 + * the program in this cgroup yields to sub-cgroup program.
811 + *
812 + * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
813 + * that cgroup program gets run in addition to the program in this cgroup.
814 + *
815 + * Only one program is allowed to be attached to a cgroup with
816 + * NONE or BPF_F_ALLOW_OVERRIDE flag.
817 + * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
818 + * release old program and attach the new one. Attach flags has to match.
819 + *
820 + * Multiple programs are allowed to be attached to a cgroup with
821 + * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
822 + * (those that were attached first, run first)
823 + * The programs of sub-cgroup are executed first, then programs of
824 + * this cgroup and then programs of parent cgroup.
825 + * When children program makes decision (like picking TCP CA or sock bind)
826 + * parent program has a chance to override it.
827 + *
828 + * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
829 + * A cgroup with NONE doesn't allow any programs in sub-cgroups.
830 + * Ex1:
831 + * cgrp1 (MULTI progs A, B) ->
832 + * cgrp2 (OVERRIDE prog C) ->
833 + * cgrp3 (MULTI prog D) ->
834 + * cgrp4 (OVERRIDE prog E) ->
835 + * cgrp5 (NONE prog F)
836 + * the event in cgrp5 triggers execution of F,D,A,B in that order.
837 + * if prog F is detached, the execution is E,D,A,B
838 + * if prog F and D are detached, the execution is E,A,B
839 + * if prog F, E and D are detached, the execution is C,A,B
840 + *
841 + * All eligible programs are executed regardless of return code from
842 + * earlier programs.
843 + */
844 +#define BPF_F_ALLOW_OVERRIDE (1U << 0)
845 +#define BPF_F_ALLOW_MULTI (1U << 1)
846 +
847 +/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
848 + * verifier will perform strict alignment checking as if the kernel
849 + * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
850 + * and NET_IP_ALIGN defined to 2.
851 + */
852 +#define BPF_F_STRICT_ALIGNMENT (1U << 0)
853 +
854 +/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
855 +#define BPF_PSEUDO_MAP_FD 1
856 +
857 +/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
858 + * offset to another bpf function
859 + */
860 +#define BPF_PSEUDO_CALL 1
861 +
862 +/* flags for BPF_MAP_UPDATE_ELEM command */
863 +#define BPF_ANY 0 /* create new element or update existing */
864 +#define BPF_NOEXIST 1 /* create new element if it didn't exist */
865 +#define BPF_EXIST 2 /* update existing element */
866 +
867 +/* flags for BPF_MAP_CREATE command */
868 +#define BPF_F_NO_PREALLOC (1U << 0)
869 +/* Instead of having one common LRU list in the
870 + * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
871 + * which can scale and perform better.
872 + * Note, the LRU nodes (including free nodes) cannot be moved
873 + * across different LRU lists.
874 + */
875 +#define BPF_F_NO_COMMON_LRU (1U << 1)
876 +/* Specify numa node during map creation */
877 +#define BPF_F_NUMA_NODE (1U << 2)
878 +
879 +/* flags for BPF_PROG_QUERY */
880 +#define BPF_F_QUERY_EFFECTIVE (1U << 0)
881 +
882 +#define BPF_OBJ_NAME_LEN 16U
883 +
884 +/* Flags for accessing BPF object */
885 +#define BPF_F_RDONLY (1U << 3)
886 +#define BPF_F_WRONLY (1U << 4)
887 +
888 +union bpf_attr {
889 + struct { /* anonymous struct used by BPF_MAP_CREATE command */
890 + __u32 map_type; /* one of enum bpf_map_type */
891 + __u32 key_size; /* size of key in bytes */
892 + __u32 value_size; /* size of value in bytes */
893 + __u32 max_entries; /* max number of entries in a map */
894 + __u32 map_flags; /* BPF_MAP_CREATE related
895 + * flags defined above.
896 + */
897 + __u32 inner_map_fd; /* fd pointing to the inner map */
898 + __u32 numa_node; /* numa node (effective only if
899 + * BPF_F_NUMA_NODE is set).
900 + */
901 + char map_name[BPF_OBJ_NAME_LEN];
902 + };
903 +
904 + struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
905 + __u32 map_fd;
906 + __aligned_u64 key;
907 + union {
908 + __aligned_u64 value;
909 + __aligned_u64 next_key;
910 + };
911 + __u64 flags;
912 + };
913 +
914 + struct { /* anonymous struct used by BPF_PROG_LOAD command */
915 + __u32 prog_type; /* one of enum bpf_prog_type */
916 + __u32 insn_cnt;
917 + __aligned_u64 insns;
918 + __aligned_u64 license;
919 + __u32 log_level; /* verbosity level of verifier */
920 + __u32 log_size; /* size of user buffer */
921 + __aligned_u64 log_buf; /* user supplied buffer */
922 + __u32 kern_version; /* checked when prog_type=kprobe */
923 + __u32 prog_flags;
924 + char prog_name[BPF_OBJ_NAME_LEN];
925 + __u32 prog_ifindex; /* ifindex of netdev to prep for */
926 + };
927 +
928 + struct { /* anonymous struct used by BPF_OBJ_* commands */
929 + __aligned_u64 pathname;
930 + __u32 bpf_fd;
931 + __u32 file_flags;
932 + };
933 +
934 + struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
935 + __u32 target_fd; /* container object to attach to */
936 + __u32 attach_bpf_fd; /* eBPF program to attach */
937 + __u32 attach_type;
938 + __u32 attach_flags;
939 + };
940 +
941 + struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
942 + __u32 prog_fd;
943 + __u32 retval;
944 + __u32 data_size_in;
945 + __u32 data_size_out;
946 + __aligned_u64 data_in;
947 + __aligned_u64 data_out;
948 + __u32 repeat;
949 + __u32 duration;
950 + } test;
951 +
952 + struct { /* anonymous struct used by BPF_*_GET_*_ID */
953 + union {
954 + __u32 start_id;
955 + __u32 prog_id;
956 + __u32 map_id;
957 + };
958 + __u32 next_id;
959 + __u32 open_flags;
960 + };
961 +
962 + struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
963 + __u32 bpf_fd;
964 + __u32 info_len;
965 + __aligned_u64 info;
966 + } info;
967 +
968 + struct { /* anonymous struct used by BPF_PROG_QUERY command */
969 + __u32 target_fd; /* container object to query */
970 + __u32 attach_type;
971 + __u32 query_flags;
972 + __u32 attach_flags;
973 + __aligned_u64 prog_ids;
974 + __u32 prog_cnt;
975 + } query;
976 +} __attribute__((aligned(8)));
977 +
978 +/* BPF helper function descriptions:
979 + *
980 + * void *bpf_map_lookup_elem(&map, &key)
981 + * Return: Map value or NULL
982 + *
983 + * int bpf_map_update_elem(&map, &key, &value, flags)
984 + * Return: 0 on success or negative error
985 + *
986 + * int bpf_map_delete_elem(&map, &key)
987 + * Return: 0 on success or negative error
988 + *
989 + * int bpf_probe_read(void *dst, int size, void *src)
990 + * Return: 0 on success or negative error
991 + *
992 + * u64 bpf_ktime_get_ns(void)
993 + * Return: current ktime
994 + *
995 + * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
996 + * Return: length of buffer written or negative error
997 + *
998 + * u32 bpf_prandom_u32(void)
999 + * Return: random value
1000 + *
1001 + * u32 bpf_raw_smp_processor_id(void)
1002 + * Return: SMP processor ID
1003 + *
1004 + * int bpf_skb_store_bytes(skb, offset, from, len, flags)
1005 + * store bytes into packet
1006 + * @skb: pointer to skb
1007 + * @offset: offset within packet from skb->mac_header
1008 + * @from: pointer where to copy bytes from
1009 + * @len: number of bytes to store into packet
1010 + * @flags: bit 0 - if true, recompute skb->csum
1011 + * other bits - reserved
1012 + * Return: 0 on success or negative error
1013 + *
1014 + * int bpf_l3_csum_replace(skb, offset, from, to, flags)
1015 + * recompute IP checksum
1016 + * @skb: pointer to skb
1017 + * @offset: offset within packet where IP checksum is located
1018 + * @from: old value of header field
1019 + * @to: new value of header field
1020 + * @flags: bits 0-3 - size of header field
1021 + * other bits - reserved
1022 + * Return: 0 on success or negative error
1023 + *
1024 + * int bpf_l4_csum_replace(skb, offset, from, to, flags)
1025 + * recompute TCP/UDP checksum
1026 + * @skb: pointer to skb
1027 + * @offset: offset within packet where TCP/UDP checksum is located
1028 + * @from: old value of header field
1029 + * @to: new value of header field
1030 + * @flags: bits 0-3 - size of header field
1031 + * bit 4 - is pseudo header
1032 + * other bits - reserved
1033 + * Return: 0 on success or negative error
1034 + *
1035 + * int bpf_tail_call(ctx, prog_array_map, index)
1036 + * jump into another BPF program
1037 + * @ctx: context pointer passed to next program
1038 + * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
1039 + * @index: 32-bit index inside array that selects specific program to run
1040 + * Return: 0 on success or negative error
1041 + *
1042 + * int bpf_clone_redirect(skb, ifindex, flags)
1043 + * redirect to another netdev
1044 + * @skb: pointer to skb
1045 + * @ifindex: ifindex of the net device
1046 + * @flags: bit 0 - if set, redirect to ingress instead of egress
1047 + * other bits - reserved
1048 + * Return: 0 on success or negative error
1049 + *
1050 + * u64 bpf_get_current_pid_tgid(void)
1051 + * Return: current->tgid << 32 | current->pid
1052 + *
1053 + * u64 bpf_get_current_uid_gid(void)
1054 + * Return: current_gid << 32 | current_uid
1055 + *
1056 + * int bpf_get_current_comm(char *buf, int size_of_buf)
1057 + * stores current->comm into buf
1058 + * Return: 0 on success or negative error
1059 + *
1060 + * u32 bpf_get_cgroup_classid(skb)
1061 + * retrieve a proc's classid
1062 + * @skb: pointer to skb
1063 + * Return: classid if != 0
1064 + *
1065 + * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
1066 + * Return: 0 on success or negative error
1067 + *
1068 + * int bpf_skb_vlan_pop(skb)
1069 + * Return: 0 on success or negative error
1070 + *
1071 + * int bpf_skb_get_tunnel_key(skb, key, size, flags)
1072 + * int bpf_skb_set_tunnel_key(skb, key, size, flags)
1073 + * retrieve or populate tunnel metadata
1074 + * @skb: pointer to skb
1075 + * @key: pointer to 'struct bpf_tunnel_key'
1076 + * @size: size of 'struct bpf_tunnel_key'
1077 + * @flags: room for future extensions
1078 + * Return: 0 on success or negative error
1079 + *
1080 + * u64 bpf_perf_event_read(map, flags)
1081 + * read perf event counter value
1082 + * @map: pointer to perf_event_array map
1083 + * @flags: index of event in the map or bitmask flags
1084 + * Return: value of perf event counter read or error code
1085 + *
1086 + * int bpf_redirect(ifindex, flags)
1087 + * redirect to another netdev
1088 + * @ifindex: ifindex of the net device
1089 + * @flags:
1090 + * cls_bpf:
1091 + * bit 0 - if set, redirect to ingress instead of egress
1092 + * other bits - reserved
1093 + * xdp_bpf:
1094 + * all bits - reserved
1095 + * Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
1096 + * xdp_bpf: XDP_REDIRECT on success or XDP_ABORTED on error
1097 + * int bpf_redirect_map(map, key, flags)
1098 + * redirect to endpoint in map
1099 + * @map: pointer to dev map
1100 + * @key: index in map to lookup
1101 + * @flags: --
1102 + * Return: XDP_REDIRECT on success or XDP_ABORTED on error
1103 + *
1104 + * u32 bpf_get_route_realm(skb)
1105 + * retrieve a dst's tclassid
1106 + * @skb: pointer to skb
1107 + * Return: realm if != 0
1108 + *
1109 + * int bpf_perf_event_output(ctx, map, flags, data, size)
1110 + * output perf raw sample
1111 + * @ctx: struct pt_regs*
1112 + * @map: pointer to perf_event_array map
1113 + * @flags: index of event in the map or bitmask flags
1114 + * @data: data on stack to be output as raw data
1115 + * @size: size of data
1116 + * Return: 0 on success or negative error
1117 + *
1118 + * int bpf_get_stackid(ctx, map, flags)
1119 + * walk user or kernel stack and return id
1120 + * @ctx: struct pt_regs*
1121 + * @map: pointer to stack_trace map
1122 + * @flags: bits 0-7 - number of stack frames to skip
1123 + * bit 8 - collect user stack instead of kernel
1124 + * bit 9 - compare stacks by hash only
1125 + * bit 10 - if two different stacks hash into the same stackid
1126 + * discard old
1127 + * other bits - reserved
1128 + * Return: >= 0 stackid on success or negative error
1129 + *
1130 + * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
1131 + * calculate csum diff
1132 + * @from: raw from buffer
1133 + * @from_size: length of from buffer
1134 + * @to: raw to buffer
1135 + * @to_size: length of to buffer
1136 + * @seed: optional seed
1137 + * Return: csum result or negative error code
1138 + *
1139 + * int bpf_skb_get_tunnel_opt(skb, opt, size)
1140 + * retrieve tunnel options metadata
1141 + * @skb: pointer to skb
1142 + * @opt: pointer to raw tunnel option data
1143 + * @size: size of @opt
1144 + * Return: option size
1145 + *
1146 + * int bpf_skb_set_tunnel_opt(skb, opt, size)
1147 + * populate tunnel options metadata
1148 + * @skb: pointer to skb
1149 + * @opt: pointer to raw tunnel option data
1150 + * @size: size of @opt
1151 + * Return: 0 on success or negative error
1152 + *
1153 + * int bpf_skb_change_proto(skb, proto, flags)
1154 + * Change protocol of the skb. Currently supported is v4 -> v6,
1155 + * v6 -> v4 transitions. The helper will also resize the skb. eBPF
1156 + * program is expected to fill the new headers via skb_store_bytes
1157 + * and lX_csum_replace.
1158 + * @skb: pointer to skb
1159 + * @proto: new skb->protocol type
1160 + * @flags: reserved
1161 + * Return: 0 on success or negative error
1162 + *
1163 + * int bpf_skb_change_type(skb, type)
1164 + * Change packet type of skb.
1165 + * @skb: pointer to skb
1166 + * @type: new skb->pkt_type type
1167 + * Return: 0 on success or negative error
1168 + *
1169 + * int bpf_skb_under_cgroup(skb, map, index)
1170 + * Check cgroup2 membership of skb
1171 + * @skb: pointer to skb
1172 + * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
1173 + * @index: index of the cgroup in the bpf_map
1174 + * Return:
1175 + * == 0 skb failed the cgroup2 descendant test
1176 + * == 1 skb succeeded the cgroup2 descendant test
1177 + * < 0 error
1178 + *
1179 + * u32 bpf_get_hash_recalc(skb)
1180 + * Retrieve and possibly recalculate skb->hash.
1181 + * @skb: pointer to skb
1182 + * Return: hash
1183 + *
1184 + * u64 bpf_get_current_task(void)
1185 + * Returns current task_struct
1186 + * Return: current
1187 + *
1188 + * int bpf_probe_write_user(void *dst, void *src, int len)
1189 + * safely attempt to write to a location
1190 + * @dst: destination address in userspace
1191 + * @src: source address on stack
1192 + * @len: number of bytes to copy
1193 + * Return: 0 on success or negative error
1194 + *
1195 + * int bpf_current_task_under_cgroup(map, index)
1196 + * Check cgroup2 membership of current task
1197 + * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
1198 + * @index: index of the cgroup in the bpf_map
1199 + * Return:
1200 + * == 0 current failed the cgroup2 descendant test
1201 + * == 1 current succeeded the cgroup2 descendant test
1202 + * < 0 error
1203 + *
1204 + * int bpf_skb_change_tail(skb, len, flags)
1205 + * The helper will resize the skb to the given new size, to be used f.e.
1206 + * with control messages.
1207 + * @skb: pointer to skb
1208 + * @len: new skb length
1209 + * @flags: reserved
1210 + * Return: 0 on success or negative error
1211 + *
1212 + * int bpf_skb_pull_data(skb, len)
1213 + * The helper will pull in non-linear data in case the skb is non-linear
1214 + * and not all of len are part of the linear section. Only needed for
1215 + * read/write with direct packet access.
1216 + * @skb: pointer to skb
1217 + * @len: len to make read/writeable
1218 + * Return: 0 on success or negative error
1219 + *
1220 + * s64 bpf_csum_update(skb, csum)
1221 + * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
1222 + * @skb: pointer to skb
1223 + * @csum: csum to add
1224 + * Return: csum on success or negative error
1225 + *
1226 + * void bpf_set_hash_invalid(skb)
1227 + * Invalidate current skb->hash.
1228 + * @skb: pointer to skb
1229 + *
1230 + * int bpf_get_numa_node_id()
1231 + * Return: Id of current NUMA node.
1232 + *
1233 + * int bpf_skb_change_head()
1234 + * Grows headroom of skb and adjusts MAC header offset accordingly.
1235 + * Will extend/reallocate as required automatically.
1236 + * May change skb data pointer and will thus invalidate any check
1237 + * performed for direct packet access.
1238 + * @skb: pointer to skb
1239 + * @len: length of header to be pushed in front
1240 + * @flags: Flags (unused for now)
1241 + * Return: 0 on success or negative error
1242 + *
1243 + * int bpf_xdp_adjust_head(xdp_md, delta)
1244 + * Adjust the xdp_md.data by delta
1245 + * @xdp_md: pointer to xdp_md
1246 + * @delta: A positive/negative integer to be added to xdp_md.data
1247 + * Return: 0 on success or negative on error
1248 + *
1249 + * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
1250 + * Copy a NUL terminated string from unsafe address. In case the string
1251 + * length is smaller than size, the target is not padded with further NUL
1252 + * bytes. In case the string length is larger than size, just count-1
1253 + * bytes are copied and the last byte is set to NUL.
1254 + * @dst: destination address
1255 + * @size: maximum number of bytes to copy, including the trailing NUL
1256 + * @unsafe_ptr: unsafe address
1257 + * Return:
1258 + * > 0 length of the string including the trailing NUL on success
1259 + * < 0 error
1260 + *
1261 + * u64 bpf_get_socket_cookie(skb)
1262 + * Get the cookie for the socket stored inside sk_buff.
1263 + * @skb: pointer to skb
1264 + * Return: 8 Bytes non-decreasing number on success or 0 if the socket
1265 + * field is missing inside sk_buff
1266 + *
1267 + * u32 bpf_get_socket_uid(skb)
1268 + * Get the owner uid of the socket stored inside sk_buff.
1269 + * @skb: pointer to skb
1270 + * Return: uid of the socket owner on success or overflowuid if failed.
1271 + *
1272 + * u32 bpf_set_hash(skb, hash)
1273 + * Set full skb->hash.
1274 + * @skb: pointer to skb
1275 + * @hash: hash to set
1276 + *
1277 + * int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
1278 + * Calls setsockopt. Not all opts are available, only those with
1279 + * integer optvals plus TCP_CONGESTION.
1280 + * Supported levels: SOL_SOCKET and IPPROTO_TCP
1281 + * @bpf_socket: pointer to bpf_socket
1282 + * @level: SOL_SOCKET or IPPROTO_TCP
1283 + * @optname: option name
1284 + * @optval: pointer to option value
1285 + * @optlen: length of optval in bytes
1286 + * Return: 0 or negative error
1287 + *
1288 + * int bpf_getsockopt(bpf_socket, level, optname, optval, optlen)
1289 + * Calls getsockopt. Not all opts are available.
1290 + * Supported levels: IPPROTO_TCP
1291 + * @bpf_socket: pointer to bpf_socket
1292 + * @level: IPPROTO_TCP
1293 + * @optname: option name
1294 + * @optval: pointer to option value
1295 + * @optlen: length of optval in bytes
1296 + * Return: 0 or negative error
1297 + *
1298 + * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
1299 + * Grow or shrink room in sk_buff.
1300 + * @skb: pointer to skb
1301 + * @len_diff: (signed) amount of room to grow/shrink
1302 + * @mode: operation mode (enum bpf_adj_room_mode)
1303 + * @flags: reserved for future use
1304 + * Return: 0 on success or negative error code
1305 + *
1306 + * int bpf_sk_redirect_map(map, key, flags)
1307 + * Redirect skb to a sock in map using key as a lookup key for the
1308 + * sock in map.
1309 + * @map: pointer to sockmap
1310 + * @key: key to lookup sock in map
1311 + * @flags: reserved for future use
1312 + * Return: SK_PASS
1313 + *
1314 + * int bpf_sock_map_update(skops, map, key, flags)
1315 + * @skops: pointer to bpf_sock_ops
1316 + * @map: pointer to sockmap to update
1317 + * @key: key to insert/update sock in map
1318 + * @flags: same flags as map update elem
1319 + *
1320 + * int bpf_xdp_adjust_meta(xdp_md, delta)
1321 + * Adjust the xdp_md.data_meta by delta
1322 + * @xdp_md: pointer to xdp_md
1323 + * @delta: A positive/negative integer to be added to xdp_md.data_meta
1324 + * Return: 0 on success or negative on error
1325 + *
1326 + * int bpf_perf_event_read_value(map, flags, buf, buf_size)
1327 + * read perf event counter value and perf event enabled/running time
1328 + * @map: pointer to perf_event_array map
1329 + * @flags: index of event in the map or bitmask flags
1330 + * @buf: buf to fill
1331 + * @buf_size: size of the buf
1332 + * Return: 0 on success or negative error code
1333 + *
1334 + * int bpf_perf_prog_read_value(ctx, buf, buf_size)
1335 + * read perf prog attached perf event counter and enabled/running time
1336 + * @ctx: pointer to ctx
1337 + * @buf: buf to fill
1338 + * @buf_size: size of the buf
1339 + * Return : 0 on success or negative error code
1340 + *
1341 + * int bpf_override_return(pt_regs, rc)
1342 + * @pt_regs: pointer to struct pt_regs
1343 + * @rc: the return value to set
1344 + */
1345 +#define __BPF_FUNC_MAPPER(FN) \
1346 + FN(unspec), \
1347 + FN(map_lookup_elem), \
1348 + FN(map_update_elem), \
1349 + FN(map_delete_elem), \
1350 + FN(probe_read), \
1351 + FN(ktime_get_ns), \
1352 + FN(trace_printk), \
1353 + FN(get_prandom_u32), \
1354 + FN(get_smp_processor_id), \
1355 + FN(skb_store_bytes), \
1356 + FN(l3_csum_replace), \
1357 + FN(l4_csum_replace), \
1358 + FN(tail_call), \
1359 + FN(clone_redirect), \
1360 + FN(get_current_pid_tgid), \
1361 + FN(get_current_uid_gid), \
1362 + FN(get_current_comm), \
1363 + FN(get_cgroup_classid), \
1364 + FN(skb_vlan_push), \
1365 + FN(skb_vlan_pop), \
1366 + FN(skb_get_tunnel_key), \
1367 + FN(skb_set_tunnel_key), \
1368 + FN(perf_event_read), \
1369 + FN(redirect), \
1370 + FN(get_route_realm), \
1371 + FN(perf_event_output), \
1372 + FN(skb_load_bytes), \
1373 + FN(get_stackid), \
1374 + FN(csum_diff), \
1375 + FN(skb_get_tunnel_opt), \
1376 + FN(skb_set_tunnel_opt), \
1377 + FN(skb_change_proto), \
1378 + FN(skb_change_type), \
1379 + FN(skb_under_cgroup), \
1380 + FN(get_hash_recalc), \
1381 + FN(get_current_task), \
1382 + FN(probe_write_user), \
1383 + FN(current_task_under_cgroup), \
1384 + FN(skb_change_tail), \
1385 + FN(skb_pull_data), \
1386 + FN(csum_update), \
1387 + FN(set_hash_invalid), \
1388 + FN(get_numa_node_id), \
1389 + FN(skb_change_head), \
1390 + FN(xdp_adjust_head), \
1391 + FN(probe_read_str), \
1392 + FN(get_socket_cookie), \
1393 + FN(get_socket_uid), \
1394 + FN(set_hash), \
1395 + FN(setsockopt), \
1396 + FN(skb_adjust_room), \
1397 + FN(redirect_map), \
1398 + FN(sk_redirect_map), \
1399 + FN(sock_map_update), \
1400 + FN(xdp_adjust_meta), \
1401 + FN(perf_event_read_value), \
1402 + FN(perf_prog_read_value), \
1403 + FN(getsockopt), \
1404 + FN(override_return),
1405 +
1406 +/* integer value in 'imm' field of BPF_CALL instruction selects which helper
1407 + * function eBPF program intends to call
1408 + */
1409 +#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
1410 +enum bpf_func_id {
1411 + __BPF_FUNC_MAPPER(__BPF_ENUM_FN)
1412 + __BPF_FUNC_MAX_ID,
1413 +};
1414 +#undef __BPF_ENUM_FN
1415 +
1416 +/* All flags used by eBPF helper functions, placed here. */
1417 +
1418 +/* BPF_FUNC_skb_store_bytes flags. */
1419 +#define BPF_F_RECOMPUTE_CSUM (1ULL << 0)
1420 +#define BPF_F_INVALIDATE_HASH (1ULL << 1)
1421 +
1422 +/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
1423 + * First 4 bits are for passing the header field size.
1424 + */
1425 +#define BPF_F_HDR_FIELD_MASK 0xfULL
1426 +
1427 +/* BPF_FUNC_l4_csum_replace flags. */
1428 +#define BPF_F_PSEUDO_HDR (1ULL << 4)
1429 +#define BPF_F_MARK_MANGLED_0 (1ULL << 5)
1430 +#define BPF_F_MARK_ENFORCE (1ULL << 6)
1431 +
1432 +/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
1433 +#define BPF_F_INGRESS (1ULL << 0)
1434 +
1435 +/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
1436 +#define BPF_F_TUNINFO_IPV6 (1ULL << 0)
1437 +
1438 +/* BPF_FUNC_get_stackid flags. */
1439 +#define BPF_F_SKIP_FIELD_MASK 0xffULL
1440 +#define BPF_F_USER_STACK (1ULL << 8)
1441 +#define BPF_F_FAST_STACK_CMP (1ULL << 9)
1442 +#define BPF_F_REUSE_STACKID (1ULL << 10)
1443 +
1444 +/* BPF_FUNC_skb_set_tunnel_key flags. */
1445 +#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
1446 +#define BPF_F_DONT_FRAGMENT (1ULL << 2)
1447 +
1448 +/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
1449 + * BPF_FUNC_perf_event_read_value flags.
1450 + */
1451 +#define BPF_F_INDEX_MASK 0xffffffffULL
1452 +#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
1453 +/* BPF_FUNC_perf_event_output for sk_buff input context. */
1454 +#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
1455 +
1456 +/* Mode for BPF_FUNC_skb_adjust_room helper. */
1457 +enum bpf_adj_room_mode {
1458 + BPF_ADJ_ROOM_NET,
1459 +};
1460 +
1461 +/* user accessible mirror of in-kernel sk_buff.
1462 + * new fields can only be added to the end of this structure
1463 + */
1464 +struct __sk_buff {
1465 + __u32 len;
1466 + __u32 pkt_type;
1467 + __u32 mark;
1468 + __u32 queue_mapping;
1469 + __u32 protocol;
1470 + __u32 vlan_present;
1471 + __u32 vlan_tci;
1472 + __u32 vlan_proto;
1473 + __u32 priority;
1474 + __u32 ingress_ifindex;
1475 + __u32 ifindex;
1476 + __u32 tc_index;
1477 + __u32 cb[5];
1478 + __u32 hash;
1479 + __u32 tc_classid;
1480 + __u32 data;
1481 + __u32 data_end;
1482 + __u32 napi_id;
1483 +
1484 + /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
1485 + __u32 family;
1486 + __u32 remote_ip4; /* Stored in network byte order */
1487 + __u32 local_ip4; /* Stored in network byte order */
1488 + __u32 remote_ip6[4]; /* Stored in network byte order */
1489 + __u32 local_ip6[4]; /* Stored in network byte order */
1490 + __u32 remote_port; /* Stored in network byte order */
1491 + __u32 local_port; /* stored in host byte order */
1492 + /* ... here. */
1493 +
1494 + __u32 data_meta;
1495 +};
1496 +
1497 +struct bpf_tunnel_key {
1498 + __u32 tunnel_id;
1499 + union {
1500 + __u32 remote_ipv4;
1501 + __u32 remote_ipv6[4];
1502 + };
1503 + __u8 tunnel_tos;
1504 + __u8 tunnel_ttl;
1505 + __u16 tunnel_ext;
1506 + __u32 tunnel_label;
1507 +};
1508 +
1509 +/* Generic BPF return codes which all BPF program types may support.
1510 + * The values are binary compatible with their TC_ACT_* counter-part to
1511 + * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
1512 + * programs.
1513 + *
1514 + * XDP is handled separately, see XDP_*.
1515 + */
1516 +enum bpf_ret_code {
1517 + BPF_OK = 0,
1518 + /* 1 reserved */
1519 + BPF_DROP = 2,
1520 + /* 3-6 reserved */
1521 + BPF_REDIRECT = 7,
1522 + /* >127 are reserved for prog type specific return codes */
1523 +};
1524 +
1525 +struct bpf_sock {
1526 + __u32 bound_dev_if;
1527 + __u32 family;
1528 + __u32 type;
1529 + __u32 protocol;
1530 + __u32 mark;
1531 + __u32 priority;
1532 +};
1533 +
1534 +#define XDP_PACKET_HEADROOM 256
1535 +
1536 +/* User return codes for XDP prog type.
1537 + * A valid XDP program must return one of these defined values. All other
1538 + * return codes are reserved for future use. Unknown return codes will
1539 + * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
1540 + */
1541 +enum xdp_action {
1542 + XDP_ABORTED = 0,
1543 + XDP_DROP,
1544 + XDP_PASS,
1545 + XDP_TX,
1546 + XDP_REDIRECT,
1547 +};
1548 +
1549 +/* user accessible metadata for XDP packet hook
1550 + * new fields must be added to the end of this structure
1551 + */
1552 +struct xdp_md {
1553 + __u32 data;
1554 + __u32 data_end;
1555 + __u32 data_meta;
1556 + /* Below access go through struct xdp_rxq_info */
1557 + __u32 ingress_ifindex; /* rxq->dev->ifindex */
1558 + __u32 rx_queue_index; /* rxq->queue_index */
1559 +};
1560 +
1561 +enum sk_action {
1562 + SK_DROP = 0,
1563 + SK_PASS,
1564 +};
1565 +
1566 +#define BPF_TAG_SIZE 8
1567 +
1568 +struct bpf_prog_info {
1569 + __u32 type;
1570 + __u32 id;
1571 + __u8 tag[BPF_TAG_SIZE];
1572 + __u32 jited_prog_len;
1573 + __u32 xlated_prog_len;
1574 + __aligned_u64 jited_prog_insns;
1575 + __aligned_u64 xlated_prog_insns;
1576 + __u64 load_time; /* ns since boottime */
1577 + __u32 created_by_uid;
1578 + __u32 nr_map_ids;
1579 + __aligned_u64 map_ids;
1580 + char name[BPF_OBJ_NAME_LEN];
1581 + __u32 ifindex;
1582 + __u64 netns_dev;
1583 + __u64 netns_ino;
1584 +} __attribute__((aligned(8)));
1585 +
1586 +struct bpf_map_info {
1587 + __u32 type;
1588 + __u32 id;
1589 + __u32 key_size;
1590 + __u32 value_size;
1591 + __u32 max_entries;
1592 + __u32 map_flags;
1593 + char name[BPF_OBJ_NAME_LEN];
1594 +} __attribute__((aligned(8)));
1595 +
1596 +/* User bpf_sock_ops struct to access socket values and specify request ops
1597 + * and their replies.
1598 + * Some of these fields are in network (big-endian) byte order and may need
1599 + * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
1600 + * New fields can only be added at the end of this structure
1601 + */
1602 +struct bpf_sock_ops {
1603 + __u32 op;
1604 + union {
1605 + __u32 reply;
1606 + __u32 replylong[4];
1607 + };
1608 + __u32 family;
1609 + __u32 remote_ip4; /* Stored in network byte order */
1610 + __u32 local_ip4; /* Stored in network byte order */
1611 + __u32 remote_ip6[4]; /* Stored in network byte order */
1612 + __u32 local_ip6[4]; /* Stored in network byte order */
1613 + __u32 remote_port; /* Stored in network byte order */
1614 + __u32 local_port; /* stored in host byte order */
1615 + __u32 is_fullsock; /* Some TCP fields are only valid if
1616 + * there is a full socket. If not, the
1617 + * fields read as zero.
1618 + */
1619 + __u32 snd_cwnd;
1620 + __u32 srtt_us; /* Averaged RTT << 3 in usecs */
1621 +};
1622 +
1623 +/* List of known BPF sock_ops operators.
1624 + * New entries can only be added at the end
1625 + */
1626 +enum {
1627 + BPF_SOCK_OPS_VOID,
1628 + BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or
1629 + * -1 if default value should be used
1630 + */
1631 + BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertised
1632 + * window (in packets) or -1 if default
1633 + * value should be used
1634 + */
1635 + BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an
1636 + * active connection is initialized
1637 + */
1638 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an
1639 + * active connection is
1640 + * established
1641 + */
1642 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a
1643 + * passive connection is
1644 + * established
1645 + */
1646 + BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control
1647 + * needs ECN
1648 + */
1649 + BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is
1650 + * based on the path and may be
1651 + * dependent on the congestion control
1652 + * algorithm. In general it indicates
1653 + * a congestion threshold. RTTs above
1654 + * this indicate congestion
1655 + */
1656 +};
1657 +
1658 +#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
1659 +#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */
1660 +
1661 +struct bpf_perf_event_value {
1662 + __u64 counter;
1663 + __u64 enabled;
1664 + __u64 running;
1665 +};
1666 +
1667 +#define BPF_DEVCG_ACC_MKNOD (1ULL << 0)
1668 +#define BPF_DEVCG_ACC_READ (1ULL << 1)
1669 +#define BPF_DEVCG_ACC_WRITE (1ULL << 2)
1670 +
1671 +#define BPF_DEVCG_DEV_BLOCK (1ULL << 0)
1672 +#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
1673 +
1674 +struct bpf_cgroup_dev_ctx {
1675 + /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
1676 + __u32 access_type;
1677 + __u32 major;
1678 + __u32 minor;
1679 +};
1680 +
1681 +#endif /* __LINUX_BPF_H__ */
1682 diff --git a/ebpf/include/linux/bpf_common.h b/ebpf/include/linux/bpf_common.h
1683 new file mode 100644
1684 index 0000000..8b9c72e
1685 --- /dev/null
1686 +++ b/ebpf/include/linux/bpf_common.h
1687 @@ -0,0 +1,57 @@
1688 +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1689 +#ifndef __LINUX_BPF_COMMON_H__
1690 +#define __LINUX_BPF_COMMON_H__
1691 +
1692 +/* Instruction classes */
1693 +#define BPF_CLASS(code) ((code) & 0x07)
1694 +#define BPF_LD 0x00
1695 +#define BPF_LDX 0x01
1696 +#define BPF_ST 0x02
1697 +#define BPF_STX 0x03
1698 +#define BPF_ALU 0x04
1699 +#define BPF_JMP 0x05
1700 +#define BPF_RET 0x06
1701 +#define BPF_MISC 0x07
1702 +
1703 +/* ld/ldx fields */
1704 +#define BPF_SIZE(code) ((code) & 0x18)
1705 +#define BPF_W 0x00 /* u32 */
1706 +#define BPF_H 0x08 /* u16 */
1707 +#define BPF_B 0x10 /* u8 */
1708 +/* eBPF BPF_DW 0x18 u64 */
1709 +#define BPF_MODE(code) ((code) & 0xe0)
1710 +#define BPF_IMM 0x00
1711 +#define BPF_ABS 0x20
1712 +#define BPF_IND 0x40
1713 +#define BPF_MEM 0x60
1714 +#define BPF_LEN 0x80
1715 +#define BPF_MSH 0xa0
1716 +
1717 +/* alu/jmp fields */
1718 +#define BPF_OP(code) ((code) & 0xf0)
1719 +#define BPF_ADD 0x00
1720 +#define BPF_SUB 0x10
1721 +#define BPF_MUL 0x20
1722 +#define BPF_DIV 0x30
1723 +#define BPF_OR 0x40
1724 +#define BPF_AND 0x50
1725 +#define BPF_LSH 0x60
1726 +#define BPF_RSH 0x70
1727 +#define BPF_NEG 0x80
1728 +#define BPF_MOD 0x90
1729 +#define BPF_XOR 0xa0
1730 +
1731 +#define BPF_JA 0x00
1732 +#define BPF_JEQ 0x10
1733 +#define BPF_JGT 0x20
1734 +#define BPF_JGE 0x30
1735 +#define BPF_JSET 0x40
1736 +#define BPF_SRC(code) ((code) & 0x08)
1737 +#define BPF_K 0x00
1738 +#define BPF_X 0x08
1739 +
1740 +#ifndef BPF_MAXINSNS
1741 +#define BPF_MAXINSNS 4096
1742 +#endif
1743 +
1744 +#endif /* __LINUX_BPF_COMMON_H__ */
1745 diff --git a/ebpf/lb.c b/ebpf/lb.c
1746 new file mode 100644
1747 index 0000000..7551781
1748 --- /dev/null
1749 +++ b/ebpf/lb.c
1750 @@ -0,0 +1,109 @@
1751 +/* Copyright (C) 2018 Open Information Security Foundation
1752 + *
1753 + * You can copy, redistribute or modify this Program under the terms of
1754 + * the GNU General Public License version 2 as published by the Free
1755 + * Software Foundation.
1756 + *
1757 + * This program is distributed in the hope that it will be useful,
1758 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1759 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1760 + * GNU General Public License for more details.
1761 + *
1762 + * You should have received a copy of the GNU General Public License
1763 + * version 2 along with this program; if not, write to the Free Software
1764 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
1765 + * 02110-1301, USA.
1766 + */
1767 +
1768 +#include <stddef.h>
1769 +#include <linux/bpf.h>
1770 +
1771 +#include <linux/if_ether.h>
1772 +#include <linux/in.h>
1773 +#include <linux/ip.h>
1774 +#include <linux/in6.h>
1775 +#include <linux/ipv6.h>
1776 +#include <linux/filter.h>
1777 +
1778 +#include "bpf_helpers.h"
1779 +
1780 +#define LINUX_VERSION_CODE 263682
1781 +
1782 +#ifndef __section
1783 +# define __section(x) __attribute__((section(x), used))
1784 +#endif
1785 +
1786 +static __always_inline int ipv4_hash(struct __sk_buff *skb)
1787 +{
1788 + __u32 nhoff;
1789 + __u32 src, dst;
1790 +
1791 + nhoff = skb->cb[0];
1792 + src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
1793 + dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
1794 +
1795 +#if 0
1796 + char fmt[] = "Got addr: %u -> %u\n";
1797 + bpf_trace_printk(fmt, sizeof(fmt), src, dst);
1798 + char fmt2[] = "Got hash %u\n";
1799 + bpf_trace_printk(fmt2, sizeof(fmt2), src + dst);
1800 +#endif
1801 + return src + dst;
1802 +}
1803 +
1804 +static __always_inline int ipv6_hash(struct __sk_buff *skb)
1805 +{
1806 + __u32 nhoff;
1807 + __u32 src, dst, hash;
1808 +
1809 + nhoff = skb->cb[0];
1810 + hash = 0;
1811 + src = load_word(skb, nhoff + offsetof(struct ipv6hdr, saddr) + 4 * 0 );
1812 + dst = load_word(skb, nhoff + offsetof(struct ipv6hdr, daddr) + 4 * 0 );
1813 + hash += src + dst;
1814 +
1815 + src = load_word(skb, nhoff + offsetof(struct ipv6hdr, saddr) + 4 * 1 );
1816 + dst = load_word(skb, nhoff + offsetof(struct ipv6hdr, daddr) + 4 * 1 );
1817 + hash += src + dst;
1818 +
1819 + src = load_word(skb, nhoff + offsetof(struct ipv6hdr, saddr) + 4 * 2 );
1820 + dst = load_word(skb, nhoff + offsetof(struct ipv6hdr, daddr) + 4 * 2 );
1821 + hash += src + dst;
1822 +
1823 + src = load_word(skb, nhoff + offsetof(struct ipv6hdr, saddr) + 4 * 3 );
1824 + dst = load_word(skb, nhoff + offsetof(struct ipv6hdr, daddr) + 4 * 3 );
1825 + hash += src + dst;
1826 +
1827 + return hash;
1828 +}
1829 +
1830 +int __section("loadbalancer") lb(struct __sk_buff *skb) {
1831 + __u32 nhoff = BPF_LL_OFF + ETH_HLEN;
1832 +
1833 + skb->cb[0] = nhoff;
1834 +
1835 + switch (skb->protocol) {
1836 + case __constant_htons(ETH_P_IP):
1837 + return ipv4_hash(skb);
1838 + case __constant_htons(ETH_P_IPV6):
1839 + return ipv6_hash(skb);
1840 + default:
1841 +#if 0
1842 + {
1843 + char fmt[] = "Got proto %u\n";
1844 + bpf_trace_printk(fmt, sizeof(fmt), h_proto);
1845 + break;
1846 + }
1847 +#else
1848 + break;
1849 +#endif
1850 + }
1851 + /* hash on proto by default */
1852 + return skb->protocol;
1853 +}
1854 +
1855 +char __license[] __section("license") = "GPL";
1856 +
1857 +/* libbpf needs version section to check sync of eBPF code and kernel
1858 + * but socket filter don't need it */
1859 +__u32 __version __section("version") = LINUX_VERSION_CODE;
1860 diff --git a/ebpf/vlan_filter.c b/ebpf/vlan_filter.c
1861 new file mode 100644
1862 index 0000000..d797b94
1863 --- /dev/null
1864 +++ b/ebpf/vlan_filter.c
1865 @@ -0,0 +1,40 @@
1866 +/* Copyright (C) 2018 Open Information Security Foundation
1867 + *
1868 + * You can copy, redistribute or modify this Program under the terms of
1869 + * the GNU General Public License version 2 as published by the Free
1870 + * Software Foundation.
1871 + *
1872 + * This program is distributed in the hope that it will be useful,
1873 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1874 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1875 + * GNU General Public License for more details.
1876 + *
1877 + * You should have received a copy of the GNU General Public License
1878 + * version 2 along with this program; if not, write to the Free Software
1879 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
1880 + * 02110-1301, USA.
1881 + */
1882 +
1883 +#include <stddef.h>
1884 +#include <linux/bpf.h>
1885 +
1886 +#include "bpf_helpers.h"
1887 +
1888 +#define LINUX_VERSION_CODE 263682
1889 +
1890 +int SEC("filter") hashfilter(struct __sk_buff *skb) {
1891 + __u16 vlan_id = skb->vlan_tci & 0x0fff;
1892 + /* accept VLAN 2 and 4 and drop the rest */
1893 + switch (vlan_id) {
1894 + case 2:
1895 + case 4:
1896 + return -1;
1897 + default:
1898 + return 0;
1899 + }
1900 + return 0;
1901 +}
1902 +
1903 +char __license[] SEC("license") = "GPL";
1904 +
1905 +__u32 __version SEC("version") = LINUX_VERSION_CODE;
1906 diff --git a/ebpf/xdp_filter.c b/ebpf/xdp_filter.c
1907 new file mode 100644
1908 index 0000000..c83a6fa
1909 --- /dev/null
1910 +++ b/ebpf/xdp_filter.c
1911 @@ -0,0 +1,377 @@
1912 +/* Copyright (C) 2018 Open Information Security Foundation
1913 + *
1914 + * You can copy, redistribute or modify this Program under the terms of
1915 + * the GNU General Public License version 2 as published by the Free
1916 + * Software Foundation.
1917 + *
1918 + * This program is distributed in the hope that it will be useful,
1919 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1920 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1921 + * GNU General Public License for more details.
1922 + *
1923 + * You should have received a copy of the GNU General Public License
1924 + * version 2 along with this program; if not, write to the Free Software
1925 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
1926 + * 02110-1301, USA.
1927 + */
1928 +
1929 +#define KBUILD_MODNAME "foo"
1930 +#include <stddef.h>
1931 +#include <linux/bpf.h>
1932 +
1933 +#include <linux/in.h>
1934 +#include <linux/if_ether.h>
1935 +#include <linux/if_packet.h>
1936 +#include <linux/if_vlan.h>
1937 +#include <linux/ip.h>
1938 +#include <linux/ipv6.h>
1939 +#include <linux/tcp.h>
1940 +#include <linux/udp.h>
1941 +#include "bpf_helpers.h"
1942 +
1943 +#include "hash_func01.h"
1944 +
1945 +#define LINUX_VERSION_CODE 263682
1946 +
1947 +/* Hashing initval */
1948 +#define INITVAL 15485863
1949 +
1950 +/* Set BUILD_CPUMAP to 0 if you want to run XDP bypass on kernel
1951 + * older than 4.15 */
1952 +#define BUILD_CPUMAP 1
1953 +/* Increase CPUMAP_MAX_CPUS if ever you have more than 64 CPUs */
1954 +#define CPUMAP_MAX_CPUS 64
1955 +
1956 +struct vlan_hdr {
1957 + __u16 h_vlan_TCI;
1958 + __u16 h_vlan_encapsulated_proto;
1959 +};
1960 +
1961 +struct flowv4_keys {
1962 + __u32 src;
1963 + __u32 dst;
1964 + union {
1965 + __u32 ports;
1966 + __u16 port16[2];
1967 + };
1968 + __u32 ip_proto;
1969 +} __attribute__((__aligned__(8)));
1970 +
1971 +struct flowv6_keys {
1972 + __u32 src[4];
1973 + __u32 dst[4];
1974 + union {
1975 + __u32 ports;
1976 + __u16 port16[2];
1977 + };
1978 + __u32 ip_proto;
1979 +} __attribute__((__aligned__(8)));
1980 +
1981 +struct pair {
1982 + __u64 time;
1983 + __u64 packets;
1984 + __u64 bytes;
1985 +} __attribute__((__aligned__(8)));
1986 +
1987 +struct bpf_map_def SEC("maps") flow_table_v4 = {
1988 + .type = BPF_MAP_TYPE_PERCPU_HASH,
1989 + .key_size = sizeof(struct flowv4_keys),
1990 + .value_size = sizeof(struct pair),
1991 + .max_entries = 32768,
1992 +};
1993 +
1994 +struct bpf_map_def SEC("maps") flow_table_v6 = {
1995 + .type = BPF_MAP_TYPE_PERCPU_HASH,
1996 + .key_size = sizeof(struct flowv6_keys),
1997 + .value_size = sizeof(struct pair),
1998 + .max_entries = 32768,
1999 +};
2000 +
2001 +#if BUILD_CPUMAP
2002 +/* Special map type that can XDP_REDIRECT frames to another CPU */
2003 +struct bpf_map_def SEC("maps") cpu_map = {
2004 + .type = BPF_MAP_TYPE_CPUMAP,
2005 + .key_size = sizeof(__u32),
2006 + .value_size = sizeof(__u32),
2007 + .max_entries = CPUMAP_MAX_CPUS,
2008 +};
2009 +
2010 +struct bpf_map_def SEC("maps") cpus_available = {
2011 + .type = BPF_MAP_TYPE_ARRAY,
2012 + .key_size = sizeof(__u32),
2013 + .value_size = sizeof(__u32),
2014 + .max_entries = CPUMAP_MAX_CPUS,
2015 +};
2016 +
2017 +struct bpf_map_def SEC("maps") cpus_count = {
2018 + .type = BPF_MAP_TYPE_ARRAY,
2019 + .key_size = sizeof(__u32),
2020 + .value_size = sizeof(__u32),
2021 + .max_entries = 1,
2022 +};
2023 +#endif
2024 +
2025 +struct bpf_map_def SEC("maps") tx_peer = {
2026 + .type = BPF_MAP_TYPE_DEVMAP,
2027 + .key_size = sizeof(int),
2028 + .value_size = sizeof(int),
2029 + .max_entries = 1,
2030 +};
2031 +
2032 +struct bpf_map_def SEC("maps") tx_peer_int = {
2033 + .type = BPF_MAP_TYPE_ARRAY,
2034 + .key_size = sizeof(int),
2035 + .value_size = sizeof(int),
2036 + .max_entries = 1,
2037 +};
2038 +
2039 +static __always_inline int get_sport(void *trans_data, void *data_end,
2040 + __u8 protocol)
2041 +{
2042 + struct tcphdr *th;
2043 + struct udphdr *uh;
2044 +
2045 + switch (protocol) {
2046 + case IPPROTO_TCP:
2047 + th = (struct tcphdr *)trans_data;
2048 + if ((void *)(th + 1) > data_end)
2049 + return -1;
2050 + return th->source;
2051 + case IPPROTO_UDP:
2052 + uh = (struct udphdr *)trans_data;
2053 + if ((void *)(uh + 1) > data_end)
2054 + return -1;
2055 + return uh->dest;
2056 + default:
2057 + return 0;
2058 + }
2059 +}
2060 +
2061 +static __always_inline int get_dport(void *trans_data, void *data_end,
2062 + __u8 protocol)
2063 +{
2064 + struct tcphdr *th;
2065 + struct udphdr *uh;
2066 +
2067 + switch (protocol) {
2068 + case IPPROTO_TCP:
2069 + th = (struct tcphdr *)trans_data;
2070 + if ((void *)(th + 1) > data_end)
2071 + return -1;
2072 + return th->dest;
2073 + case IPPROTO_UDP:
2074 + uh = (struct udphdr *)trans_data;
2075 + if ((void *)(uh + 1) > data_end)
2076 + return -1;
2077 + return uh->dest;
2078 + default:
2079 + return 0;
2080 + }
2081 +}
2082 +
2083 +static int __always_inline filter_ipv4(void *data, __u64 nh_off, void *data_end)
2084 +{
2085 + struct iphdr *iph = data + nh_off;
2086 + int dport;
2087 + int sport;
2088 + struct flowv4_keys tuple;
2089 + struct pair *value;
2090 + __u32 key0 = 0;
2091 +#if BUILD_CPUMAP
2092 + __u32 cpu_dest;
2093 + __u32 *cpu_max = bpf_map_lookup_elem(&cpus_count, &key0);
2094 + __u32 *cpu_selected;
2095 + __u32 cpu_hash;
2096 +#endif
2097 + int *iface_peer;
2098 + int tx_port = 0;
2099 +
2100 + if ((void *)(iph + 1) > data_end)
2101 + return XDP_PASS;
2102 +
2103 + tuple.ip_proto = (__u32) iph->protocol;
2104 + tuple.src = iph->saddr;
2105 + tuple.dst = iph->daddr;
2106 +
2107 + dport = get_dport(iph + 1, data_end, iph->protocol);
2108 + if (dport == -1)
2109 + return XDP_PASS;
2110 +
2111 + sport = get_sport(iph + 1, data_end, iph->protocol);
2112 + if (sport == -1)
2113 + return XDP_PASS;
2114 +
2115 + tuple.port16[0] = (__u16)sport;
2116 + tuple.port16[1] = (__u16)dport;
2117 + value = bpf_map_lookup_elem(&flow_table_v4, &tuple);
2118 +#if 0
2119 + {
2120 + char fmt[] = "Current flow src: %u:%d\n";
2121 + char fmt1[] = "Current flow dst: %u:%d\n";
2122 + bpf_trace_printk(fmt, sizeof(fmt), tuple.src, tuple.port16[0]);
2123 + bpf_trace_printk(fmt1, sizeof(fmt1), tuple.dst, tuple.port16[1]);
2124 + }
2125 +#endif
2126 + if (value) {
2127 +#if 0
2128 + char fmt[] = "Found flow v4: %u %d -> %d\n";
2129 + bpf_trace_printk(fmt, sizeof(fmt), tuple.src, sport, dport);
2130 + char fmt[] = "Data: t:%lu p:%lu n:%lu\n";
2131 + bpf_trace_printk(fmt, sizeof(fmt), value->time, value->packets, value->bytes);
2132 +#endif
2133 + value->time = bpf_ktime_get_ns();
2134 + value->packets++;
2135 + value->bytes += data_end - data;
2136 +
2137 + iface_peer = bpf_map_lookup_elem(&tx_peer_int, &key0);
2138 + if (!iface_peer) {
2139 + return XDP_DROP;
2140 + } else {
2141 + return bpf_redirect_map(&tx_peer, tx_port, 0);
2142 + }
2143 + }
2144 +
2145 +#if BUILD_CPUMAP
2146 + /* IP-pairs + protocol (UDP/TCP/ICMP) hit same CPU */
2147 + cpu_hash = tuple.src + tuple.dst;
2148 + cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + iph->protocol);
2149 +
2150 + if (cpu_max && *cpu_max) {
2151 + cpu_dest = cpu_hash % *cpu_max;
2152 + cpu_selected = bpf_map_lookup_elem(&cpus_available, &cpu_dest);
2153 + if (!cpu_selected)
2154 + return XDP_ABORTED;
2155 + cpu_dest = *cpu_selected;
2156 + return bpf_redirect_map(&cpu_map, cpu_dest, 0);
2157 + } else {
2158 + return XDP_PASS;
2159 + }
2160 +#else
2161 + return XDP_PASS;
2162 +#endif
2163 +}
2164 +
2165 +static int __always_inline filter_ipv6(void *data, __u64 nh_off, void *data_end)
2166 +{
2167 + struct ipv6hdr *ip6h = data + nh_off;
2168 + int dport;
2169 + int sport;
2170 + struct flowv6_keys tuple;
2171 + struct pair *value;
2172 + __u32 key0 = 0;
2173 +#if BUILD_CPUMAP
2174 + __u32 cpu_dest;
2175 + int *cpu_max = bpf_map_lookup_elem(&cpus_count, &key0);
2176 + __u32 *cpu_selected;
2177 + __u32 cpu_hash;
2178 +#endif
2179 + int tx_port = 0;
2180 + int *iface_peer;
2181 +
2182 + if ((void *)(ip6h + 1) > data_end)
2183 + return 0;
2184 + if (!((ip6h->nexthdr == IPPROTO_UDP) || (ip6h->nexthdr == IPPROTO_TCP)))
2185 + return XDP_PASS;
2186 +
2187 + dport = get_dport(ip6h + 1, data_end, ip6h->nexthdr);
2188 + if (dport == -1)
2189 + return XDP_PASS;
2190 +
2191 + sport = get_sport(ip6h + 1, data_end, ip6h->nexthdr);
2192 + if (sport == -1)
2193 + return XDP_PASS;
2194 +
2195 + tuple.ip_proto = ip6h->nexthdr;
2196 + __builtin_memcpy(tuple.src, ip6h->saddr.s6_addr32, sizeof(tuple.src));
2197 + __builtin_memcpy(tuple.dst, ip6h->daddr.s6_addr32, sizeof(tuple.dst));
2198 + tuple.port16[0] = sport;
2199 + tuple.port16[1] = dport;
2200 +
2201 + value = bpf_map_lookup_elem(&flow_table_v6, &tuple);
2202 + if (value) {
2203 +#if 0
2204 + char fmt6[] = "Found IPv6 flow: %d -> %d\n";
2205 + bpf_trace_printk(fmt6, sizeof(fmt6), sport, dport);
2206 +#endif
2207 + value->packets++;
2208 + value->bytes += data_end - data;
2209 + value->time = bpf_ktime_get_ns();
2210 +
2211 + iface_peer = bpf_map_lookup_elem(&tx_peer_int, &key0);
2212 + if (!iface_peer) {
2213 + return XDP_DROP;
2214 + } else {
2215 + return bpf_redirect_map(&tx_peer, tx_port, 0);
2216 + }
2217 + }
2218 +
2219 +#if BUILD_CPUMAP
2220 + /* IP-pairs + protocol (UDP/TCP/ICMP) hit same CPU */
2221 + cpu_hash = tuple.src[0] + tuple.dst[0];
2222 + cpu_hash += tuple.src[1] + tuple.dst[1];
2223 + cpu_hash += tuple.src[2] + tuple.dst[2];
2224 + cpu_hash += tuple.src[3] + tuple.dst[3];
2225 + cpu_hash = SuperFastHash((char *)&cpu_hash, 4, ip6h->nexthdr);
2226 +
2227 + if (cpu_max && *cpu_max) {
2228 + cpu_dest = cpu_hash % *cpu_max;
2229 + cpu_selected = bpf_map_lookup_elem(&cpus_available, &cpu_dest);
2230 + if (!cpu_selected)
2231 + return XDP_ABORTED;
2232 + cpu_dest = *cpu_selected;
2233 + return bpf_redirect_map(&cpu_map, cpu_dest, 0);
2234 + } else {
2235 + return XDP_PASS;
2236 + }
2237 +#else
2238 + return XDP_PASS;
2239 +#endif
2240 +}
2241 +
2242 +int SEC("xdp") xdp_hashfilter(struct xdp_md *ctx)
2243 +{
2244 + void *data_end = (void *)(long)ctx->data_end;
2245 + void *data = (void *)(long)ctx->data;
2246 + struct ethhdr *eth = data;
2247 + int rc = XDP_PASS;
2248 + __u16 h_proto;
2249 + __u64 nh_off;
2250 +
2251 + nh_off = sizeof(*eth);
2252 + if (data + nh_off > data_end)
2253 + return rc;
2254 +
2255 + h_proto = eth->h_proto;
2256 +
2257 + if (h_proto == __constant_htons(ETH_P_8021Q) || h_proto == __constant_htons(ETH_P_8021AD)) {
2258 + struct vlan_hdr *vhdr;
2259 +
2260 + vhdr = data + nh_off;
2261 + nh_off += sizeof(struct vlan_hdr);
2262 + if (data + nh_off > data_end)
2263 + return rc;
2264 + h_proto = vhdr->h_vlan_encapsulated_proto;
2265 + }
2266 + if (h_proto == __constant_htons(ETH_P_8021Q) || h_proto == __constant_htons(ETH_P_8021AD)) {
2267 + struct vlan_hdr *vhdr;
2268 +
2269 + vhdr = data + nh_off;
2270 + nh_off += sizeof(struct vlan_hdr);
2271 + if (data + nh_off > data_end)
2272 + return rc;
2273 + h_proto = vhdr->h_vlan_encapsulated_proto;
2274 + }
2275 +
2276 + if (h_proto == __constant_htons(ETH_P_IP))
2277 + return filter_ipv4(data, nh_off, data_end);
2278 + else if (h_proto == __constant_htons(ETH_P_IPV6))
2279 + return filter_ipv6(data, nh_off, data_end);
2280 + else
2281 + rc = XDP_PASS;
2282 +
2283 + return rc;
2284 +}
2285 +
2286 +char __license[] SEC("license") = "GPL";
2287 +
2288 +__u32 __version SEC("version") = LINUX_VERSION_CODE;
+0
-93
debian/patches/clang-ebpf.patch less more
0 From: Hilko Bengen <bengen@debian.org>
1 Date: Thu, 21 Feb 2019 09:34:41 +0100
2 Subject: ebpf: Use $(CLANG) to build eBPF programs
3
4 This change makes it possible to generate the eBPF programs even if
5 Suricata itself is built a different C compiler.
6
7 Implements Feature https://redmine.openinfosecfoundation.org/issues/2789
8 ---
9 configure.ac | 60 ++++++++++++++++----------------------------------------
10 ebpf/Makefile.am | 2 --
11 2 files changed, 17 insertions(+), 45 deletions(-)
12
13 --- a/configure.ac
14 +++ b/configure.ac
15 @@ -416,49 +416,23 @@
16 AS_HELP_STRING([--enable-ebpf-build], [Enable compilation of ebpf files]),[enable_ebpf_build=$enableval],[enable_ebpf_build=no])
17 AM_CONDITIONAL([BUILD_EBPF], [test "x$enable_ebpf_build" = "xyes"])
18
19 - if test "x$enable_ebpf_build" = "xyes"; then
20 - if echo $CC | grep clang; then
21 - if test "x$CC" = "xclang"; then
22 - AC_PATH_PROG(HAVE_LLC, llc, "no")
23 - if test "$HAVE_LLC" != "no"; then
24 - LLC="llc"
25 - AC_SUBST(LLC)
26 - else
27 - llc_version_line=$($CC --version|$GREP version)
28 - llc_version=$(echo $llc_version_line| cut -d '(' -f 1 | $GREP -E -o '@<:@0-9@:>@\.@<:@0-9@:>@')
29 - AC_PATH_PROG(HAVE_LLC, "llc-$llc_version", "no")
30 - if test "$HAVE_LLC" != "no"; then
31 - LLC="llc-$llc_version"
32 - AC_SUBST(LLC)
33 - else
34 - echo "unable to find llc needed to build ebpf files"
35 - exit 1
36 - fi
37 - fi
38 - else
39 - llc_version=$(echo $CC | cut -d '-' -f 2)
40 - AC_PATH_PROG(HAVE_LLC, "llc-$llc_version", "no")
41 - if test "$HAVE_LLC" != "no"; then
42 - LLC="llc-$llc_version"
43 - AC_SUBST(LLC)
44 - else
45 - llc_version_line=$($CC --version|$GREP version)
46 - llc_version=$(echo $llc_version_line| cut -d '(' -f 1 | $GREP -E -o '@<:@0-9@:>@\.@<:@0-9@:>@')
47 - AC_PATH_PROG(HAVE_LLC, "llc-$llc_version", "no")
48 - if test "$HAVE_LLC" != "no"; then
49 - LLC="llc-$llc_version"
50 - AC_SUBST(LLC)
51 - else
52 - echo "unable to find llc needed to build ebpf files"
53 - exit 1
54 - fi
55 - fi
56 - fi
57 - else
58 - echo "clang needed to build ebpf files"
59 - exit 1
60 - fi
61 - fi
62 + AS_IF([test "x$enable_ebpf_build" = "xyes"],
63 + [
64 + AS_IF([test "$CLANG" != no],
65 + [
66 + AC_PATH_PROG(LLC, "llc", "no")
67 + AS_IF([test "$LLC" != "no"],
68 + [AC_SUBST(LLC)],
69 + [
70 + llc_version=$(CLANG --version | awk '/^clang version/ { print $3 }' | cut -d. -f1,2)
71 + AC_PATH_PROG(LLC, "llc-$llc_version", "no")
72 + AS_IF([test "$LLC" != "no"],
73 + [AC_SUBST(LLC)],
74 + [AC_MSG_ERROR([unable to find llc or llc-$llc_version needed to build ebpf files])])
75 + ])
76 + ],
77 + [AC_MSG_ERROR([clang needed to build ebpf files])])
78 + ])
79
80 # enable workaround for old barnyard2 for unified alert output
81 AC_ARG_ENABLE(old-barnyard2,
82 --- a/ebpf/Makefile.am
83 +++ b/ebpf/Makefile.am
84 @@ -3,8 +3,6 @@
85 # Maintaining a local copy of UAPI linux/bpf.h
86 BPF_CFLAGS = -Iinclude
87
88 -CLANG = ${CC}
89 -
90 BPF_TARGETS = lb.bpf
91 BPF_TARGETS += filter.bpf
92 BPF_TARGETS += bypass_filter.bpf
+0
-16
debian/patches/python3-suricatactl.patch less more
0 Description: make suricatactl Python3 compatible
1 Author: Sascha Steinbiss <satta@debian.org>
2 Bug: https://redmine.openinfosecfoundation.org/issues/3157
3 Last-Update: 2019-09-18
4 --- a/python/suricata/ctl/main.py
5 +++ b/python/suricata/ctl/main.py
6 @@ -47,4 +47,8 @@
7
8 args = parser.parse_args()
9
10 - args.func(args)
11 + try:
12 + func = args.func
13 + except AttributeError:
14 + parser.error("too few arguments")
15 + func(args)