Mirror of https://github.com/safing/portmaster
Add TCP process detection with eBPF
commit 0caa3e792c (parent efe8cd2fda)
16 changed files with 127082 additions and 5 deletions
firewall/interception/ebpf/bpf_bpfeb.go (new file, 133 lines)
@@ -0,0 +1,133 @@
// Code generated by bpf2go; DO NOT EDIT.
//go:build arm64be || armbe || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64
// +build arm64be armbe mips mips64 mips64p32 ppc64 s390 s390x sparc sparc64

package ebpf

import (
    "bytes"
    _ "embed"
    "fmt"
    "io"

    "github.com/cilium/ebpf"
)

type bpfEvent struct {
    Saddr     [4]uint32
    Daddr     [4]uint32
    Sport     uint16
    Dport     uint16
    Pid       uint32
    IpVersion uint8
    _         [3]byte
}

// loadBpf returns the embedded CollectionSpec for bpf.
func loadBpf() (*ebpf.CollectionSpec, error) {
    reader := bytes.NewReader(_BpfBytes)
    spec, err := ebpf.LoadCollectionSpecFromReader(reader)
    if err != nil {
        return nil, fmt.Errorf("can't load bpf: %w", err)
    }

    return spec, err
}

// loadBpfObjects loads bpf and converts it into a struct.
//
// The following types are suitable as obj argument:
//
//	*bpfObjects
//	*bpfPrograms
//	*bpfMaps
//
// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
func loadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
    spec, err := loadBpf()
    if err != nil {
        return err
    }

    return spec.LoadAndAssign(obj, opts)
}

// bpfSpecs contains maps and programs before they are loaded into the kernel.
//
// It can be passed to ebpf.CollectionSpec.Assign.
type bpfSpecs struct {
    bpfProgramSpecs
    bpfMapSpecs
}

// bpfProgramSpecs contains programs before they are loaded into the kernel.
//
// It can be passed to ebpf.CollectionSpec.Assign.
type bpfProgramSpecs struct {
    TcpV4Connect *ebpf.ProgramSpec `ebpf:"tcp_v4_connect"`
    TcpV6Connect *ebpf.ProgramSpec `ebpf:"tcp_v6_connect"`
}

// bpfMapSpecs contains maps before they are loaded into the kernel.
//
// It can be passed to ebpf.CollectionSpec.Assign.
type bpfMapSpecs struct {
    Events *ebpf.MapSpec `ebpf:"events"`
}

// bpfObjects contains all objects after they have been loaded into the kernel.
//
// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
type bpfObjects struct {
    bpfPrograms
    bpfMaps
}

func (o *bpfObjects) Close() error {
    return _BpfClose(
        &o.bpfPrograms,
        &o.bpfMaps,
    )
}

// bpfMaps contains all maps after they have been loaded into the kernel.
//
// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
type bpfMaps struct {
    Events *ebpf.Map `ebpf:"events"`
}

func (m *bpfMaps) Close() error {
    return _BpfClose(
        m.Events,
    )
}

// bpfPrograms contains all programs after they have been loaded into the kernel.
//
// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
type bpfPrograms struct {
    TcpV4Connect *ebpf.Program `ebpf:"tcp_v4_connect"`
    TcpV6Connect *ebpf.Program `ebpf:"tcp_v6_connect"`
}

func (p *bpfPrograms) Close() error {
    return _BpfClose(
        p.TcpV4Connect,
        p.TcpV6Connect,
    )
}

func _BpfClose(closers ...io.Closer) error {
    for _, closer := range closers {
        if err := closer.Close(); err != nil {
            return err
        }
    }
    return nil
}

// Do not access this directly.
//
//go:embed bpf_bpfeb.o
var _BpfBytes []byte
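Editor's note: both generated variants share the same bpfEvent layout and differ only in build tags and the embedded object file. As a minimal sketch of how a raw sample might be decoded into this struct on the Go side, here is a standalone example; the eventIP helper and the test values are illustrative additions, not part of the commit, and assume the BPF program copies addresses verbatim in network byte order and that the host is little-endian:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "net"
)

// bpfEvent mirrors the generated struct above.
type bpfEvent struct {
    Saddr     [4]uint32
    Daddr     [4]uint32
    Sport     uint16
    Dport     uint16
    Pid       uint32
    IpVersion uint8
    _         [3]byte
}

// eventIP rebuilds a net.IP from one of the [4]uint32 address fields.
// Writing the words back little-endian restores the original in-memory
// bytes on a little-endian host, which are in network byte order.
func eventIP(addr [4]uint32, ipVersion uint8) net.IP {
    buf := make([]byte, 16)
    for i, word := range addr {
        binary.LittleEndian.PutUint32(buf[i*4:], word)
    }
    if ipVersion == 4 {
        return net.IP(buf[:4]) // IPv4 uses only the first word
    }
    return net.IP(buf)
}

func main() {
    // In the real consumer the raw bytes come from the events map; here
    // we fake a single IPv4 sample for 127.0.0.1.
    raw := make([]byte, 44)              // binary.Size of bpfEvent
    copy(raw[0:4], []byte{127, 0, 0, 1}) // Saddr, network byte order
    raw[40] = 4                          // IpVersion (offset 32+2+2+4)

    var ev bpfEvent
    if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &ev); err != nil {
        panic(err)
    }
    fmt.Println(eventIP(ev.Saddr, ev.IpVersion)) // prints 127.0.0.1
}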
firewall/interception/ebpf/bpf_bpfel.go (new file, 133 lines)
@@ -0,0 +1,133 @@
// Code generated by bpf2go; DO NOT EDIT.
//go:build 386 || amd64 || amd64p32 || arm || arm64 || mips64le || mips64p32le || mipsle || ppc64le || riscv64
// +build 386 amd64 amd64p32 arm arm64 mips64le mips64p32le mipsle ppc64le riscv64

package ebpf

import (
    "bytes"
    _ "embed"
    "fmt"
    "io"

    "github.com/cilium/ebpf"
)

type bpfEvent struct {
    Saddr     [4]uint32
    Daddr     [4]uint32
    Sport     uint16
    Dport     uint16
    Pid       uint32
    IpVersion uint8
    _         [3]byte
}

// loadBpf returns the embedded CollectionSpec for bpf.
func loadBpf() (*ebpf.CollectionSpec, error) {
    reader := bytes.NewReader(_BpfBytes)
    spec, err := ebpf.LoadCollectionSpecFromReader(reader)
    if err != nil {
        return nil, fmt.Errorf("can't load bpf: %w", err)
    }

    return spec, err
}

// loadBpfObjects loads bpf and converts it into a struct.
//
// The following types are suitable as obj argument:
//
//	*bpfObjects
//	*bpfPrograms
//	*bpfMaps
//
// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
func loadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
    spec, err := loadBpf()
    if err != nil {
        return err
    }

    return spec.LoadAndAssign(obj, opts)
}

// bpfSpecs contains maps and programs before they are loaded into the kernel.
//
// It can be passed to ebpf.CollectionSpec.Assign.
type bpfSpecs struct {
    bpfProgramSpecs
    bpfMapSpecs
}

// bpfProgramSpecs contains programs before they are loaded into the kernel.
//
// It can be passed to ebpf.CollectionSpec.Assign.
type bpfProgramSpecs struct {
    TcpV4Connect *ebpf.ProgramSpec `ebpf:"tcp_v4_connect"`
    TcpV6Connect *ebpf.ProgramSpec `ebpf:"tcp_v6_connect"`
}

// bpfMapSpecs contains maps before they are loaded into the kernel.
//
// It can be passed to ebpf.CollectionSpec.Assign.
type bpfMapSpecs struct {
    Events *ebpf.MapSpec `ebpf:"events"`
}

// bpfObjects contains all objects after they have been loaded into the kernel.
//
// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
type bpfObjects struct {
    bpfPrograms
    bpfMaps
}

func (o *bpfObjects) Close() error {
    return _BpfClose(
        &o.bpfPrograms,
        &o.bpfMaps,
    )
}

// bpfMaps contains all maps after they have been loaded into the kernel.
//
// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
type bpfMaps struct {
    Events *ebpf.Map `ebpf:"events"`
}

func (m *bpfMaps) Close() error {
    return _BpfClose(
        m.Events,
    )
}

// bpfPrograms contains all programs after they have been loaded into the kernel.
//
// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
type bpfPrograms struct {
    TcpV4Connect *ebpf.Program `ebpf:"tcp_v4_connect"`
    TcpV6Connect *ebpf.Program `ebpf:"tcp_v6_connect"`
}

func (p *bpfPrograms) Close() error {
    return _BpfClose(
        p.TcpV4Connect,
        p.TcpV6Connect,
    )
}

func _BpfClose(closers ...io.Closer) error {
    for _, closer := range closers {
        if err := closer.Close(); err != nil {
            return err
        }
    }
    return nil
}

// Do not access this directly.
//
//go:embed bpf_bpfel.o
var _BpfBytes []byte
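Editor's note: the commit does not show the event consumer itself, so the following is only a sketch of how these bindings are typically wired up with cilium/ebpf: load the objects, attach both programs as kprobes, and stream samples from the events map. The function name watchConnects is hypothetical; the sketch assumes a recent cilium/ebpf and that events is a perf event array (a ring buffer map would use the ringbuf package instead):

//go:build linux

package ebpf

import (
    "bytes"
    "encoding/binary"
    "log"
    "os"

    "github.com/cilium/ebpf/link"
    "github.com/cilium/ebpf/perf"
    "github.com/cilium/ebpf/rlimit"
)

// watchConnects is a hypothetical consumer living next to the generated
// bindings. It logs one line per observed TCP connect until a read fails.
func watchConnects() error {
    // Lift the memlock rlimit so maps can be created on older kernels.
    if err := rlimit.RemoveMemlock(); err != nil {
        return err
    }

    var objs bpfObjects
    if err := loadBpfObjects(&objs, nil); err != nil {
        return err
    }
    defer objs.Close()

    // Attach the generated programs to the kernel connect functions.
    kp4, err := link.Kprobe("tcp_v4_connect", objs.TcpV4Connect, nil)
    if err != nil {
        return err
    }
    defer kp4.Close()

    kp6, err := link.Kprobe("tcp_v6_connect", objs.TcpV6Connect, nil)
    if err != nil {
        return err
    }
    defer kp6.Close()

    rd, err := perf.NewReader(objs.Events, os.Getpagesize())
    if err != nil {
        return err
    }
    defer rd.Close()

    for {
        record, err := rd.Read()
        if err != nil {
            return err
        }
        if record.LostSamples > 0 {
            continue // the buffer overflowed and samples were dropped
        }

        var event bpfEvent
        if err := binary.Read(bytes.NewReader(record.RawSample), binary.LittleEndian, &event); err != nil {
            continue
        }
        log.Printf("pid %d opened a TCP connection from port %d", event.Pid, event.Sport)
    }
}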
firewall/interception/ebpf/packet.go (new file, 49 lines)
@@ -0,0 +1,49 @@
//go:build linux

package ebpf

import (
    pmpacket "github.com/safing/portmaster/network/packet"
)

// infoPacket implements the pmpacket.Packet interface.
type infoPacket struct {
    pmpacket.Base
}

// LoadPacketData does nothing on Linux, as data is always fully parsed.
func (pkt *infoPacket) LoadPacketData() error {
    return nil // fmt.Errorf("can't load data info only packet")
}

func (pkt *infoPacket) Accept() error {
    return nil // fmt.Errorf("can't accept info only packet")
}

func (pkt *infoPacket) Block() error {
    return nil // fmt.Errorf("can't block info only packet")
}

func (pkt *infoPacket) Drop() error {
    return nil // fmt.Errorf("can't drop info only packet")
}

func (pkt *infoPacket) PermanentAccept() error {
    return pkt.Accept()
}

func (pkt *infoPacket) PermanentBlock() error {
    return pkt.Block()
}

func (pkt *infoPacket) PermanentDrop() error {
    return nil // fmt.Errorf("can't drop info only packet")
}

func (pkt *infoPacket) RerouteToNameserver() error {
    return nil // fmt.Errorf("can't reroute info only packet")
}

func (pkt *infoPacket) RerouteToTunnel() error {
    return nil // fmt.Errorf("can't reroute info only packet")
}
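Editor's note: every verdict method above is a deliberate no-op, because this packet exists only to carry connection metadata, never to pass a verdict. The only contract that matters is the interface itself, so one compile-time assertion (an editorial addition, not part of the commit) would guard it:

package ebpf

import (
    pmpacket "github.com/safing/portmaster/network/packet"
)

// Compile-time check that infoPacket really satisfies pmpacket.Packet;
// the build fails here if the interface grows a method this file lacks.
var _ pmpacket.Packet = (*infoPacket)(nil)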
firewall/interception/ebpf/program/bpf/bpf_core_read.h (new file, 484 lines)
@@ -0,0 +1,484 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_CORE_READ_H__
#define __BPF_CORE_READ_H__

/*
 * enum bpf_field_info_kind is passed as a second argument into
 * __builtin_preserve_field_info() built-in to get a specific aspect of
 * a field, captured as a first argument. __builtin_preserve_field_info(field,
 * info_kind) returns __u32 integer and produces BTF field relocation, which
 * is understood and processed by libbpf during BPF object loading. See
 * selftests/bpf for examples.
 */
enum bpf_field_info_kind {
    BPF_FIELD_BYTE_OFFSET = 0,  /* field byte offset */
    BPF_FIELD_BYTE_SIZE = 1,
    BPF_FIELD_EXISTS = 2,       /* field existence in target kernel */
    BPF_FIELD_SIGNED = 3,
    BPF_FIELD_LSHIFT_U64 = 4,
    BPF_FIELD_RSHIFT_U64 = 5,
};

/* second argument to __builtin_btf_type_id() built-in */
enum bpf_type_id_kind {
    BPF_TYPE_ID_LOCAL = 0,      /* BTF type ID in local program */
    BPF_TYPE_ID_TARGET = 1,     /* BTF type ID in target kernel */
};

/* second argument to __builtin_preserve_type_info() built-in */
enum bpf_type_info_kind {
    BPF_TYPE_EXISTS = 0,        /* type existence in target kernel */
    BPF_TYPE_SIZE = 1,          /* type size in target kernel */
    BPF_TYPE_MATCHES = 2,       /* type match in target kernel */
};

/* second argument to __builtin_preserve_enum_value() built-in */
enum bpf_enum_value_kind {
    BPF_ENUMVAL_EXISTS = 0,     /* enum value existence in kernel */
    BPF_ENUMVAL_VALUE = 1,      /* enum value value relocation */
};

#define __CORE_RELO(src, field, info) \
    __builtin_preserve_field_info((src)->field, BPF_FIELD_##info)

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
    bpf_probe_read_kernel( \
            (void *)dst, \
            __CORE_RELO(src, fld, BYTE_SIZE), \
            (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#else
/* semantics of LSHIFT_64 assumes loading values into low-ordered bytes, so
 * for big-endian we need to adjust destination pointer accordingly, based on
 * field byte size
 */
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
    bpf_probe_read_kernel( \
            (void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
            __CORE_RELO(src, fld, BYTE_SIZE), \
            (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#endif

/*
 * Extract bitfield, identified by s->field, and return its value as u64.
 * All this is done in relocatable manner, so bitfield changes such as
 * signedness, bit size, or offset changes are handled automatically.
 * This version of macro is using bpf_probe_read_kernel() to read underlying
 * integer storage. Macro functions as an expression and its return type is
 * bpf_probe_read_kernel()'s return value: 0, on success, <0 on error.
 */
#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({ \
    unsigned long long val = 0; \
    \
    __CORE_BITFIELD_PROBE_READ(&val, s, field); \
    val <<= __CORE_RELO(s, field, LSHIFT_U64); \
    if (__CORE_RELO(s, field, SIGNED)) \
        val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \
    else \
        val = val >> __CORE_RELO(s, field, RSHIFT_U64); \
    val; \
})

/*
 * Extract bitfield, identified by s->field, and return its value as u64.
 * This version of macro is using direct memory reads and should be used from
 * BPF program types that support such functionality (e.g., typed raw
 * tracepoints).
 */
#define BPF_CORE_READ_BITFIELD(s, field) ({ \
    const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
    unsigned long long val; \
    \
    /* This is a so-called barrier_var() operation that makes specified \
     * variable "a black box" for optimizing compiler. \
     * It forces compiler to perform BYTE_OFFSET relocation on p and use \
     * its calculated value in the switch below, instead of applying \
     * the same relocation 4 times for each individual memory load. \
     */ \
    asm volatile("" : "=r"(p) : "0"(p)); \
    \
    switch (__CORE_RELO(s, field, BYTE_SIZE)) { \
    case 1: val = *(const unsigned char *)p; break; \
    case 2: val = *(const unsigned short *)p; break; \
    case 4: val = *(const unsigned int *)p; break; \
    case 8: val = *(const unsigned long long *)p; break; \
    } \
    val <<= __CORE_RELO(s, field, LSHIFT_U64); \
    if (__CORE_RELO(s, field, SIGNED)) \
        val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \
    else \
        val = val >> __CORE_RELO(s, field, RSHIFT_U64); \
    val; \
})

#define ___bpf_field_ref1(field) (field)
#define ___bpf_field_ref2(type, field) (((typeof(type) *)0)->field)
#define ___bpf_field_ref(args...) \
    ___bpf_apply(___bpf_field_ref, ___bpf_narg(args))(args)

/*
 * Convenience macro to check that a field actually exists in the target
 * kernel.
 * Returns:
 *    1, if matching field is present in target kernel;
 *    0, if no matching field found.
 *
 * Supports two forms:
 *   - field reference through variable access:
 *       bpf_core_field_exists(p->my_field);
 *   - field reference through type and field names:
 *       bpf_core_field_exists(struct my_type, my_field).
 */
#define bpf_core_field_exists(field...) \
    __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_EXISTS)

/*
 * Convenience macro to get the byte size of a field. Works for integers,
 * struct/unions, pointers, arrays, and enums.
 *
 * Supports two forms:
 *   - field reference through variable access:
 *       bpf_core_field_size(p->my_field);
 *   - field reference through type and field names:
 *       bpf_core_field_size(struct my_type, my_field).
 */
#define bpf_core_field_size(field...) \
    __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_SIZE)

/*
 * Convenience macro to get field's byte offset.
 *
 * Supports two forms:
 *   - field reference through variable access:
 *       bpf_core_field_offset(p->my_field);
 *   - field reference through type and field names:
 *       bpf_core_field_offset(struct my_type, my_field).
 */
#define bpf_core_field_offset(field...) \
    __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_OFFSET)

/*
 * Convenience macro to get BTF type ID of a specified type, using local BTF
 * information. Return 32-bit unsigned integer with type ID from program's own
 * BTF. Always succeeds.
 */
#define bpf_core_type_id_local(type) \
    __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL)

/*
 * Convenience macro to get BTF type ID of a target kernel's type that matches
 * specified local type.
 * Returns:
 *    - valid 32-bit unsigned type ID in kernel BTF;
 *    - 0, if no matching type was found in a target kernel BTF.
 */
#define bpf_core_type_id_kernel(type) \
    __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET)

/*
 * Convenience macro to check that provided named type
 * (struct/union/enum/typedef) exists in a target kernel.
 * Returns:
 *    1, if such type is present in target kernel's BTF;
 *    0, if no matching type is found.
 */
#define bpf_core_type_exists(type) \
    __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)

/*
 * Convenience macro to check that provided named type
 * (struct/union/enum/typedef) "matches" that in a target kernel.
 * Returns:
 *    1, if the type matches in the target kernel's BTF;
 *    0, if the type does not match any in the target kernel.
 */
#define bpf_core_type_matches(type) \
    __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_MATCHES)

/*
 * Convenience macro to get the byte size of a provided named type
 * (struct/union/enum/typedef) in a target kernel.
 * Returns:
 *    >= 0 size (in bytes), if type is present in target kernel's BTF;
 *    0, if no matching type is found.
 */
#define bpf_core_type_size(type) \
    __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE)

/*
 * Convenience macro to check that provided enumerator value is defined in
 * a target kernel.
 * Returns:
 *    1, if specified enum type and its enumerator value are present in target
 *    kernel's BTF;
 *    0, if no matching enum and/or enum value within that enum is found.
 */
#define bpf_core_enum_value_exists(enum_type, enum_value) \
    __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS)

/*
 * Convenience macro to get the integer value of an enumerator value in
 * a target kernel.
 * Returns:
 *    64-bit value, if specified enum type and its enumerator value are
 *    present in target kernel's BTF;
 *    0, if no matching enum and/or enum value within that enum is found.
 */
#define bpf_core_enum_value(enum_type, enum_value) \
    __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE)

/*
 * bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures
 * offset relocation for source address using __builtin_preserve_access_index()
 * built-in, provided by Clang.
 *
 * __builtin_preserve_access_index() takes as an argument an expression of
 * taking an address of a field within struct/union. It makes compiler emit
 * a relocation, which records BTF type ID describing root struct/union and an
 * accessor string which describes exact embedded field that was used to take
 * an address. See detailed description of this relocation format and
 * semantics in comments to struct bpf_field_reloc in libbpf_internal.h.
 *
 * This relocation allows libbpf to adjust BPF instruction to use correct
 * actual field offset, based on target kernel BTF type that matches original
 * (local) BTF, used to record relocation.
 */
#define bpf_core_read(dst, sz, src) \
    bpf_probe_read_kernel(dst, sz, (const void *)__builtin_preserve_access_index(src))

/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */
#define bpf_core_read_user(dst, sz, src) \
    bpf_probe_read_user(dst, sz, (const void *)__builtin_preserve_access_index(src))
/*
 * bpf_core_read_str() is a thin wrapper around bpf_probe_read_str()
 * additionally emitting BPF CO-RE field relocation for specified source
 * argument.
 */
#define bpf_core_read_str(dst, sz, src) \
    bpf_probe_read_kernel_str(dst, sz, (const void *)__builtin_preserve_access_index(src))

/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */
#define bpf_core_read_user_str(dst, sz, src) \
    bpf_probe_read_user_str(dst, sz, (const void *)__builtin_preserve_access_index(src))

#define ___concat(a, b) a ## b
#define ___apply(fn, n) ___concat(fn, n)
#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N

/*
 * return number of provided arguments; used for switch-based variadic macro
 * definitions (see ___last, ___arrow, etc below)
 */
#define ___narg(...) ___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
/*
 * return 0 if no arguments are passed, N - otherwise; used for
 * recursively-defined macros to specify termination (0) case, and generic
 * (N) case (e.g., ___read_ptrs, ___core_read)
 */
#define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)

#define ___last1(x) x
#define ___last2(a, x) x
#define ___last3(a, b, x) x
#define ___last4(a, b, c, x) x
#define ___last5(a, b, c, d, x) x
#define ___last6(a, b, c, d, e, x) x
#define ___last7(a, b, c, d, e, f, x) x
#define ___last8(a, b, c, d, e, f, g, x) x
#define ___last9(a, b, c, d, e, f, g, h, x) x
#define ___last10(a, b, c, d, e, f, g, h, i, x) x
#define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___nolast2(a, _) a
#define ___nolast3(a, b, _) a, b
#define ___nolast4(a, b, c, _) a, b, c
#define ___nolast5(a, b, c, d, _) a, b, c, d
#define ___nolast6(a, b, c, d, e, _) a, b, c, d, e
#define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f
#define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g
#define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h
#define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i
#define ___nolast(...) ___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___arrow1(a) a
#define ___arrow2(a, b) a->b
#define ___arrow3(a, b, c) a->b->c
#define ___arrow4(a, b, c, d) a->b->c->d
#define ___arrow5(a, b, c, d, e) a->b->c->d->e
#define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f
#define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g
#define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h
#define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i
#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___type(...) typeof(___arrow(__VA_ARGS__))

#define ___read(read_fn, dst, src_type, src, accessor) \
    read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)

/* "recursively" read a sequence of inner pointers using local __t var */
#define ___rd_first(fn, src, a) ___read(fn, &__t, ___type(src), src, a);
#define ___rd_last(fn, ...) \
    ___read(fn, &__t, ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__));
#define ___rd_p1(fn, ...) const void *__t; ___rd_first(fn, __VA_ARGS__)
#define ___rd_p2(fn, ...) ___rd_p1(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p3(fn, ...) ___rd_p2(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p4(fn, ...) ___rd_p3(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p5(fn, ...) ___rd_p4(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p6(fn, ...) ___rd_p5(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p7(fn, ...) ___rd_p6(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p8(fn, ...) ___rd_p7(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p9(fn, ...) ___rd_p8(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___read_ptrs(fn, src, ...) \
    ___apply(___rd_p, ___narg(__VA_ARGS__))(fn, src, __VA_ARGS__)

#define ___core_read0(fn, fn_ptr, dst, src, a) \
    ___read(fn, dst, ___type(src), src, a);
#define ___core_readN(fn, fn_ptr, dst, src, ...) \
    ___read_ptrs(fn_ptr, src, ___nolast(__VA_ARGS__)) \
    ___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t, \
            ___last(__VA_ARGS__));
#define ___core_read(fn, fn_ptr, dst, src, a, ...) \
    ___apply(___core_read, ___empty(__VA_ARGS__))(fn, fn_ptr, dst, \
                                                  src, a, ##__VA_ARGS__)

/*
 * BPF_CORE_READ_INTO() is a more performance-conscious variant of
 * BPF_CORE_READ(), in which final field is read into user-provided storage.
 * See BPF_CORE_READ() below for more details on general usage.
 */
#define BPF_CORE_READ_INTO(dst, src, a, ...) ({ \
    ___core_read(bpf_core_read, bpf_core_read, \
                 dst, (src), a, ##__VA_ARGS__) \
})

/*
 * Variant of BPF_CORE_READ_INTO() for reading from user-space memory.
 *
 * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use.
 */
#define BPF_CORE_READ_USER_INTO(dst, src, a, ...) ({ \
    ___core_read(bpf_core_read_user, bpf_core_read_user, \
                 dst, (src), a, ##__VA_ARGS__) \
})

/* Non-CO-RE variant of BPF_CORE_READ_INTO() */
#define BPF_PROBE_READ_INTO(dst, src, a, ...) ({ \
    ___core_read(bpf_probe_read, bpf_probe_read, \
                 dst, (src), a, ##__VA_ARGS__) \
})

/* Non-CO-RE variant of BPF_CORE_READ_USER_INTO().
 *
 * As no CO-RE relocations are emitted, source types can be arbitrary and are
 * not restricted to kernel types only.
 */
#define BPF_PROBE_READ_USER_INTO(dst, src, a, ...) ({ \
    ___core_read(bpf_probe_read_user, bpf_probe_read_user, \
                 dst, (src), a, ##__VA_ARGS__) \
})

/*
 * BPF_CORE_READ_STR_INTO() does same "pointer chasing" as
 * BPF_CORE_READ() for intermediate pointers, but then executes (and returns
 * corresponding error code) bpf_core_read_str() for final string read.
 */
#define BPF_CORE_READ_STR_INTO(dst, src, a, ...) ({ \
    ___core_read(bpf_core_read_str, bpf_core_read, \
                 dst, (src), a, ##__VA_ARGS__) \
})

/*
 * Variant of BPF_CORE_READ_STR_INTO() for reading from user-space memory.
 *
 * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use.
 */
#define BPF_CORE_READ_USER_STR_INTO(dst, src, a, ...) ({ \
    ___core_read(bpf_core_read_user_str, bpf_core_read_user, \
                 dst, (src), a, ##__VA_ARGS__) \
})

/* Non-CO-RE variant of BPF_CORE_READ_STR_INTO() */
#define BPF_PROBE_READ_STR_INTO(dst, src, a, ...) ({ \
    ___core_read(bpf_probe_read_str, bpf_probe_read, \
                 dst, (src), a, ##__VA_ARGS__) \
})

/*
 * Non-CO-RE variant of BPF_CORE_READ_USER_STR_INTO().
 *
 * As no CO-RE relocations are emitted, source types can be arbitrary and are
 * not restricted to kernel types only.
 */
#define BPF_PROBE_READ_USER_STR_INTO(dst, src, a, ...) ({ \
    ___core_read(bpf_probe_read_user_str, bpf_probe_read_user, \
                 dst, (src), a, ##__VA_ARGS__) \
})

/*
 * BPF_CORE_READ() is used to simplify BPF CO-RE relocatable read, especially
 * when there are few pointer chasing steps.
 * E.g., what in non-BPF world (or in BPF w/ BCC) would be something like:
 *	int x = s->a.b.c->d.e->f->g;
 * can be succinctly achieved using BPF_CORE_READ as:
 *	int x = BPF_CORE_READ(s, a.b.c, d.e, f, g);
 *
 * BPF_CORE_READ will decompose above statement into 4 bpf_core_read (BPF
 * CO-RE relocatable bpf_probe_read_kernel() wrapper) calls, logically
 * equivalent to:
 * 1. const void *__t = s->a.b.c;
 * 2. __t = __t->d.e;
 * 3. __t = __t->f;
 * 4. return __t->g;
 *
 * Equivalence is logical, because there is a heavy type casting/preservation
 * involved, as well as all the reads are happening through
 * bpf_probe_read_kernel() calls using __builtin_preserve_access_index() to
 * emit CO-RE relocations.
 *
 * N.B. Only up to 9 "field accessors" are supported, which should be more
 * than enough for any practical purpose.
 */
#define BPF_CORE_READ(src, a, ...) ({ \
    ___type((src), a, ##__VA_ARGS__) __r; \
    BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \
    __r; \
})

/*
 * Variant of BPF_CORE_READ() for reading from user-space memory.
 *
 * NOTE: all the source types involved are still *kernel types* and need to
 * exist in kernel (or kernel module) BTF, otherwise CO-RE relocation will
 * fail. Custom user types are not relocatable with CO-RE.
 * The typical situation in which BPF_CORE_READ_USER() might be used is to
 * read kernel UAPI types from the user-space memory passed in as a syscall
 * input argument.
 */
#define BPF_CORE_READ_USER(src, a, ...) ({ \
    ___type((src), a, ##__VA_ARGS__) __r; \
    BPF_CORE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__); \
    __r; \
})

/* Non-CO-RE variant of BPF_CORE_READ() */
#define BPF_PROBE_READ(src, a, ...) ({ \
    ___type((src), a, ##__VA_ARGS__) __r; \
    BPF_PROBE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \
    __r; \
})

/*
 * Non-CO-RE variant of BPF_CORE_READ_USER().
 *
 * As no CO-RE relocations are emitted, source types can be arbitrary and are
 * not restricted to kernel types only.
 */
#define BPF_PROBE_READ_USER(src, a, ...) ({ \
    ___type((src), a, ##__VA_ARGS__) __r; \
    BPF_PROBE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__); \
    __r; \
})

#endif
firewall/interception/ebpf/program/bpf/bpf_helper_defs.h (new file, 4718 lines)
File diff suppressed because it is too large
firewall/interception/ebpf/program/bpf/bpf_helpers.h (new file, 289 lines)
@@ -0,0 +1,289 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_HELPERS__
#define __BPF_HELPERS__

/*
 * Note that bpf programs need to include either
 * vmlinux.h (auto-generated from BTF) or linux/types.h
 * in advance since bpf_helper_defs.h uses such types
 * as __u64.
 */
#include "bpf_helper_defs.h"

#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]

/*
 * Helper macro to place programs, maps, license in
 * different sections in elf_bpf file. Section names
 * are interpreted by libbpf depending on the context (BPF programs, BPF maps,
 * extern variables, etc).
 * To allow use of SEC() with externs (e.g., for extern .maps declarations),
 * make sure __attribute__((unused)) doesn't trigger compilation warning.
 */
#if __GNUC__ && !__clang__

/*
 * Pragma macros are broken on GCC
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55578
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90400
 */
#define SEC(name) __attribute__((section(name), used))

#else

#define SEC(name) \
    _Pragma("GCC diagnostic push") \
    _Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \
    __attribute__((section(name), used)) \
    _Pragma("GCC diagnostic pop") \

#endif

/* Avoid 'linux/stddef.h' definition of '__always_inline'. */
#undef __always_inline
#define __always_inline inline __attribute__((always_inline))

#ifndef __noinline
#define __noinline __attribute__((noinline))
#endif
#ifndef __weak
#define __weak __attribute__((weak))
#endif

/*
 * Use __hidden attribute to mark a non-static BPF subprogram effectively
 * static for BPF verifier's verification algorithm purposes, allowing more
 * extensive and permissive BPF verification process, taking into account
 * subprogram's caller context.
 */
#define __hidden __attribute__((visibility("hidden")))

/* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include
 * any system-level headers (such as stddef.h, linux/version.h, etc), and
 * commonly-used macros like NULL and KERNEL_VERSION aren't available through
 * vmlinux.h. This just adds unnecessary hurdles and forces users to re-define
 * them on their own. So as a convenience, provide such definitions here.
 */
#ifndef NULL
#define NULL ((void *)0)
#endif

#ifndef KERNEL_VERSION
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
#endif

/*
 * Helper macros to manipulate data structures
 */
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
#endif
#ifndef container_of
#define container_of(ptr, type, member) \
    ({ \
        void *__mptr = (void *)(ptr); \
        ((type *)(__mptr - offsetof(type, member))); \
    })
#endif

/*
 * Compiler (optimization) barrier.
 */
#ifndef barrier
#define barrier() asm volatile("" ::: "memory")
#endif

/* Variable-specific compiler (optimization) barrier. It's a no-op which makes
 * compiler believe that there is some black box modification of a given
 * variable and thus prevents compiler from making extra assumption about its
 * value and potential simplifications and optimizations on this variable.
 *
 * E.g., compiler might often delay or even omit 32-bit to 64-bit casting of
 * a variable, making some code patterns unverifiable. Putting barrier_var()
 * in place will ensure that cast is performed before the barrier_var()
 * invocation, because compiler has to pessimistically assume that embedded
 * asm section might perform some extra operations on that variable.
 *
 * This is a variable-specific variant of more global barrier().
 */
#ifndef barrier_var
#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))
#endif

/*
 * Helper macro to throw a compilation error if __bpf_unreachable() gets
 * built into the resulting code. This works given BPF back end does not
 * implement __builtin_trap(). This is useful to assert that certain paths
 * of the program code are never used and hence eliminated by the compiler.
 *
 * For example, consider a switch statement that covers known cases used by
 * the program. __bpf_unreachable() can then reside in the default case. If
 * the program gets extended such that a case is not covered in the switch
 * statement, then it will throw a build error due to the default case not
 * being compiled out.
 */
#ifndef __bpf_unreachable
# define __bpf_unreachable() __builtin_trap()
#endif

/*
 * Helper function to perform a tail call with a constant/immediate map slot.
 */
#if __clang_major__ >= 8 && defined(__bpf__)
static __always_inline void
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
{
    if (!__builtin_constant_p(slot))
        __bpf_unreachable();

    /*
     * Provide a hard guarantee that LLVM won't optimize setting r2 (map
     * pointer) and r3 (constant map index) from _different paths_ ending
     * up at the _same_ call insn as otherwise we won't be able to use the
     * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
     * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
     * tracking for prog array pokes") for details on verifier tracking.
     *
     * Note on clobber list: we need to stay in-line with BPF calling
     * convention, so even if we don't end up using r0, r4, r5, we need
     * to mark them as clobber so that LLVM doesn't end up using them
     * before / after the call.
     */
    asm volatile("r1 = %[ctx]\n\t"
                 "r2 = %[map]\n\t"
                 "r3 = %[slot]\n\t"
                 "call 12"
                 :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
                 : "r0", "r1", "r2", "r3", "r4", "r5");
}
#endif

enum libbpf_pin_type {
    LIBBPF_PIN_NONE,
    /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
    LIBBPF_PIN_BY_NAME,
};

enum libbpf_tristate {
    TRI_NO = 0,
    TRI_YES = 1,
    TRI_MODULE = 2,
};

#define __kconfig __attribute__((section(".kconfig")))
#define __ksym __attribute__((section(".ksyms")))
#define __kptr __attribute__((btf_type_tag("kptr")))
#define __kptr_ref __attribute__((btf_type_tag("kptr_ref")))

#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
#endif
#ifndef ___bpf_apply
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
#endif
#ifndef ___bpf_nth
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
#endif
#ifndef ___bpf_narg
#define ___bpf_narg(...) \
    ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#endif

#define ___bpf_fill0(arr, p, x) do {} while (0)
#define ___bpf_fill1(arr, p, x) arr[p] = x
#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
#define ___bpf_fill(arr, args...) \
    ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)

/*
 * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
 * in a structure.
 */
#define BPF_SEQ_PRINTF(seq, fmt, args...) \
({ \
    static const char ___fmt[] = fmt; \
    unsigned long long ___param[___bpf_narg(args)]; \
    \
    _Pragma("GCC diagnostic push") \
    _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
    ___bpf_fill(___param, args); \
    _Pragma("GCC diagnostic pop") \
    \
    bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
                   ___param, sizeof(___param)); \
})

/*
 * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of
 * an array of u64.
 */
#define BPF_SNPRINTF(out, out_size, fmt, args...) \
({ \
    static const char ___fmt[] = fmt; \
    unsigned long long ___param[___bpf_narg(args)]; \
    \
    _Pragma("GCC diagnostic push") \
    _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
    ___bpf_fill(___param, args); \
    _Pragma("GCC diagnostic pop") \
    \
    bpf_snprintf(out, out_size, ___fmt, \
                 ___param, sizeof(___param)); \
})

#ifdef BPF_NO_GLOBAL_DATA
#define BPF_PRINTK_FMT_MOD
#else
#define BPF_PRINTK_FMT_MOD static const
#endif

#define __bpf_printk(fmt, ...) \
({ \
    BPF_PRINTK_FMT_MOD char ____fmt[] = fmt; \
    bpf_trace_printk(____fmt, sizeof(____fmt), \
                     ##__VA_ARGS__); \
})

/*
 * __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments
 * instead of an array of u64.
 */
#define __bpf_vprintk(fmt, args...) \
({ \
    static const char ___fmt[] = fmt; \
    unsigned long long ___param[___bpf_narg(args)]; \
    \
    _Pragma("GCC diagnostic push") \
    _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
    ___bpf_fill(___param, args); \
    _Pragma("GCC diagnostic pop") \
    \
    bpf_trace_vprintk(___fmt, sizeof(___fmt), \
                      ___param, sizeof(___param)); \
})

/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
 * Otherwise use __bpf_vprintk
 */
#define ___bpf_pick_printk(...) \
    ___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
               __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
               __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\
               __bpf_printk /*1*/, __bpf_printk /*0*/)

/* Helper macro to print out debug messages */
#define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args)

#endif
670
firewall/interception/ebpf/program/bpf/bpf_tracing.h
Normal file
670
firewall/interception/ebpf/program/bpf/bpf_tracing.h
Normal file
|
@ -0,0 +1,670 @@
|
|||
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
|
||||
#ifndef __BPF_TRACING_H__
|
||||
#define __BPF_TRACING_H__
|
||||
|
||||
#include <bpf/bpf_helpers.h>
|
||||
|
||||
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
|
||||
#if defined(__TARGET_ARCH_x86)
|
||||
#define bpf_target_x86
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_s390)
|
||||
#define bpf_target_s390
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_arm)
|
||||
#define bpf_target_arm
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_arm64)
|
||||
#define bpf_target_arm64
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_mips)
|
||||
#define bpf_target_mips
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_powerpc)
|
||||
#define bpf_target_powerpc
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_sparc)
|
||||
#define bpf_target_sparc
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_riscv)
|
||||
#define bpf_target_riscv
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_arc)
|
||||
#define bpf_target_arc
|
||||
#define bpf_target_defined
|
||||
#else
|
||||
|
||||
/* Fall back to what the compiler says */
|
||||
#if defined(__x86_64__)
|
||||
#define bpf_target_x86
|
||||
#define bpf_target_defined
|
||||
#elif defined(__s390__)
|
||||
#define bpf_target_s390
|
||||
#define bpf_target_defined
|
||||
#elif defined(__arm__)
|
||||
#define bpf_target_arm
|
||||
#define bpf_target_defined
|
||||
#elif defined(__aarch64__)
|
||||
#define bpf_target_arm64
|
||||
#define bpf_target_defined
|
||||
#elif defined(__mips__)
|
||||
#define bpf_target_mips
|
||||
#define bpf_target_defined
|
||||
#elif defined(__powerpc__)
|
||||
#define bpf_target_powerpc
|
||||
#define bpf_target_defined
|
||||
#elif defined(__sparc__)
|
||||
#define bpf_target_sparc
|
||||
#define bpf_target_defined
|
||||
#elif defined(__riscv) && __riscv_xlen == 64
|
||||
#define bpf_target_riscv
|
||||
#define bpf_target_defined
|
||||
#elif defined(__arc__)
|
||||
#define bpf_target_arc
|
||||
#define bpf_target_defined
|
||||
#endif /* no compiler target */
|
||||
|
||||
#endif
|
||||
|
||||
#ifndef __BPF_TARGET_MISSING
|
||||
#define __BPF_TARGET_MISSING "GCC error \"Must specify a BPF target arch via __TARGET_ARCH_xxx\""
|
||||
#endif
|
||||
|
||||
#if defined(bpf_target_x86)
|
||||
|
||||
#if defined(__KERNEL__) || defined(__VMLINUX_H__)
|
||||
|
||||
#define __PT_PARM1_REG di
|
||||
#define __PT_PARM2_REG si
|
||||
#define __PT_PARM3_REG dx
|
||||
#define __PT_PARM4_REG cx
|
||||
#define __PT_PARM5_REG r8
|
||||
#define __PT_RET_REG sp
|
||||
#define __PT_FP_REG bp
|
||||
#define __PT_RC_REG ax
|
||||
#define __PT_SP_REG sp
|
||||
#define __PT_IP_REG ip
|
||||
/* syscall uses r10 for PARM4 */
|
||||
#define PT_REGS_PARM4_SYSCALL(x) ((x)->r10)
|
||||
#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10)
|
||||
|
||||
#else
|
||||
|
||||
#ifdef __i386__
|
||||
|
||||
#define __PT_PARM1_REG eax
|
||||
#define __PT_PARM2_REG edx
|
||||
#define __PT_PARM3_REG ecx
|
||||
/* i386 kernel is built with -mregparm=3 */
|
||||
#define __PT_PARM4_REG __unsupported__
|
||||
#define __PT_PARM5_REG __unsupported__
|
||||
#define __PT_RET_REG esp
|
||||
#define __PT_FP_REG ebp
|
||||
#define __PT_RC_REG eax
|
||||
#define __PT_SP_REG esp
|
||||
#define __PT_IP_REG eip
|
||||
|
||||
#else /* __i386__ */
|
||||
|
||||
#define __PT_PARM1_REG rdi
|
||||
#define __PT_PARM2_REG rsi
|
||||
#define __PT_PARM3_REG rdx
|
||||
#define __PT_PARM4_REG rcx
|
||||
#define __PT_PARM5_REG r8
|
||||
#define __PT_RET_REG rsp
|
||||
#define __PT_FP_REG rbp
|
||||
#define __PT_RC_REG rax
|
||||
#define __PT_SP_REG rsp
|
||||
#define __PT_IP_REG rip
|
||||
/* syscall uses r10 for PARM4 */
|
||||
#define PT_REGS_PARM4_SYSCALL(x) ((x)->r10)
|
||||
#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10)
|
||||
|
||||
#endif /* __i386__ */
|
||||
|
||||
#endif /* __KERNEL__ || __VMLINUX_H__ */
|
||||
|
||||
#elif defined(bpf_target_s390)
|
||||
|
||||
struct pt_regs___s390 {
|
||||
unsigned long orig_gpr2;
|
||||
};
|
||||
|
||||
/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
|
||||
#define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
|
||||
#define __PT_PARM1_REG gprs[2]
|
||||
#define __PT_PARM2_REG gprs[3]
|
||||
#define __PT_PARM3_REG gprs[4]
|
||||
#define __PT_PARM4_REG gprs[5]
|
||||
#define __PT_PARM5_REG gprs[6]
|
||||
#define __PT_RET_REG grps[14]
|
||||
#define __PT_FP_REG gprs[11] /* Works only with CONFIG_FRAME_POINTER */
|
||||
#define __PT_RC_REG gprs[2]
|
||||
#define __PT_SP_REG gprs[15]
|
||||
#define __PT_IP_REG psw.addr
|
||||
#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
|
||||
#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___s390 *)(x), orig_gpr2)
|
||||
|
||||
#elif defined(bpf_target_arm)
|
||||
|
||||
#define __PT_PARM1_REG uregs[0]
|
||||
#define __PT_PARM2_REG uregs[1]
|
||||
#define __PT_PARM3_REG uregs[2]
|
||||
#define __PT_PARM4_REG uregs[3]
|
||||
#define __PT_PARM5_REG uregs[4]
|
||||
#define __PT_RET_REG uregs[14]
|
||||
#define __PT_FP_REG uregs[11] /* Works only with CONFIG_FRAME_POINTER */
|
||||
#define __PT_RC_REG uregs[0]
|
||||
#define __PT_SP_REG uregs[13]
|
||||
#define __PT_IP_REG uregs[12]
|
||||
|
||||
#elif defined(bpf_target_arm64)
|
||||
|
||||
struct pt_regs___arm64 {
|
||||
unsigned long orig_x0;
|
||||
};
|
||||
|
||||
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
|
||||
#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
|
||||
#define __PT_PARM1_REG regs[0]
|
||||
#define __PT_PARM2_REG regs[1]
|
||||
#define __PT_PARM3_REG regs[2]
|
||||
#define __PT_PARM4_REG regs[3]
|
||||
#define __PT_PARM5_REG regs[4]
|
||||
#define __PT_RET_REG regs[30]
|
||||
#define __PT_FP_REG regs[29] /* Works only with CONFIG_FRAME_POINTER */
|
||||
#define __PT_RC_REG regs[0]
|
||||
#define __PT_SP_REG sp
|
||||
#define __PT_IP_REG pc
|
||||
#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
|
||||
#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___arm64 *)(x), orig_x0)
|
||||
|
||||
#elif defined(bpf_target_mips)
|
||||
|
||||
#define __PT_PARM1_REG regs[4]
|
||||
#define __PT_PARM2_REG regs[5]
|
||||
#define __PT_PARM3_REG regs[6]
|
||||
#define __PT_PARM4_REG regs[7]
|
||||
#define __PT_PARM5_REG regs[8]
|
||||
#define __PT_RET_REG regs[31]
|
||||
#define __PT_FP_REG regs[30] /* Works only with CONFIG_FRAME_POINTER */
|
||||
#define __PT_RC_REG regs[2]
|
||||
#define __PT_SP_REG regs[29]
|
||||
#define __PT_IP_REG cp0_epc
|
||||
|
||||
#elif defined(bpf_target_powerpc)
|
||||
|
||||
#define __PT_PARM1_REG gpr[3]
|
||||
#define __PT_PARM2_REG gpr[4]
|
||||
#define __PT_PARM3_REG gpr[5]
|
||||
#define __PT_PARM4_REG gpr[6]
|
||||
#define __PT_PARM5_REG gpr[7]
|
||||
#define __PT_RET_REG regs[31]
|
||||
#define __PT_FP_REG __unsupported__
|
||||
#define __PT_RC_REG gpr[3]
|
||||
#define __PT_SP_REG sp
|
||||
#define __PT_IP_REG nip
|
||||
/* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. */
|
||||
#define PT_REGS_SYSCALL_REGS(ctx) ctx
|
||||
|
||||
#elif defined(bpf_target_sparc)
|
||||
|
||||
#define __PT_PARM1_REG u_regs[UREG_I0]
|
||||
#define __PT_PARM2_REG u_regs[UREG_I1]
|
||||
#define __PT_PARM3_REG u_regs[UREG_I2]
|
||||
#define __PT_PARM4_REG u_regs[UREG_I3]
|
||||
#define __PT_PARM5_REG u_regs[UREG_I4]
|
||||
#define __PT_RET_REG u_regs[UREG_I7]
|
||||
#define __PT_FP_REG __unsupported__
|
||||
#define __PT_RC_REG u_regs[UREG_I0]
|
||||
#define __PT_SP_REG u_regs[UREG_FP]
|
||||
/* Should this also be a bpf_target check for the sparc case? */
|
||||
#if defined(__arch64__)
|
||||
#define __PT_IP_REG tpc
|
||||
#else
|
||||
#define __PT_IP_REG pc
|
||||
#endif
|
||||
|
||||
#elif defined(bpf_target_riscv)
|
||||
|
||||
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
|
||||
#define __PT_PARM1_REG a0
|
||||
#define __PT_PARM2_REG a1
|
||||
#define __PT_PARM3_REG a2
|
||||
#define __PT_PARM4_REG a3
|
||||
#define __PT_PARM5_REG a4
|
||||
#define __PT_RET_REG ra
|
||||
#define __PT_FP_REG s0
|
||||
#define __PT_RC_REG a0
|
||||
#define __PT_SP_REG sp
|
||||
#define __PT_IP_REG pc
|
||||
/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
|
||||
#define PT_REGS_SYSCALL_REGS(ctx) ctx
|
||||
|
||||
#elif defined(bpf_target_arc)
|
||||
|
||||
/* arc provides struct user_pt_regs instead of struct pt_regs to userspace */
|
||||
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
|
||||
#define __PT_PARM1_REG scratch.r0
|
||||
#define __PT_PARM2_REG scratch.r1
|
||||
#define __PT_PARM3_REG scratch.r2
|
||||
#define __PT_PARM4_REG scratch.r3
|
||||
#define __PT_PARM5_REG scratch.r4
|
||||
#define __PT_RET_REG scratch.blink
|
||||
#define __PT_FP_REG __unsupported__
|
||||
#define __PT_RC_REG scratch.r0
|
||||
#define __PT_SP_REG scratch.sp
|
||||
#define __PT_IP_REG scratch.ret
|
||||
/* arc does not select ARCH_HAS_SYSCALL_WRAPPER. */
|
||||
#define PT_REGS_SYSCALL_REGS(ctx) ctx
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(bpf_target_defined)
|
||||
|
||||
struct pt_regs;
|
||||
|
||||
/* allow some architecutres to override `struct pt_regs` */
|
||||
#ifndef __PT_REGS_CAST
|
||||
#define __PT_REGS_CAST(x) (x)
|
||||
#endif
|
||||
|
||||
#define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->__PT_PARM1_REG)
|
||||
#define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->__PT_PARM2_REG)
|
||||
#define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->__PT_PARM3_REG)
|
||||
#define PT_REGS_PARM4(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG)
|
||||
#define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->__PT_PARM5_REG)
|
||||
#define PT_REGS_RET(x) (__PT_REGS_CAST(x)->__PT_RET_REG)
|
||||
#define PT_REGS_FP(x) (__PT_REGS_CAST(x)->__PT_FP_REG)
|
||||
#define PT_REGS_RC(x) (__PT_REGS_CAST(x)->__PT_RC_REG)
|
||||
#define PT_REGS_SP(x) (__PT_REGS_CAST(x)->__PT_SP_REG)
|
||||
#define PT_REGS_IP(x) (__PT_REGS_CAST(x)->__PT_IP_REG)
|
||||
|
||||
#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_REG)
|
||||
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_REG)
|
||||
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_REG)
|
||||
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG)
|
||||
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_REG)
|
||||
#define PT_REGS_RET_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RET_REG)
|
||||
#define PT_REGS_FP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_FP_REG)
|
||||
#define PT_REGS_RC_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RC_REG)
|
||||
#define PT_REGS_SP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_SP_REG)
|
||||
#define PT_REGS_IP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_IP_REG)
|
||||
|
||||
#if defined(bpf_target_powerpc)
|
||||
|
||||
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
|
||||
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
|
||||
|
||||
#elif defined(bpf_target_sparc)
|
||||
|
||||
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
|
||||
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
|
||||
|
||||
#else
|
||||
|
||||
#define BPF_KPROBE_READ_RET_IP(ip, ctx) \
|
||||
({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
|
||||
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \
|
||||
({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
|
||||
|
||||
#endif
|
||||
|
||||
#ifndef PT_REGS_PARM1_SYSCALL
|
||||
#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1(x)
|
||||
#endif
|
||||
#define PT_REGS_PARM2_SYSCALL(x) PT_REGS_PARM2(x)
|
||||
#define PT_REGS_PARM3_SYSCALL(x) PT_REGS_PARM3(x)
|
||||
#ifndef PT_REGS_PARM4_SYSCALL
|
||||
#define PT_REGS_PARM4_SYSCALL(x) PT_REGS_PARM4(x)
|
||||
#endif
|
||||
#define PT_REGS_PARM5_SYSCALL(x) PT_REGS_PARM5(x)
|
||||
|
||||
#ifndef PT_REGS_PARM1_CORE_SYSCALL
|
||||
#define PT_REGS_PARM1_CORE_SYSCALL(x) PT_REGS_PARM1_CORE(x)
|
||||
#endif
|
||||
#define PT_REGS_PARM2_CORE_SYSCALL(x) PT_REGS_PARM2_CORE(x)
|
||||
#define PT_REGS_PARM3_CORE_SYSCALL(x) PT_REGS_PARM3_CORE(x)
|
||||
#ifndef PT_REGS_PARM4_CORE_SYSCALL
|
||||
#define PT_REGS_PARM4_CORE_SYSCALL(x) PT_REGS_PARM4_CORE(x)
|
||||
#endif
|
||||
#define PT_REGS_PARM5_CORE_SYSCALL(x) PT_REGS_PARM5_CORE(x)
|
||||
|
||||
#else /* defined(bpf_target_defined) */

#define PT_REGS_PARM1(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM2(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM3(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RET(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_FP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RC(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_SP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_IP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })

#define PT_REGS_PARM1_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM2_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM3_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RET_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_FP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RC_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_SP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_IP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })

#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })

#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM2_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM3_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })

#define PT_REGS_PARM1_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM2_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM3_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })

#endif /* defined(bpf_target_defined) */

/*
 * When invoked from a syscall handler kprobe, returns a pointer to a
 * struct pt_regs containing syscall arguments and suitable for passing to
 * PT_REGS_PARMn_SYSCALL() and PT_REGS_PARMn_CORE_SYSCALL().
 */
#ifndef PT_REGS_SYSCALL_REGS
/* By default, assume that the arch selects ARCH_HAS_SYSCALL_WRAPPER. */
#define PT_REGS_SYSCALL_REGS(ctx) ((struct pt_regs *)PT_REGS_PARM1(ctx))
#endif
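
/*
 * Editor's sketch (not part of the original header): with the default
 * definition above, a kprobe placed on a syscall entry point can recover
 * the inner pt_regs and then read arguments CO-RE-safely; the names are
 * illustrative only:
 *
 *    struct pt_regs *regs = PT_REGS_SYSCALL_REGS(ctx);
 *    long fd = (long)PT_REGS_PARM1_CORE_SYSCALL(regs);
 */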

#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
#endif
#ifndef ___bpf_apply
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
#endif
#ifndef ___bpf_nth
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
#endif
#ifndef ___bpf_narg
#define ___bpf_narg(...) ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#endif

#define ___bpf_ctx_cast0() ctx
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
#define ___bpf_ctx_cast(args...) ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)

/*
 * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
 * similar kinds of BPF programs, which accept input arguments as a single
 * pointer to an untyped u64 array, where each u64 can actually be a typed
 * pointer or an integer of a different size. Instead of requiring the user
 * to write manual casts and work with array elements by index, the BPF_PROG
 * macro allows the user to declare a list of named and typed input arguments
 * in the same syntax as for a normal C function. All the casting is hidden
 * and performed transparently, while user code can simply work with function
 * arguments of the specified type and name.
 *
 * The original raw context argument is preserved as the 'ctx' argument.
 * This is useful when using BPF helpers that expect the original context
 * as one of the parameters (e.g., for bpf_perf_event_output()).
 */
#define BPF_PROG(name, args...) \
name(unsigned long long *ctx); \
static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx, ##args); \
typeof(name(0)) name(unsigned long long *ctx) \
{ \
	_Pragma("GCC diagnostic push") \
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
	return ____##name(___bpf_ctx_cast(args)); \
	_Pragma("GCC diagnostic pop") \
} \
static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx, ##args)
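
/*
 * Editor's sketch (not part of the original header): monitor.c further down
 * in this diff uses BPF_PROG for its fexit programs, e.g.:
 *
 *    SEC("fexit/tcp_v4_connect")
 *    int BPF_PROG(tcp_v4_connect, struct sock *sk) { ... }
 *
 * This expands to a 'tcp_v4_connect(unsigned long long *ctx)' entry point
 * plus an always-inlined helper that receives ctx[0] cast to 'struct sock *'.
 */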

#ifndef ___bpf_nth2
#define ___bpf_nth2(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, \
		    _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, N, ...) N
#endif
#ifndef ___bpf_narg2
#define ___bpf_narg2(...) \
	___bpf_nth2(_, ##__VA_ARGS__, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, \
		    6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0)
#endif

#define ___bpf_treg_cnt(t) \
	__builtin_choose_expr(sizeof(t) == 1, 1, \
	__builtin_choose_expr(sizeof(t) == 2, 1, \
	__builtin_choose_expr(sizeof(t) == 4, 1, \
	__builtin_choose_expr(sizeof(t) == 8, 1, \
	__builtin_choose_expr(sizeof(t) == 16, 2, \
			      (void)0)))))

#define ___bpf_reg_cnt0() (0)
#define ___bpf_reg_cnt1(t, x) (___bpf_reg_cnt0() + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt2(t, x, args...) (___bpf_reg_cnt1(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt3(t, x, args...) (___bpf_reg_cnt2(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt4(t, x, args...) (___bpf_reg_cnt3(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt5(t, x, args...) (___bpf_reg_cnt4(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt6(t, x, args...) (___bpf_reg_cnt5(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt7(t, x, args...) (___bpf_reg_cnt6(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt8(t, x, args...) (___bpf_reg_cnt7(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt9(t, x, args...) (___bpf_reg_cnt8(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt10(t, x, args...) (___bpf_reg_cnt9(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt11(t, x, args...) (___bpf_reg_cnt10(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt12(t, x, args...) (___bpf_reg_cnt11(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt(args...) ___bpf_apply(___bpf_reg_cnt, ___bpf_narg2(args))(args)

#define ___bpf_union_arg(t, x, n) \
	__builtin_choose_expr(sizeof(t) == 1, ({ union { __u8 z[1]; t x; } ___t = { .z = {ctx[n]}}; ___t.x; }), \
	__builtin_choose_expr(sizeof(t) == 2, ({ union { __u16 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \
	__builtin_choose_expr(sizeof(t) == 4, ({ union { __u32 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \
	__builtin_choose_expr(sizeof(t) == 8, ({ union { __u64 z[1]; t x; } ___t = {.z = {ctx[n]} }; ___t.x; }), \
	__builtin_choose_expr(sizeof(t) == 16, ({ union { __u64 z[2]; t x; } ___t = {.z = {ctx[n], ctx[n + 1]} }; ___t.x; }), \
			      (void)0)))))

#define ___bpf_ctx_arg0(n, args...)
#define ___bpf_ctx_arg1(n, t, x) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt1(t, x))
#define ___bpf_ctx_arg2(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt2(t, x, args)) ___bpf_ctx_arg1(n, args)
#define ___bpf_ctx_arg3(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt3(t, x, args)) ___bpf_ctx_arg2(n, args)
#define ___bpf_ctx_arg4(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt4(t, x, args)) ___bpf_ctx_arg3(n, args)
#define ___bpf_ctx_arg5(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt5(t, x, args)) ___bpf_ctx_arg4(n, args)
#define ___bpf_ctx_arg6(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt6(t, x, args)) ___bpf_ctx_arg5(n, args)
#define ___bpf_ctx_arg7(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt7(t, x, args)) ___bpf_ctx_arg6(n, args)
#define ___bpf_ctx_arg8(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt8(t, x, args)) ___bpf_ctx_arg7(n, args)
#define ___bpf_ctx_arg9(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt9(t, x, args)) ___bpf_ctx_arg8(n, args)
#define ___bpf_ctx_arg10(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt10(t, x, args)) ___bpf_ctx_arg9(n, args)
#define ___bpf_ctx_arg11(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt11(t, x, args)) ___bpf_ctx_arg10(n, args)
#define ___bpf_ctx_arg12(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt12(t, x, args)) ___bpf_ctx_arg11(n, args)
#define ___bpf_ctx_arg(args...) ___bpf_apply(___bpf_ctx_arg, ___bpf_narg2(args))(___bpf_reg_cnt(args), args)

#define ___bpf_ctx_decl0()
#define ___bpf_ctx_decl1(t, x) , t x
#define ___bpf_ctx_decl2(t, x, args...) , t x ___bpf_ctx_decl1(args)
#define ___bpf_ctx_decl3(t, x, args...) , t x ___bpf_ctx_decl2(args)
#define ___bpf_ctx_decl4(t, x, args...) , t x ___bpf_ctx_decl3(args)
#define ___bpf_ctx_decl5(t, x, args...) , t x ___bpf_ctx_decl4(args)
#define ___bpf_ctx_decl6(t, x, args...) , t x ___bpf_ctx_decl5(args)
#define ___bpf_ctx_decl7(t, x, args...) , t x ___bpf_ctx_decl6(args)
#define ___bpf_ctx_decl8(t, x, args...) , t x ___bpf_ctx_decl7(args)
#define ___bpf_ctx_decl9(t, x, args...) , t x ___bpf_ctx_decl8(args)
#define ___bpf_ctx_decl10(t, x, args...) , t x ___bpf_ctx_decl9(args)
#define ___bpf_ctx_decl11(t, x, args...) , t x ___bpf_ctx_decl10(args)
#define ___bpf_ctx_decl12(t, x, args...) , t x ___bpf_ctx_decl11(args)
#define ___bpf_ctx_decl(args...) ___bpf_apply(___bpf_ctx_decl, ___bpf_narg2(args))(args)

/*
 * BPF_PROG2 is an enhanced version of BPF_PROG in order to handle struct
 * arguments. Since each struct argument might take one or two u64 values
 * in the trampoline stack, the argument type size is needed to place the
 * proper number of u64 values for each argument. Therefore, BPF_PROG2 has
 * a different syntax from BPF_PROG. For example, for the following BPF_PROG
 * syntax:
 *
 *     int BPF_PROG(test2, int a, int b) { ... }
 *
 * the corresponding BPF_PROG2 syntax is:
 *
 *     int BPF_PROG2(test2, int, a, int, b) { ... }
 *
 * where the type and the corresponding argument name are separated by a
 * comma.
 *
 * Use the BPF_PROG2 macro if one of the arguments might be a struct/union
 * larger than 8 bytes:
 *
 *     int BPF_PROG2(test_struct_arg, struct bpf_testmod_struct_arg_1, a, int, b,
 *                   int, c, int, d, struct bpf_testmod_struct_arg_2, e, int, ret)
 *     {
 *             // access a, b, c, d, e, and ret directly
 *             ...
 *     }
 */
#define BPF_PROG2(name, args...) \
name(unsigned long long *ctx); \
static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx ___bpf_ctx_decl(args)); \
typeof(name(0)) name(unsigned long long *ctx) \
{ \
	return ____##name(ctx ___bpf_ctx_arg(args)); \
} \
static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx ___bpf_ctx_decl(args))

struct pt_regs;

#define ___bpf_kprobe_args0() ctx
#define ___bpf_kprobe_args1(x) ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
#define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
#define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
#define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
#define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
#define ___bpf_kprobe_args(args...) ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)

/*
 * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
 * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
 * low-level way of getting kprobe input arguments from struct pt_regs, and
 * provides a familiar typed and named function arguments syntax and
 * semantics of accessing kprobe input parameters.
 *
 * The original struct pt_regs *context is preserved as the 'ctx' argument.
 * This might be necessary when using BPF helpers like bpf_perf_event_output().
 */
#define BPF_KPROBE(name, args...) \
name(struct pt_regs *ctx); \
static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args); \
typeof(name(0)) name(struct pt_regs *ctx) \
{ \
	_Pragma("GCC diagnostic push") \
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
	return ____##name(___bpf_kprobe_args(args)); \
	_Pragma("GCC diagnostic pop") \
} \
static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args)
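
/*
 * Editor's sketch (not part of the original header); the probed kernel
 * function and its argument are illustrative:
 *
 *    SEC("kprobe/tcp_v4_connect")
 *    int BPF_KPROBE(trace_connect, struct sock *sk) { ... }
 *
 * The macro populates 'sk' from PT_REGS_PARM1(ctx).
 */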

#define ___bpf_kretprobe_args0() ctx
#define ___bpf_kretprobe_args1(x) ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
#define ___bpf_kretprobe_args(args...) ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)

/*
 * BPF_KRETPROBE is similar to BPF_KPROBE, except that it only provides an
 * optional return value (in addition to `struct pt_regs *ctx`) and no input
 * arguments, because they will have been clobbered by the time the probed
 * function returns.
 */
#define BPF_KRETPROBE(name, args...) \
name(struct pt_regs *ctx); \
static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args); \
typeof(name(0)) name(struct pt_regs *ctx) \
{ \
	_Pragma("GCC diagnostic push") \
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
	return ____##name(___bpf_kretprobe_args(args)); \
	_Pragma("GCC diagnostic pop") \
} \
static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
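
/*
 * Editor's sketch (not part of the original header); the probed function is
 * illustrative:
 *
 *    SEC("kretprobe/tcp_v4_connect")
 *    int BPF_KRETPROBE(trace_connect_ret, int ret) { ... }
 *
 * The macro populates 'ret' from PT_REGS_RC(ctx), the return-value register.
 */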

/* If kernel has CONFIG_ARCH_HAS_SYSCALL_WRAPPER, read pt_regs directly */
#define ___bpf_syscall_args0() ctx
#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_SYSCALL(regs)
#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_SYSCALL(regs)
#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_SYSCALL(regs)
#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_SYSCALL(regs)
#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_SYSCALL(regs)
#define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)

/* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, we have to BPF_CORE_READ from pt_regs */
#define ___bpf_syswrap_args0() ctx
#define ___bpf_syswrap_args1(x) ___bpf_syswrap_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args(args...) ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args)

/*
 * BPF_KSYSCALL is a variant of BPF_KPROBE, which is intended for
 * tracing syscall functions, like __x64_sys_close. It hides the underlying
 * platform-specific low-level way of getting syscall input arguments from
 * struct pt_regs, and provides a familiar typed and named function arguments
 * syntax and semantics of accessing syscall input parameters.
 *
 * The original struct pt_regs *context is preserved as the 'ctx' argument.
 * This might be necessary when using BPF helpers like bpf_perf_event_output().
 *
 * At the moment BPF_KSYSCALL does not transparently handle all the calling
 * convention quirks for the following syscalls:
 *
 * - mmap(): __ARCH_WANT_SYS_OLD_MMAP.
 * - clone(): CONFIG_CLONE_BACKWARDS, CONFIG_CLONE_BACKWARDS2 and
 *   CONFIG_CLONE_BACKWARDS3.
 * - socket-related syscalls: __ARCH_WANT_SYS_SOCKETCALL.
 * - compat syscalls.
 *
 * This may or may not change in the future. Users need to take extra measures
 * to handle such quirks explicitly, if necessary.
 *
 * This macro relies on BPF CO-RE support and virtual __kconfig externs.
 */
#define BPF_KSYSCALL(name, args...) \
name(struct pt_regs *ctx); \
extern _Bool LINUX_HAS_SYSCALL_WRAPPER __kconfig; \
static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args); \
typeof(name(0)) name(struct pt_regs *ctx) \
{ \
	struct pt_regs *regs = LINUX_HAS_SYSCALL_WRAPPER \
			       ? (struct pt_regs *)PT_REGS_PARM1(ctx) \
			       : ctx; \
	_Pragma("GCC diagnostic push") \
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
	if (LINUX_HAS_SYSCALL_WRAPPER) \
		return ____##name(___bpf_syswrap_args(args)); \
	else \
		return ____##name(___bpf_syscall_args(args)); \
	_Pragma("GCC diagnostic pop") \
} \
static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args)

#define BPF_KPROBE_SYSCALL BPF_KSYSCALL
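
/*
 * Editor's sketch (not part of the original header): tracing close(2) with a
 * typed argument; the program name is illustrative:
 *
 *    SEC("ksyscall/close")
 *    int BPF_KSYSCALL(probe_close, int fd) { ... }
 */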

#endif

106
firewall/interception/ebpf/program/monitor.c
Normal file

@@ -0,0 +1,106 @@
#include "vmlinux-x86.h"
|
||||
#include "bpf/bpf_helpers.h"
|
||||
#include "bpf/bpf_tracing.h"
|
||||
|
||||
typedef unsigned char u8;
|
||||
typedef short int s16;
|
||||
typedef short unsigned int u16;
|
||||
typedef int s32;
|
||||
typedef unsigned int u32;
|
||||
typedef long long int s64;
|
||||
typedef long long unsigned int u64;
|
||||
typedef u16 le16;
|
||||
typedef u16 be16;
|
||||
typedef u32 be32;
|
||||
typedef u64 be64;
|
||||
typedef u32 wsum;
|
||||
|
||||
|
||||
#define AF_INET 2
|
||||
#define AF_INET6 10
|
||||
#define TASK_COMM_LEN 16
|
||||
|
||||
char __license[] SEC("license") = "Dual MIT/GPL";
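
/*
 * Ring buffer shared with userspace. For BPF_MAP_TYPE_RINGBUF, max_entries
 * is the size of the buffer in bytes, so 1 << 24 reserves 16 MiB.
 * (Editor's note, not part of the original source.)
 */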
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 1 << 24);
} events SEC(".maps");

/**
 * The sample submitted to userspace over a ring buffer.
 * Emit struct event's type info into the ELF's BTF so bpf2go
 * can generate a Go type from it.
 */
struct event {
	be32 saddr[4];
	be32 daddr[4];
	u16 sport;
	u16 dport;
	u32 pid;
	u8 ipVersion;
};
struct event *unused __attribute__((unused));

SEC("fexit/tcp_v4_connect")
|
||||
int BPF_PROG(tcp_v4_connect, struct sock *sk) {
|
||||
if (sk->__sk_common.skc_family != AF_INET) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct tcp_sock *ts = bpf_skc_to_tcp_sock(sk);
|
||||
if (!ts) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct event *tcp_info;
|
||||
tcp_info = bpf_ringbuf_reserve(&events, sizeof(struct event), 0);
|
||||
if (!tcp_info) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
tcp_info->pid = __builtin_bswap32((u32)bpf_get_current_pid_tgid());
|
||||
tcp_info->dport = sk->__sk_common.skc_dport;
|
||||
tcp_info->sport = sk->__sk_common.skc_num;
|
||||
tcp_info->saddr[0] = __builtin_bswap32(sk->__sk_common.skc_rcv_saddr);
|
||||
tcp_info->daddr[0] = __builtin_bswap32(sk->__sk_common.skc_daddr);
|
||||
tcp_info->ipVersion = 4;
|
||||
|
||||
bpf_ringbuf_submit(tcp_info, 0);
|
||||
return 0;
|
||||
};

SEC("fexit/tcp_v6_connect")
int BPF_PROG(tcp_v6_connect, struct sock *sk) {
	if (sk->__sk_common.skc_family != AF_INET6) {
		return 0;
	}

	struct tcp_sock *ts = bpf_skc_to_tcp_sock(sk);
	if (!ts) {
		return 0;
	}

	struct event *tcp_info;
	tcp_info = bpf_ringbuf_reserve(&events, sizeof(struct event), 0);
	if (!tcp_info) {
		return 0;
	}

	tcp_info->pid = __builtin_bswap32((u32)bpf_get_current_pid_tgid());
	for (int i = 0; i < 4; i++) {
		tcp_info->saddr[i] = __builtin_bswap32(sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32[i]);
	}
	for (int i = 0; i < 4; i++) {
		tcp_info->daddr[i] = __builtin_bswap32(sk->__sk_common.skc_v6_daddr.in6_u.u6_addr32[i]);
	}
	tcp_info->dport = sk->__sk_common.skc_dport;
	tcp_info->sport = sk->__sk_common.skc_num;
	tcp_info->ipVersion = 6;

	bpf_ringbuf_submit(tcp_info, 0);
	return 0;
}

// SEC("fentry/udp_sendmsg")

17
firewall/interception/ebpf/program/update.sh
Normal file

@@ -0,0 +1,17 @@
#!/usr/bin/env bash

# Version of libbpf to fetch headers from
LIBBPF_VERSION=1.1.0

# The headers we want
prefix=libbpf-"$LIBBPF_VERSION"
headers=(
	"$prefix"/src/bpf_core_read.h
	"$prefix"/src/bpf_helper_defs.h
	"$prefix"/src/bpf_helpers.h
	"$prefix"/src/bpf_tracing.h
)

# Fetch libbpf release and extract the desired headers
curl -sL "https://github.com/libbpf/libbpf/archive/refs/tags/v${LIBBPF_VERSION}.tar.gz" | \
tar -xz --xform='s#.*/#bpf/#' "${headers[@]}"
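
# Editor's note (assumption, not in the original script): the extracted
# headers land in a bpf/ directory relative to the working directory, so
# this is presumably meant to be run from firewall/interception/ebpf/program/.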

120348
firewall/interception/ebpf/program/vmlinux-x86.h
Normal file

File diff suppressed because it is too large

117
firewall/interception/ebpf/worker.go
Normal file

@@ -0,0 +1,117 @@
package ebpf

import (
	"bytes"
	"encoding/binary"
	"errors"
	"net"
	"unsafe"

	"github.com/cilium/ebpf/link"
	"github.com/cilium/ebpf/ringbuf"
	"github.com/cilium/ebpf/rlimit"
	"github.com/safing/portbase/log"
	"github.com/safing/portmaster/network/packet"
)

//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang -cflags "-O2 -g -Wall -Werror" -type event bpf program/monitor.c
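//
// Editor's note: running `go generate` executes the bpf2go directive above,
// compiling program/monitor.c with clang and writing endian-specific Go
// bindings (bpf_bpfel.go, bpf_bpfeb.go); the -type event flag also emits the
// bpfEvent struct used below.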

var stopper chan struct{}

func StartEBPFWorker(ch chan packet.Packet) {
	stopper = make(chan struct{})
	go func() {
		// Allow the current process to lock memory for eBPF resources.
		if err := rlimit.RemoveMemlock(); err != nil {
			log.Errorf("ebpf: failed to remove ebpf memlock: %s", err)
		}

		// Load pre-compiled programs and maps into the kernel.
		objs := bpfObjects{}
		if err := loadBpfObjects(&objs, nil); err != nil {
			log.Errorf("ebpf: failed to load ebpf object: %s", err)
			return
		}
		defer objs.Close()

		// Create a link to the tcp_v4_connect program.
		linkv4, err := link.AttachTracing(link.TracingOptions{
			Program: objs.bpfPrograms.TcpV4Connect,
		})
		if err != nil {
			log.Errorf("ebpf: failed to attach to tcp_v4_connect: %s", err)
			return
		}
		defer linkv4.Close()

		// Create a link to the tcp_v6_connect program.
		linkv6, err := link.AttachTracing(link.TracingOptions{
			Program: objs.bpfPrograms.TcpV6Connect,
		})
		if err != nil {
			log.Errorf("ebpf: failed to attach to tcp_v6_connect: %s", err)
			return
		}
		defer linkv6.Close()

		rd, err := ringbuf.NewReader(objs.bpfMaps.Events)
		if err != nil {
			log.Errorf("ebpf: failed to open ring buffer: %s", err)
			return
		}
		defer rd.Close()

		go func() {
			<-stopper

			if err := rd.Close(); err != nil {
				log.Errorf("ebpf: failed closing ringbuf reader: %s", err)
			}
		}()

		for {
			// Read the next event.
			record, err := rd.Read()
			if err != nil {
				if errors.Is(err, ringbuf.ErrClosed) {
					// Normal return
					return
				}
				log.Errorf("ebpf: failed to read from ring buffer: %s", err)
				continue
			}

			var event bpfEvent
			// Parse the ringbuf event entry into a bpfEvent structure.
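			// Editor's note: decoding with binary.BigEndian matches the
			// __builtin_bswap32 calls on the BPF side, so multi-byte
			// fields arrive here in the byte order the reader expects.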
			if err := binary.Read(bytes.NewBuffer(record.RawSample), binary.BigEndian, &event); err != nil {
				log.Errorf("ebpf: failed to parse ringbuf event: %s", err)
				continue
			}

			ipVersion := packet.IPVersion(event.IpVersion)
			log.Debugf("ebpf: pid: %d connection: %s:%d -> %s:%d", event.Pid, intToIP(event.Saddr, ipVersion), event.Sport, intToIP(event.Daddr, ipVersion), event.Dport)

			pack := &infoPacket{}
			pack.SetPacketInfo(packet.Info{
				Inbound:  false,
				InTunnel: false,
				Version:  ipVersion,
				Protocol: packet.TCP,
				SrcPort:  event.Sport,
				DstPort:  event.Dport,
				Src:      intToIP(event.Saddr, ipVersion),
				Dst:      intToIP(event.Daddr, ipVersion),
				PID:      event.Pid,
			})
			ch <- pack
		}
	}()
}

func StopEBPFWorker() {
	close(stopper)
}

// intToIP converts the raw address words into a net.IP (IPv4 or IPv6).
func intToIP(ipNum [4]uint32, ipVersion packet.IPVersion) net.IP {
	if ipVersion == packet.IPv4 {
		return unsafe.Slice((*byte)(unsafe.Pointer(&ipNum)), 4)
	}
	return unsafe.Slice((*byte)(unsafe.Pointer(&ipNum)), 16)
}
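
// Editor's sketch (hypothetical, not part of this change): wiring the worker
// into a packet pipeline. The channel capacity is an assumption.
//
//	ch := make(chan packet.Packet, 1000)
//	ebpf.StartEBPFWorker(ch)
//	defer ebpf.StopEBPFWorker()
//	for pkt := range ch {
//		// Each packet carries the connection tuple plus the PID that
//		// the eBPF program attributed to the connection.
//		_ = pkt
//	}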

@@ -1,6 +1,7 @@
package interception

import (
	"github.com/safing/portmaster/firewall/interception/ebpf"
	"github.com/safing/portmaster/firewall/interception/nfq"
	"github.com/safing/portmaster/network"
	"github.com/safing/portmaster/network/packet"

@@ -8,11 +9,13 @@ import (

// start starts the interception.
func start(ch chan packet.Packet) error {
	ebpf.StartEBPFWorker(ch)
	return StartNfqueueInterception(ch)
}

// stop stops the interception.
func stop() error {
	ebpf.StopEBPFWorker()
	return StopNfqueueInterception()
}

1
go.mod

@@ -41,6 +41,7 @@ require (
	github.com/armon/go-radix v1.0.0 // indirect
	github.com/awalterschulze/gographviz v2.0.3+incompatible // indirect
	github.com/bluele/gcache v0.0.2 // indirect
	github.com/cilium/ebpf v0.10.0 // indirect
	github.com/danieljoos/wincred v1.2.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect

2
go.sum

@@ -31,6 +31,8 @@ github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/cilium/ebpf v0.10.0 h1:nk5HPMeoBXtOzbkZBWym+ZWq1GIiHUsBFXxwewXAHLQ=
github.com/cilium/ebpf v0.10.0/go.mod h1:DPiVdY/kT534dgc9ERmvP8mWA+9gvwgKfRvk4nNWnoE=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo4jk=

@@ -13,6 +13,8 @@ type Info struct {
	Protocol IPProtocol
	SrcPort, DstPort uint16
	Src, Dst net.IP

	PID uint32
}

// LocalIP returns the local IP of the packet.

@@ -25,12 +25,17 @@ func GetProcessByConnection(ctx context.Context, pktInfo *packet.Info) (process
	// already be there for a while now.
	fastSearch := pktInfo.Inbound

	log.Tracer(ctx).Tracef("process: getting pid from system network state")
	var pid int
	pid, connInbound, err = state.Lookup(pktInfo, fastSearch)
	if err != nil {
		log.Tracer(ctx).Tracef("process: failed to find PID of connection: %s", err)
		return nil, pktInfo.Inbound, err
	if pktInfo.PID == 0 {
		log.Tracer(ctx).Tracef("process: getting pid from system network state")
		pid, connInbound, err = state.Lookup(pktInfo, fastSearch)
		if err != nil {
			log.Tracer(ctx).Tracef("process: failed to find PID of connection: %s", err)
			return nil, pktInfo.Inbound, err
		}
	} else {
		log.Tracer(ctx).Tracef("process: pid already set in packet (by ebpf or kext ALE layer)")
		pid = int(pktInfo.PID)
	}

	// Fallback to special profiles if PID could not be found.