|
4 | 4 |
|
5 | 5 | #include <linux/errno.h>
|
6 | 6 | #include <linux/jump_label.h>
|
| 7 | +#include <linux/rbtree.h> |
7 | 8 | #include <uapi/linux/bpf.h>
|
8 | 9 |
|
9 | 10 | struct sock;
|
10 | 11 | struct sockaddr;
|
11 | 12 | struct cgroup;
|
12 | 13 | struct sk_buff;
|
| 14 | +struct bpf_map; |
| 15 | +struct bpf_prog; |
13 | 16 | struct bpf_sock_ops_kern;
|
| 17 | +struct bpf_cgroup_storage; |
14 | 18 |
|
15 | 19 | #ifdef CONFIG_CGROUP_BPF
|
16 | 20 |
|
/*
 * Static key that is true only while at least one cgroup-bpf program is
 * attached; lets the run-prog wrappers below compile down to a no-op
 * branch on systems that never attach cgroup programs.
 */
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
|
19 | 23 |
|
| 24 | +struct bpf_cgroup_storage_map; |
| 25 | + |
| 26 | +struct bpf_storage_buffer { |
| 27 | + struct rcu_head rcu; |
| 28 | + char data[0]; |
| 29 | +}; |
| 30 | + |
/*
 * One cgroup local-storage instance. Field semantics below are partly
 * inferred from the declarations visible here — NOTE(review): confirm
 * against the bpf_cgroup_storage_map implementation.
 */
struct bpf_cgroup_storage {
	struct bpf_storage_buffer *buf;		/* RCU-managed data buffer */
	struct bpf_cgroup_storage_map *map;	/* owning storage map */
	struct bpf_cgroup_storage_key key;	/* lookup key (uapi type) */
	struct list_head list;			/* presumably links storages of a cgroup — verify */
	struct rb_node node;			/* presumably entry in the map's rbtree — verify */
	struct rcu_head rcu;			/* deferred free of this struct */
};
| 39 | + |
20 | 40 | struct bpf_prog_list {
|
21 | 41 | struct list_head node;
|
22 | 42 | struct bpf_prog *prog;
|
@@ -77,6 +97,15 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
|
/*
 * Run the attached BPF_CGROUP_DEVICE programs for a device access.
 * Returns 0 / negative errno per the __cgroup_bpf_* convention —
 * NOTE(review): semantics inferred from naming; confirm in the .c file.
 */
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);
|
79 | 99 |
|
/*
 * Cgroup local-storage lifecycle API (CONFIG_CGROUP_BPF=y).
 * alloc/free manage a storage instance for a program; link/unlink
 * attach it to / detach it from a (cgroup, attach type) pair;
 * assign/release bind a storage map to a program. Error and locking
 * rules are not visible here — see the implementation for details.
 */
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
| 108 | + |
80 | 109 | /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
|
81 | 110 | #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
|
82 | 111 | ({ \
|
@@ -221,6 +250,15 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
|
221 | 250 | return -EINVAL;
|
222 | 251 | }
|
223 | 252 |
|
/*
 * No-op stubs for CONFIG_CGROUP_BPF=n: assign always succeeds,
 * the rest do nothing. alloc returns NULL (not plain 0 — sparse
 * warns "Using plain integer as NULL pointer"); callers on this
 * config must tolerate a NULL storage pointer.
 */
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
| 261 | + |
/* CONFIG_CGROUP_BPF=n fallbacks: feature reads as disabled and the
 * run-prog hooks evaluate to 0 (allow) with no side effects. */
#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
|
|
0 commit comments