diff --git a/bpf/include/api.h b/bpf/include/api.h
index 9a83dbdd68e..4d42ba8c237 100644
--- a/bpf/include/api.h
+++ b/bpf/include/api.h
@@ -267,6 +267,8 @@ static long BPF_FUNC(dynptr_read, void *dst, uint32_t len, const struct bpf_dynp
 static long BPF_FUNC(dynptr_write, const struct bpf_dynptr *dst, uint32_t offset, void *src, uint32_t len, uint64_t flags);
 static void BPF_FUNC(dynptr_data, const struct bpf_dynptr *ptr, uint32_t offset, uint32_t len);
 
+static long BPF_FUNC(sock_ops_cb_flags_set, struct bpf_sock_ops *bpf_sock, int argval);
+
 /** LLVM built-ins, mem*() routines work for constant size */
 
 #ifndef lock_xadd
diff --git a/bpf/lib/bpf_cgroup.h b/bpf/lib/bpf_cgroup.h
index 77c3f3198c2..5ed6a201310 100644
--- a/bpf/lib/bpf_cgroup.h
+++ b/bpf/lib/bpf_cgroup.h
@@ -108,7 +108,7 @@ FUNC_INLINE const char *__get_cgroup_kn_name(const struct kernfs_node *kn)
 	const char *name = NULL;
 
 	if (kn)
-		probe_read(&name, sizeof(name), _(&kn->name));
+		probe_read_kernel(&name, sizeof(name), _(&kn->name));
 	return name;
 }
 
@@ -137,7 +137,7 @@ FUNC_INLINE __u64 __get_cgroup_kn_id(const struct kernfs_node *kn)
 		if (BPF_CORE_READ_INTO(&id, old_kn, id.id) != 0)
 			return 0;
 	} else {
-		probe_read(&id, sizeof(id), _(&kn->id));
+		probe_read_kernel(&id, sizeof(id), _(&kn->id));
 	}
 
 	return id;
@@ -154,7 +154,7 @@ FUNC_INLINE struct kernfs_node *__get_cgroup_kn(const struct cgroup *cgrp)
 	struct kernfs_node *kn = NULL;
 
 	if (cgrp)
-		probe_read(&kn, sizeof(cgrp->kn), _(&cgrp->kn));
+		probe_read_kernel(&kn, sizeof(cgrp->kn), _(&cgrp->kn));
 	return kn;
 }
 
@@ -183,7 +183,7 @@ FUNC_INLINE __u32 get_cgroup_hierarchy_id(const struct cgroup *cgrp)
  * @cgrp: target cgroup
  *
  * Returns a pointer to the cgroup node name on success that can
- * be read with probe_read(). NULL on failures.
+ * be read with probe_read_kernel(). NULL on failures.
  */
 FUNC_INLINE const char *get_cgroup_name(const struct cgroup *cgrp)
 {
@@ -208,7 +208,7 @@ FUNC_INLINE __u32 get_cgroup_level(const struct cgroup *cgrp)
 {
 	__u32 level = 0;
 
-	probe_read(&level, sizeof(level), _(&cgrp->level));
+	probe_read_kernel(&level, sizeof(level), _(&cgrp->level));
 	return level;
 }
 
@@ -257,7 +257,7 @@ get_task_cgroup(struct task_struct *task, __u32 subsys_idx, __u32 *error_flags)
 	struct css_set *cgroups;
 	struct cgroup *cgrp = NULL;
 
-	probe_read(&cgroups, sizeof(cgroups), _(&task->cgroups));
+	probe_read_kernel(&cgroups, sizeof(cgroups), _(&task->cgroups));
 	if (unlikely(!cgroups)) {
 		*error_flags |= EVENT_ERROR_CGROUPS;
 		return cgrp;
@@ -290,13 +290,13 @@ get_task_cgroup(struct task_struct *task, __u32 subsys_idx, __u32 *error_flags)
 	 * support as much as workload as possible. It also reduces errors
 	 * in a significant way.
 	 */
-	probe_read(&subsys, sizeof(subsys), _(&cgroups->subsys[subsys_idx]));
+	probe_read_kernel(&subsys, sizeof(subsys), _(&cgroups->subsys[subsys_idx]));
 	if (unlikely(!subsys)) {
 		*error_flags |= EVENT_ERROR_CGROUP_SUBSYS;
 		return cgrp;
 	}
 
-	probe_read(&cgrp, sizeof(cgrp), _(&subsys->cgroup));
+	probe_read_kernel(&cgrp, sizeof(cgrp), _(&subsys->cgroup));
 	if (!cgrp)
 		*error_flags |= EVENT_ERROR_CGROUP_SUBSYSCGRP;
 
diff --git a/bpf/lib/bpf_task.h b/bpf/lib/bpf_task.h
index 7f08041803d..0fbe017763c 100644
--- a/bpf/lib/bpf_task.h
+++ b/bpf/lib/bpf_task.h
@@ -26,7 +26,7 @@ FUNC_INLINE struct task_struct *get_parent(struct task_struct *t)
 	struct task_struct *task;
 
 	/* Read the real parent */
-	probe_read(&task, sizeof(task), _(&t->real_parent));
+	probe_read_kernel(&task, sizeof(task), _(&t->real_parent));
 	if (!task)
 		return 0;
 	return task;
@@ -45,7 +45,7 @@ FUNC_INLINE struct task_struct *get_task_from_pid(__u32 pid)
 			i = TASK_PID_LOOP;
 			continue;
 		}
-		probe_read(&cpid, sizeof(cpid), _(&task->tgid));
+		probe_read_kernel(&cpid, sizeof(cpid), _(&task->tgid));
 		if (cpid == pid) {
 			i = TASK_PID_LOOP;
 			continue;
@@ -68,7 +68,7 @@ FUNC_INLINE __u32 get_task_pid_vnr(void)
 
 	thread_pid_exists = bpf_core_field_exists(task->thread_pid);
 	if (thread_pid_exists) {
-		probe_read(&pid, sizeof(pid), _(&task->thread_pid));
+		probe_read_kernel(&pid, sizeof(pid), _(&task->thread_pid));
 		if (!pid)
 			return 0;
 	} else {
@@ -83,16 +83,16 @@ FUNC_INLINE __u32 get_task_pid_vnr(void)
 		if (!thread_pid_exists)
 			link_sz = 24; // voodoo magic, hard-code 24 to init stack
-		probe_read(&link, link_sz,
-			   (void *)_(&task->pids) + (PIDTYPE_PID * link_sz));
+		probe_read_kernel(&link, link_sz,
+				  (void *)_(&task->pids) + (PIDTYPE_PID * link_sz));
 		pid = link.pid;
 	}
 
 	upid_sz = bpf_core_field_size(pid->numbers[0]);
-	probe_read(&level, sizeof(level), _(&pid->level));
+	probe_read_kernel(&level, sizeof(level), _(&pid->level));
 	if (level < 1)
 		return 0;
-	probe_read(&upid, upid_sz,
-		   (void *)_(&pid->numbers) + (level * upid_sz));
+	probe_read_kernel(&upid, upid_sz,
+			  (void *)_(&pid->numbers) + (level * upid_sz));
 	return upid.nr;
 }
 
@@ -103,7 +103,7 @@ FUNC_INLINE __u32 event_find_parent_pid(struct task_struct *t)
 
 	if (!task)
 		return 0;
-	probe_read(&pid, sizeof(pid), _(&task->tgid));
+	probe_read_kernel(&pid, sizeof(pid), _(&task->tgid));
 	return pid;
 }
 
@@ -116,10 +116,10 @@ __event_find_parent(struct task_struct *task)
 
 #pragma unroll
 	for (i = 0; i < 4; i++) {
-		probe_read(&task, sizeof(task), _(&task->real_parent));
+		probe_read_kernel(&task, sizeof(task), _(&task->real_parent));
 		if (!task)
 			break;
-		probe_read(&pid, sizeof(pid), _(&task->tgid));
+		probe_read_kernel(&pid, sizeof(pid), _(&task->tgid));
 		value = execve_map_get_noinit(pid);
 		if (value && value->key.ktime != 0)
 			return value;
@@ -158,13 +158,13 @@ FUNC_INLINE struct execve_map_value *event_find_curr(__u32 *ppid, bool *walked)
 
 #pragma unroll
 	for (i = 0; i < 4; i++) {
-		probe_read(&pid, sizeof(pid), _(&task->tgid));
+		probe_read_kernel(&pid, sizeof(pid), _(&task->tgid));
 		value = execve_map_get_noinit(pid);
 		if (value && value->key.ktime != 0)
			break;
 		value = 0;
 		*walked = 1;
-		probe_read(&task, sizeof(task), _(&task->real_parent));
+		probe_read_kernel(&task, sizeof(task), _(&task->real_parent));
 		if (!task)
 			break;
 	}
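A hypothetical usage sketch, not part of this patch: a standalone sockops program, written against plain libbpf/uapi headers, that enables TCP state-change callbacks through bpf_sock_ops_cb_flags_set(), the kernel helper that the new sock_ops_cb_flags_set() wrapper in api.h resolves to. The program name, section, and flag choice are illustrative assumptions only.

/* Hypothetical sketch; not part of this patch. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int tcp_state_cb_enable(struct bpf_sock_ops *skops)
{
	switch (skops->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		/* Ask the kernel to call this program again on every TCP
		 * state transition (BPF_SOCK_OPS_STATE_CB events).
		 */
		bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_STATE_CB_FLAG);
		break;
	case BPF_SOCK_OPS_STATE_CB:
		/* skops->args[0]/args[1] carry the old/new TCP state. */
		break;
	}
	return 0;
}

char _license[] SEC("license") = "Dual BSD/GPL";

A second, similarly hypothetical sketch of the read pattern the probe_read -> probe_read_kernel conversion above targets: copying kernel structure fields with bpf_probe_read_kernel(), which explicitly reads kernel memory and zeroes the destination on failure, instead of the address-space agnostic bpf_probe_read(). It is written against vmlinux.h/libbpf rather than the repo's wrappers; the attach point and names are illustrative only.

/* Hypothetical sketch; not part of this patch. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("kprobe/wake_up_new_task")
int read_parent_tgid(struct pt_regs *ctx)
{
	struct task_struct *task = (struct task_struct *)bpf_get_current_task();
	struct task_struct *parent = NULL;
	__u32 ppid = 0;

	/* Explicit kernel-memory reads; a bad address returns a negative
	 * error and leaves the destination zeroed.
	 */
	bpf_probe_read_kernel(&parent, sizeof(parent), &task->real_parent);
	if (parent)
		bpf_probe_read_kernel(&ppid, sizeof(ppid), &parent->tgid);

	bpf_printk("parent tgid=%u", ppid);
	return 0;
}

char _license[] SEC("license") = "GPL";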