Skip to content

Commit

Permalink
tetragon: probe_read usage may cause issues with newer kernels
Browse files Browse the repository at this point in the history
Newer kernels should not use probe_read, so convert to probe_read_kernel.
We have logic to revert to probe_read on older kernels when needed.

Signed-off-by: John Fastabend <[email protected]>
  • Loading branch information
jrfastab committed Sep 13, 2024
1 parent 88036cd commit 46a92f2
Show file tree
Hide file tree
Showing 3 changed files with 21 additions and 19 deletions.
2 changes: 2 additions & 0 deletions bpf/include/api.h
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,8 @@ static long BPF_FUNC(dynptr_read, void *dst, uint32_t len, const struct bpf_dynp
static long BPF_FUNC(dynptr_write, const struct bpf_dynptr *dst, uint32_t offset, void *src, uint32_t len, uint64_t flags);
static void BPF_FUNC(dynptr_data, const struct bpf_dynptr *ptr, uint32_t offset, uint32_t len);

static long BPF_FUNC(sock_ops_cb_flags_set, struct bpf_sock_ops *bpf_sock, int argval);

/** LLVM built-ins, mem*() routines work for constant size */

#ifndef lock_xadd
Expand Down
16 changes: 8 additions & 8 deletions bpf/lib/bpf_cgroup.h
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ FUNC_INLINE const char *__get_cgroup_kn_name(const struct kernfs_node *kn)
const char *name = NULL;

if (kn)
probe_read(&name, sizeof(name), _(&kn->name));
probe_read_kernel(&name, sizeof(name), _(&kn->name));

return name;
}
Expand Down Expand Up @@ -137,7 +137,7 @@ FUNC_INLINE __u64 __get_cgroup_kn_id(const struct kernfs_node *kn)
if (BPF_CORE_READ_INTO(&id, old_kn, id.id) != 0)
return 0;
} else {
probe_read(&id, sizeof(id), _(&kn->id));
probe_read_kernel(&id, sizeof(id), _(&kn->id));
}

return id;
Expand All @@ -154,7 +154,7 @@ FUNC_INLINE struct kernfs_node *__get_cgroup_kn(const struct cgroup *cgrp)
struct kernfs_node *kn = NULL;

if (cgrp)
probe_read(&kn, sizeof(cgrp->kn), _(&cgrp->kn));
probe_read_kernel(&kn, sizeof(cgrp->kn), _(&cgrp->kn));

return kn;
}
Expand Down Expand Up @@ -183,7 +183,7 @@ FUNC_INLINE __u32 get_cgroup_hierarchy_id(const struct cgroup *cgrp)
* @cgrp: target cgroup
*
* Returns a pointer to the cgroup node name on success that can
* be read with probe_read(). NULL on failures.
* be read with probe_read_kernel(). NULL on failures.
*/
FUNC_INLINE const char *get_cgroup_name(const struct cgroup *cgrp)
{
Expand All @@ -208,7 +208,7 @@ FUNC_INLINE __u32 get_cgroup_level(const struct cgroup *cgrp)
{
__u32 level = 0;

probe_read(&level, sizeof(level), _(&cgrp->level));
probe_read_kernel(&level, sizeof(level), _(&cgrp->level));
return level;
}

Expand Down Expand Up @@ -257,7 +257,7 @@ get_task_cgroup(struct task_struct *task, __u32 subsys_idx, __u32 *error_flags)
struct css_set *cgroups;
struct cgroup *cgrp = NULL;

probe_read(&cgroups, sizeof(cgroups), _(&task->cgroups));
probe_read_kernel(&cgroups, sizeof(cgroups), _(&task->cgroups));
if (unlikely(!cgroups)) {
*error_flags |= EVENT_ERROR_CGROUPS;
return cgrp;
Expand Down Expand Up @@ -290,13 +290,13 @@ get_task_cgroup(struct task_struct *task, __u32 subsys_idx, __u32 *error_flags)
* support as much as workload as possible. It also reduces errors
* in a significant way.
*/
probe_read(&subsys, sizeof(subsys), _(&cgroups->subsys[subsys_idx]));
probe_read_kernel(&subsys, sizeof(subsys), _(&cgroups->subsys[subsys_idx]));
if (unlikely(!subsys)) {
*error_flags |= EVENT_ERROR_CGROUP_SUBSYS;
return cgrp;
}

probe_read(&cgrp, sizeof(cgrp), _(&subsys->cgroup));
probe_read_kernel(&cgrp, sizeof(cgrp), _(&subsys->cgroup));
if (!cgrp)
*error_flags |= EVENT_ERROR_CGROUP_SUBSYSCGRP;

Expand Down
22 changes: 11 additions & 11 deletions bpf/lib/bpf_task.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ FUNC_INLINE struct task_struct *get_parent(struct task_struct *t)
struct task_struct *task;

/* Read the real parent */
probe_read(&task, sizeof(task), _(&t->real_parent));
probe_read_kernel(&task, sizeof(task), _(&t->real_parent));
if (!task)
return 0;
return task;
Expand All @@ -45,7 +45,7 @@ FUNC_INLINE struct task_struct *get_task_from_pid(__u32 pid)
i = TASK_PID_LOOP;
continue;
}
probe_read(&cpid, sizeof(cpid), _(&task->tgid));
probe_read_kernel(&cpid, sizeof(cpid), _(&task->tgid));
if (cpid == pid) {
i = TASK_PID_LOOP;
continue;
Expand All @@ -68,7 +68,7 @@ FUNC_INLINE __u32 get_task_pid_vnr(void)

thread_pid_exists = bpf_core_field_exists(task->thread_pid);
if (thread_pid_exists) {
probe_read(&pid, sizeof(pid), _(&task->thread_pid));
probe_read_kernel(&pid, sizeof(pid), _(&task->thread_pid));
if (!pid)
return 0;
} else {
Expand All @@ -83,15 +83,15 @@ FUNC_INLINE __u32 get_task_pid_vnr(void)
if (!thread_pid_exists)
link_sz =
24; // voodoo magic, hard-code 24 to init stack
probe_read(&link, link_sz,
probe_read_kernel(&link, link_sz,
(void *)_(&task->pids) + (PIDTYPE_PID * link_sz));
pid = link.pid;
}
upid_sz = bpf_core_field_size(pid->numbers[0]);
probe_read(&level, sizeof(level), _(&pid->level));
probe_read_kernel(&level, sizeof(level), _(&pid->level));
if (level < 1)
return 0;
probe_read(&upid, upid_sz,
probe_read_kernel(&upid, upid_sz,
(void *)_(&pid->numbers) + (level * upid_sz));
return upid.nr;
}
Expand All @@ -103,7 +103,7 @@ FUNC_INLINE __u32 event_find_parent_pid(struct task_struct *t)

if (!task)
return 0;
probe_read(&pid, sizeof(pid), _(&task->tgid));
probe_read_kernel(&pid, sizeof(pid), _(&task->tgid));
return pid;
}

Expand All @@ -116,10 +116,10 @@ __event_find_parent(struct task_struct *task)

#pragma unroll
for (i = 0; i < 4; i++) {
probe_read(&task, sizeof(task), _(&task->real_parent));
probe_read_kernel(&task, sizeof(task), _(&task->real_parent));
if (!task)
break;
probe_read(&pid, sizeof(pid), _(&task->tgid));
probe_read_kernel(&pid, sizeof(pid), _(&task->tgid));
value = execve_map_get_noinit(pid);
if (value && value->key.ktime != 0)
return value;
Expand Down Expand Up @@ -158,13 +158,13 @@ FUNC_INLINE struct execve_map_value *event_find_curr(__u32 *ppid, bool *walked)

#pragma unroll
for (i = 0; i < 4; i++) {
probe_read(&pid, sizeof(pid), _(&task->tgid));
probe_read_kernel(&pid, sizeof(pid), _(&task->tgid));
value = execve_map_get_noinit(pid);
if (value && value->key.ktime != 0)
break;
value = 0;
*walked = 1;
probe_read(&task, sizeof(task), _(&task->real_parent));
probe_read_kernel(&task, sizeof(task), _(&task->real_parent));
if (!task)
break;
}
Expand Down

0 comments on commit 46a92f2

Please sign in to comment.