drgn.helpers.linux.slab: add support for retrieving objects exclusively from per-node lists

The current implementation of for_each_allocated_object() is slow, as
it iterates over every physical page.

This commit adds the ability to retrieve objects exclusively from the
per-node partial lists, greatly improving efficiency when searching
for the source of the VFS caches of dying cgroups or for millions of
negative dentries.

Signed-off-by: Jian Wen <[email protected]>
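
As a rough illustration of the intended use (not part of this commit), the new flag could be exercised from a drgn shell roughly as follows; find_slab_cache() and slab_cache_for_each_allocated_object() are existing drgn helpers and "dentry" is a real cache name, but the rest of the session is a hypothetical sketch:

    from drgn.helpers.linux.slab import (
        find_slab_cache,
        slab_cache_for_each_allocated_object,
    )

    # Assumes a drgn session where `prog` is attached to a live kernel or vmcore.
    dentry_cache = find_slab_cache(prog, "dentry")

    # Walk only the slabs on SLUB's per-node partial lists instead of scanning
    # every physical page in the system.
    for dentry in slab_cache_for_each_allocated_object(
        dentry_cache, "struct dentry", node_partial_only=True
    ):
        # Negative dentries have no inode attached.
        if not dentry.d_inode:
            print(dentry.d_name.name.string_())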
wenjianhn committed Jun 18, 2023
1 parent e9141fa commit aa857fd
Showing 3 changed files with 72 additions and 3 deletions.
35 changes: 32 additions & 3 deletions drgn/helpers/linux/slab.py
@@ -217,8 +217,28 @@ def _page_objects(
     ) -> Iterator[Object]:
         raise NotImplementedError()

-    def for_each_allocated_object(self, type: Union[str, Type]) -> Iterator[Object]:
+    def for_each_allocated_object(
+        self, type: Union[str, Type], node_partial_only: Optional[bool] = None
+    ) -> Iterator[Object]:
         pointer_type = self._prog.pointer_type(self._prog.type(type))
+
+        if node_partial_only:
+            cpu_slab = self._slab_cache.cpu_slab.read_()
+            if hasattr(cpu_slab, "slab"):
+                struct = 'struct slab'
+                member = 'slab_list'
+            else:
+                struct = 'struct page'
+                member = 'lru'
+
+            for node in range(self._prog['nr_online_nodes'].value_()):
+                n = self._slab_cache.node[node]
+                for slab in list_for_each_entry(
+                        struct, n.partial.address_of_(), member):
+                    yield from self._page_objects(
+                        cast("struct page *", slab), slab, pointer_type)
+            return

         slab_type = _get_slab_type(self._prog)
         PG_slab_mask = 1 << self._prog.constant("PG_slab")
         for page in for_each_page(self._prog):
@@ -429,7 +449,8 @@ def _get_slab_cache_helper(slab_cache: Object) -> _SlabCacheHelper:


 def slab_cache_for_each_allocated_object(
-    slab_cache: Object, type: Union[str, Type]
+    slab_cache: Object, type: Union[str, Type],
+    node_partial_only: Optional[bool] = None
 ) -> Iterator[Object]:
     """
     Iterate over all allocated objects in a given slab cache.
@@ -443,11 +464,19 @@ def slab_cache_for_each_allocated_object(
             ...
         }

+    Only objects on a per-node partial slab list
+
+        >>> next(slab_cache_for_each_allocated_object(dentry_cache, "struct dentry", node_partial_only=True))
+        *(struct dentry *)0xffff95b9beebc000 = {
+            ...
+        }
+
     :param slab_cache: ``struct kmem_cache *``
     :param type: Type of object in the slab cache.
+    :param node_partial_only: only objects on a per-node partial slab list
     :return: Iterator of ``type *`` objects.
     """
-    return _get_slab_cache_helper(slab_cache).for_each_allocated_object(type)
+    return _get_slab_cache_helper(slab_cache).for_each_allocated_object(
+        type, node_partial_only)


 def _find_containing_slab(
11 changes: 11 additions & 0 deletions tests/linux_kernel/helpers/test_slab.py
@@ -160,6 +160,17 @@ def test_slab_cache_for_each_allocated_object(self):
             list(objects),
         )

+    @skip_unless_have_full_mm_support
+    @skip_unless_have_test_kmod
+    def test_slab_cache_for_each_allocated_object_on_partial_list(self):
+        self.assertEqual(
+            sum(1 for _ in slab_cache_for_each_allocated_object(
+                self.prog["drgn_test_node_partial_kmem_cache"],
+                "struct drgn_test_node_partial_slab_object", True)
+            ),
+            1
+        )
+
     @skip_unless_have_full_mm_support
     @skip_unless_have_test_kmod
     def test_slab_object_info(self):
29 changes: 29 additions & 0 deletions tests/linux_kernel/kmod/drgn_test.c
@@ -364,6 +364,14 @@ struct drgn_test_big_slab_object {
 struct drgn_test_small_slab_object *drgn_test_small_slab_objects[5];
 struct drgn_test_big_slab_object *drgn_test_big_slab_objects[5];

+struct kmem_cache *drgn_test_node_partial_kmem_cache;
+
+struct drgn_test_node_partial_slab_object {
+	unsigned long unused;
+};
+
+struct drgn_test_node_partial_slab_object *drgn_test_node_partial_slab_object_p;
+
 static void drgn_test_slab_exit(void)
 {
 	size_t i;
@@ -386,6 +394,12 @@ static void drgn_test_slab_exit(void)
 		}
 		kmem_cache_destroy(drgn_test_small_kmem_cache);
 	}
+	if (drgn_test_node_partial_kmem_cache) {
+		if (drgn_test_node_partial_slab_object_p)
+			kmem_cache_free(drgn_test_node_partial_kmem_cache,
+					drgn_test_node_partial_slab_object_p);
+		kmem_cache_destroy(drgn_test_node_partial_kmem_cache);
+	}
 }

 // Dummy constructor so test slab caches won't get merged.
@@ -426,6 +440,21 @@ static int drgn_test_slab_init(void)
 			return -ENOMEM;
 		drgn_test_big_slab_objects[i]->value = i;
 	}
+
+	drgn_test_node_partial_kmem_cache =
+		kmem_cache_create(
+			"drgn_test_partial",
+			sizeof(struct drgn_test_node_partial_slab_object),
+			__alignof__(struct drgn_test_node_partial_slab_object),
+			0, drgn_test_slab_ctor);
+	if (!drgn_test_node_partial_kmem_cache)
+		return -ENOMEM;
+	drgn_test_node_partial_slab_object_p = kmem_cache_alloc(
+		drgn_test_node_partial_kmem_cache, GFP_KERNEL);
+
+	// Move the object to the per-node partial list
+	kmem_cache_shrink(drgn_test_node_partial_kmem_cache);
+
 	return 0;
 }

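A hedged companion sketch (also not in the commit): with the drgn test kmod loaded, the check that the new unit test performs can be reproduced interactively, since drgn_test_slab_init() parks exactly one object on a per-node partial list via kmem_cache_shrink(). The session variable `prog` and the use of value_() for the comparison are assumptions:

    from drgn.helpers.linux.slab import slab_cache_for_each_allocated_object

    # Assumes `prog` is attached to a kernel with the drgn test kmod loaded.
    objs = list(
        slab_cache_for_each_allocated_object(
            prog["drgn_test_node_partial_kmem_cache"],
            "struct drgn_test_node_partial_slab_object",
            node_partial_only=True,
        )
    )
    assert len(objs) == 1
    assert objs[0].value_() == prog["drgn_test_node_partial_slab_object_p"].value_()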
