Merge branch 'slab/for-6.13/features' into slab/for-next
Merge the slab feature branch for 6.13:

- Add new slab_strict_numa parameter for per-object memory policies
  (Christoph Lameter)
tehcaster committed Nov 16, 2024
2 parents 2420baa + f7c80fa commit 9e19aa1
Showing 3 changed files with 62 additions and 0 deletions.
10 changes: 10 additions & 0 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -6147,6 +6147,16 @@
For more information see Documentation/mm/slub.rst.
(slub_nomerge legacy name also accepted for now)

slab_strict_numa [MM]
Support memory policies at a per-object level
in the slab allocator. The default is for memory
policies to be applied at the folio level when
a new folio is needed or a partial folio is
retrieved from the lists. Increases overhead
in the slab fastpaths but yields more accurate
NUMA kernel object placement, which helps with
slow interconnects in NUMA systems.

slram= [HW,MTD]

smart2= [HW]
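As a usage sketch for the new slab_strict_numa parameter: it goes on the kernel command line. One common route, assuming a GRUB-based setup (the path and variable below are the stock GRUB convention; bootloader mechanics vary):

    # /etc/default/grub (assumed location): append the parameter, then
    # regenerate the GRUB config and reboot. The "..." stands for whatever
    # options are already present.
    GRUB_CMDLINE_LINUX_DEFAULT="... slab_strict_numa"

After a reboot on a multi-node machine, the pr_info() added in mm/slub.c below means "SLUB: Strict NUMA enabled." appears in the kernel log, confirming the parameter took effect.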
9 changes: 9 additions & 0 deletions Documentation/mm/slub.rst
@@ -175,6 +175,15 @@ can be influenced by kernel parameters:
``slab_max_order`` to 0, which causes the minimum possible order of
slab allocation.

``slab_strict_numa``
Enables the application of memory policies on each
allocation. This results in more accurate placement of
objects, which may reduce accesses to remote nodes. The
default is to apply memory policies only at the folio
level, when a new folio is acquired or a folio is
retrieved from the lists. Enabling this option reduces
the fastpath performance of the slab allocator.
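The policies in question are the calling task's NUMA memory policies; with the option enabled they steer every slab-backed kernel allocation made on the task's behalf, not just the acquisition of a new folio. A minimal userspace sketch of setting such a policy (assumes libnuma's <numaif.h> wrapper and that node 0 exists; the open() is just a stand-in for any syscall that allocates kernel objects):

    #include <fcntl.h>
    #include <numaif.h>     /* set_mempolicy(), MPOL_BIND; link with -lnuma */
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Bind this task's memory policy to node 0 (assumed to exist). */
            unsigned long nodemask = 1UL << 0;

            if (set_mempolicy(MPOL_BIND, &nodemask, 8 * sizeof(nodemask))) {
                    perror("set_mempolicy");
                    return 1;
            }

            /*
             * With slab_strict_numa enabled, kernel objects allocated on
             * this task's behalf (e.g. the struct file behind this open())
             * follow the policy on every allocation, not only when a new
             * slab folio is taken.
             */
            int fd = open("/dev/null", O_RDONLY);
            if (fd >= 0)
                    close(fd);
            return 0;
    }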

SLUB Debug output
=================

43 changes: 43 additions & 0 deletions mm/slub.c
@@ -218,6 +218,10 @@ DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif /* CONFIG_SLUB_DEBUG */

#ifdef CONFIG_NUMA
static DEFINE_STATIC_KEY_FALSE(strict_numa);
#endif

/* Structure holding parameters for get_partial() call chain */
struct partial_context {
        gfp_t flags;
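The strict_numa flag added above uses the kernel's static-key (jump-label) machinery, so the disabled case costs a patched-in NOP on the fast path rather than a load and test. A minimal sketch of that pattern, with hypothetical names (my_feature, do_slow_work(), my_feature_setup()); the real primitives come from <linux/jump_label.h> and <linux/init.h>, exactly as used in this commit:

    #include <linux/init.h>
    #include <linux/jump_label.h>

    /* False by default: the guarded body stays off the hot path. */
    static DEFINE_STATIC_KEY_FALSE(my_feature);

    /* Hypothetical stand-in for the extra work the key gates. */
    static void do_slow_work(void) { }

    static void hot_path(void)
    {
            /* A NOP while false; live-patched to a jump once enabled. */
            if (static_branch_unlikely(&my_feature))
                    do_slow_work();
    }

    static int __init my_feature_setup(char *str)
    {
            static_branch_enable(&my_feature);      /* patches every branch site */
            return 1;                               /* consume the parameter */
    }
    __setup("my_feature", my_feature_setup);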
@@ -3949,6 +3953,28 @@ static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
        object = c->freelist;
        slab = c->slab;

#ifdef CONFIG_NUMA
        if (static_branch_unlikely(&strict_numa) &&
                        node == NUMA_NO_NODE) {

                struct mempolicy *mpol = current->mempolicy;

                if (mpol) {
                        /*
                         * Special BIND rule support. If existing slab
                         * is in permitted set then do not redirect
                         * to a particular node.
                         * Otherwise we apply the memory policy to get
                         * the node we need to allocate on.
                         */
                        if (mpol->mode != MPOL_BIND || !slab ||
                                        !node_isset(slab_nid(slab), mpol->nodes))

                                node = mempolicy_slab_node();
                }
        }
#endif

        if (!USE_LOCKLESS_FAST_PATH() ||
            unlikely(!object || !slab || !node_match(slab, node))) {
                object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
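The MPOL_BIND special case above can be read as a small decision function: if the currently cached slab already sits on a node permitted by a BIND policy, no redirect happens; under any other policy (or a BIND slab on a disallowed node) the policy picks the node. A self-contained paraphrase with simplified stand-in types (struct mpol, struct slab_info, and pick_node() are illustrative names, not kernel API; the kernel uses struct mempolicy, node_isset(), and mempolicy_slab_node()):

    #define NUMA_NO_NODE (-1)

    enum mpol_mode { MPOL_DEFAULT, MPOL_BIND /* , ... */ };

    struct mpol      { enum mpol_mode mode; unsigned long nodes; /* node bitmask */ };
    struct slab_info { int nid; /* node the cached slab lives on */ };

    /* Returns the node to redirect to, or NUMA_NO_NODE to keep the cached slab. */
    static int pick_node(const struct mpol *mpol, const struct slab_info *slab,
                         int policy_node)
    {
            /* BIND special case: a cached slab on any permitted node is kept. */
            if (mpol->mode == MPOL_BIND && slab &&
                (mpol->nodes & (1UL << slab->nid)))
                    return NUMA_NO_NODE;

            /* Otherwise the policy chooses, as mempolicy_slab_node() does. */
            return policy_node;
    }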
@@ -5715,6 +5741,23 @@ static int __init setup_slub_min_objects(char *str)
__setup("slab_min_objects=", setup_slub_min_objects);
__setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0);

#ifdef CONFIG_NUMA
static int __init setup_slab_strict_numa(char *str)
{
        if (nr_node_ids > 1) {
                static_branch_enable(&strict_numa);
                pr_info("SLUB: Strict NUMA enabled.\n");
        } else {
                pr_warn("slab_strict_numa parameter set on non NUMA system.\n");
        }

        return 1;
}

__setup("slab_strict_numa", setup_slab_strict_numa);
#endif
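Two small details in the setup handler above are worth noting: the return value of 1 tells the __setup() machinery the parameter was consumed, and the nr_node_ids > 1 guard leaves the static key disabled on single-node systems, so the new fastpath check stays patched out and only the pr_warn() is emitted.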


#ifdef CONFIG_HARDENED_USERCOPY
/*
* Rejects incorrectly sized objects and objects that are to be copied
