BACKPORT: mm, slab/slub: introduce kmalloc-reclaimable caches

Kmem caches can be created with a SLAB_RECLAIM_ACCOUNT flag, which
indicates they contain objects which can be reclaimed under memory
pressure (typically through a shrinker).  This makes the slab pages
accounted as NR_SLAB_RECLAIMABLE in vmstat, which is also reflected in the
MemAvailable meminfo counter and in overcommit decisions.  The slab pages
are also allocated with __GFP_RECLAIMABLE, which is good for
anti-fragmentation through grouping pages by mobility.
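
For reference, a dedicated cache opts into this accounting at creation
time.  A minimal sketch follows; struct foo_object, foo_cache and
foo_cache_init() are purely illustrative names, not part of this patch:

  #include <linux/errno.h>
  #include <linux/slab.h>

  /* illustrative only: a fixed-size object that a shrinker can free */
  struct foo_object {
          unsigned long key;
          char payload[64];
  };

  static struct kmem_cache *foo_cache;

  static int foo_cache_init(void)
  {
          /* pages backing this cache are accounted as NR_SLAB_RECLAIMABLE */
          foo_cache = kmem_cache_create("foo_object",
                                        sizeof(struct foo_object), 0,
                                        SLAB_RECLAIM_ACCOUNT, NULL);
          return foo_cache ? 0 : -ENOMEM;
  }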

The generic kmalloc-X caches are created without this flag, but are
sometimes also used for objects that can be reclaimed, which due to their
varying size cannot have a dedicated kmem cache with the
SLAB_RECLAIM_ACCOUNT flag.  A prominent example is dcache external
names, which prompted the creation
of a new, manually managed vmstat counter NR_INDIRECTLY_RECLAIMABLE_BYTES
in commit f1782c9bc547 ("dcache: account external names as indirectly
reclaimable memory").

To better handle this and any other similar cases, this patch introduces
SLAB_RECLAIM_ACCOUNT variants of kmalloc caches, named kmalloc-rcl-X.
They are used whenever the kmalloc() call passes __GFP_RECLAIMABLE among
gfp flags.  They are added to the kmalloc_caches array as a new type.
Allocations with both __GFP_DMA and __GFP_RECLAIMABLE will use a dma type
cache.
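
As a usage sketch (the helper names below are illustrative, not part of
this patch), a caller whose objects are reclaimable but of varying size
only needs to add __GFP_RECLAIMABLE to the gfp mask:

  #include <linux/slab.h>

  /* served from the matching kmalloc-rcl-<size> cache */
  static void *alloc_reclaimable_buf(size_t len)
  {
          return kmalloc(len, GFP_KERNEL | __GFP_RECLAIMABLE);
  }

  /*
   * With __GFP_DMA set as well, the allocation comes from a dma-kmalloc
   * cache and __GFP_RECLAIMABLE is effectively ignored.
   */
  static void *alloc_reclaimable_dma_buf(size_t len)
  {
          return kmalloc(len, GFP_KERNEL | GFP_DMA | __GFP_RECLAIMABLE);
  }

kfree() is unchanged, since it derives the cache from the object's page.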

This change only applies to SLAB and SLUB, not SLOB.  This is fine, since
SLOB's target is tiny systems and this patch would add some overhead of
kmem management objects.

Link: http://lkml.kernel.org/r/20180731090649.16028-3-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Roman Gushchin <guro@fb.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Vijayanand Jitta <vjitta@codeaurora.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

(cherry picked from commit 1291523f2c1d631fea34102fd241fb54a4e8f7a0)

Conflicts:
        mm/slab_common.c

(1. replace the not-yet-existing slab_flags_t with unsigned long
2. change %u to %lu in kasprintf to prevent compile warnings)

Bug: 138148041
Test: verify KReclaimable accounting after ION allocation+deallocation
Change-Id: Ibe56723da05349d94e3f962b96aa223fc154785e
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 include/linux/slab.h | 16
 mm/slab_common.c     | 48

diff --git a/include/linux/slab.h b/include/linux/slab.h
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -267,8 +267,13 @@ static inline const char *__check_heap_object(const void *ptr,
 #define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                                (KMALLOC_MIN_SIZE) : 16)
 
+/*
+ * Whenever changing this, take care of that kmalloc_type() and
+ * create_kmalloc_caches() still work as intended.
+ */
 enum kmalloc_cache_type {
         KMALLOC_NORMAL = 0,
+        KMALLOC_RECLAIM,
 #ifdef CONFIG_ZONE_DMA
         KMALLOC_DMA,
 #endif
@@ -282,12 +287,21 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
 static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
 {
         int is_dma = 0;
+        int type_dma = 0;
+        int is_reclaimable;
 
 #ifdef CONFIG_ZONE_DMA
         is_dma = !!(flags & __GFP_DMA);
+        type_dma = is_dma * KMALLOC_DMA;
 #endif
 
-        return is_dma;
+        is_reclaimable = !!(flags & __GFP_RECLAIMABLE);
+
+        /*
+         * If an allocation is both __GFP_DMA and __GFP_RECLAIMABLE, return
+         * KMALLOC_DMA and effectively ignore __GFP_RECLAIMABLE
+         */
+        return type_dma + (is_reclaimable & !is_dma) * KMALLOC_RECLAIM;
 }
 
 /*
diff --git a/mm/slab_common.c b/mm/slab_common.c
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1052,10 +1052,21 @@ void __init setup_kmalloc_cache_index_table(void)
         }
 }
 
-static void __init new_kmalloc_cache(int idx, unsigned long flags)
+static void __init
+new_kmalloc_cache(int idx, int type, unsigned long flags)
 {
-        kmalloc_caches[KMALLOC_NORMAL][idx] = create_kmalloc_cache(
-                                        kmalloc_info[idx].name,
+        const char *name;
+
+        if (type == KMALLOC_RECLAIM) {
+                flags |= SLAB_RECLAIM_ACCOUNT;
+                name = kasprintf(GFP_NOWAIT, "kmalloc-rcl-%lu",
+                                                kmalloc_info[idx].size);
+                BUG_ON(!name);
+        } else {
+                name = kmalloc_info[idx].name;
+        }
+
+        kmalloc_caches[type][idx] = create_kmalloc_cache(name,
                                         kmalloc_info[idx].size, flags);
 }
 
@@ -1066,22 +1077,25 @@ static void __init new_kmalloc_cache(int idx, unsigned long flags)
  */
 void __init create_kmalloc_caches(unsigned long flags)
 {
-        int i;
-        int type = KMALLOC_NORMAL;
+        int i, type;
 
-        for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
-                if (!kmalloc_caches[type][i])
-                        new_kmalloc_cache(i, flags);
+        for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
+                for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+                        if (!kmalloc_caches[type][i])
+                                new_kmalloc_cache(i, type, flags);
 
-                /*
-                 * Caches that are not of the two-to-the-power-of size.
-                 * These have to be created immediately after the
-                 * earlier power of two caches
-                 */
-                if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[type][1] && i == 6)
-                        new_kmalloc_cache(1, flags);
-                if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[type][2] && i == 7)
-                        new_kmalloc_cache(2, flags);
+                        /*
+                         * Caches that are not of the two-to-the-power-of size.
+                         * These have to be created immediately after the
+                         * earlier power of two caches
+                         */
+                        if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
+                                        !kmalloc_caches[type][1])
+                                new_kmalloc_cache(1, type, flags);
+                        if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
+                                        !kmalloc_caches[type][2])
+                                new_kmalloc_cache(2, type, flags);
+                }
         }
 
         /* Kmalloc array is now usable */
         slab_state = UP;
