mm: page_alloc: disallow fallbacks when 2M defrag is enabled
Fallbacks are already unlikely due to watermarks being enforced
against MIGRATE_FREE blocks. Eliminate them altogether. This allows
compaction to look exclusively at movable blocks, reducing the number
of pageblocks it needs to scan on an ongoing basis.

Signed-off-by: Johannes Weiner <[email protected]>
hnaz committed Mar 9, 2023
1 parent 6c1d5f1 commit 5b97e28
Showing 3 changed files with 14 additions and 48 deletions.
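
Note: to illustrate the commit message's point about compaction scanning only movable blocks, here is a minimal standalone sketch. It is illustrative only: the trimmed enum, is_migrate_movable_mt() and scan_pageblocks() are made-up stand-ins, not the kernel's is_migrate_movable()/isolate_freepages(). The idea is simply that a per-pageblock migratetype filter shrinks the set of blocks the scanners must visit on each pass.

/* Standalone sketch, not kernel code: filter pageblocks by migratetype. */
#include <stdbool.h>
#include <stdio.h>

/* Trimmed, illustrative migratetype list (the kernel's enum is larger). */
enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_CMA, MIGRATE_FREE };

/* Movable and CMA blocks are the ones compaction migrates within. */
static bool is_migrate_movable_mt(enum migratetype mt)
{
        return mt == MIGRATE_MOVABLE || mt == MIGRATE_CMA;
}

/* Visit only movable pageblocks; everything else is skipped up front. */
static void scan_pageblocks(const enum migratetype *blocks, int nr)
{
        for (int i = 0; i < nr; i++) {
                if (!is_migrate_movable_mt(blocks[i]))
                        continue;       /* fewer blocks to scan on each pass */
                printf("scanning pageblock %d\n", i);
        }
}

int main(void)
{
        enum migratetype zone[] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE,
                                    MIGRATE_FREE, MIGRATE_MOVABLE };
        scan_pageblocks(zone, 4);       /* prints pageblocks 1 and 3 only */
        return 0;
}
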
52 changes: 5 additions & 47 deletions mm/compaction.c
@@ -1258,46 +1258,6 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION

-static bool suitable_migration_source(struct compact_control *cc,
-                                      struct page *page)
-{
-        int block_mt;
-
-        if (pageblock_skip_persistent(page))
-                return false;
-
-        if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
-                return true;
-
-        block_mt = get_pageblock_migratetype(page);
-
-        if (cc->migratetype == MIGRATE_MOVABLE)
-                return is_migrate_movable(block_mt);
-        else
-                return block_mt == cc->migratetype;
-}
-
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct compact_control *cc,
-                                      struct page *page)
-{
-        int mt = get_pageblock_migratetype(page);
-
-        /* If the page is a large free page, then disallow migration */
-        if (mt == MIGRATE_FREE)
-                return false;
-
-        if (cc->ignore_block_suitable)
-                return true;
-
-        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-        if (is_migrate_movable(mt))
-                return true;
-
-        /* Otherwise skip the block */
-        return false;
-}
-
 static inline unsigned int
 freelist_scan_limit(struct compact_control *cc)
 {
@@ -1620,7 +1580,7 @@ static void isolate_freepages(struct compact_control *cc)
                        continue;

                /* Check the block is suitable for migration */
-               if (!suitable_migration_target(cc, page))
+               if (!is_migrate_movable(get_pageblock_migratetype(page)))
                        continue;

                /* If isolation recently failed, do not retry */
@@ -1927,14 +1887,12 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
                        continue;

                /*
-                * For async direct compaction, only scan the pageblocks of the
-                * same migratetype without huge pages. Async direct compaction
-                * is optimistic to see if the minimum amount of work satisfies
-                * the allocation. The cached PFN is updated as it's possible
-                * that all remaining blocks between source and target are
+                * The cached PFN is updated as it's possible that all
+                * remaining blocks between source and target are
                 * unsuitable and the compaction scanners fail to meet.
                 */
-               if (!suitable_migration_source(cc, page)) {
+               if (pageblock_skip_persistent(page) ||
+                   !is_migrate_movable(get_pageblock_migratetype(page))) {
                        update_cached_migrate(cc, block_end_pfn);
                        continue;
                }
2 changes: 1 addition & 1 deletion mm/internal.h
@@ -741,7 +741,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_HIGH              0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET            0x40 /* check for correct cpuset */
 #define ALLOC_CMA               0x80 /* allow allocations from CMA areas */
-#ifdef CONFIG_ZONE_DMA32
+#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_COMPACTION)
 #define ALLOC_NOFRAGMENT        0x100 /* avoid mixing pageblock types */
 #else
 #define ALLOC_NOFRAGMENT        0x0
8 changes: 8 additions & 0 deletions mm/page_alloc.c
@@ -2622,11 +2622,19 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  *
  * The other migratetypes do not have fallbacks.
  */
+#ifdef CONFIG_COMPACTION
+static int fallbacks[MIGRATE_TYPES][2] = {
+        [MIGRATE_UNMOVABLE]   = { MIGRATE_FREE, MIGRATE_TYPES },
+        [MIGRATE_MOVABLE]     = { MIGRATE_FREE, MIGRATE_TYPES },
+        [MIGRATE_RECLAIMABLE] = { MIGRATE_FREE, MIGRATE_TYPES },
+};
+#else
 static int fallbacks[MIGRATE_TYPES][4] = {
         [MIGRATE_UNMOVABLE]   = { MIGRATE_FREE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
         [MIGRATE_MOVABLE]     = { MIGRATE_FREE, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
         [MIGRATE_RECLAIMABLE] = { MIGRATE_FREE, MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
 };
+#endif

 #ifdef CONFIG_CMA
 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
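
Note: each row of the fallbacks arrays above is a list terminated by the MIGRATE_TYPES sentinel, which the allocator walks in order when the requested migratetype's freelists are empty (in the kernel this happens in the fallback path around find_suitable_fallback()/__rmqueue_fallback()). Below is a minimal standalone sketch of that walk; the trimmed enum and walk_fallbacks() are made up for illustration and are not kernel code. With the CONFIG_COMPACTION table, the walk ends right after MIGRATE_FREE, so no other migratetype's freelists are raided.

/* Standalone sketch, not kernel code: walking a MIGRATE_TYPES-terminated row. */
#include <stdio.h>

/* Trimmed, illustrative migratetype list; MIGRATE_TYPES acts as the sentinel. */
enum { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE,
       MIGRATE_FREE, MIGRATE_TYPES };

/* Same shape as the CONFIG_COMPACTION table in the hunk above. */
static int fallbacks[MIGRATE_TYPES][2] = {
        [MIGRATE_UNMOVABLE]   = { MIGRATE_FREE, MIGRATE_TYPES },
        [MIGRATE_MOVABLE]     = { MIGRATE_FREE, MIGRATE_TYPES },
        [MIGRATE_RECLAIMABLE] = { MIGRATE_FREE, MIGRATE_TYPES },
};

/* Walk one row until the sentinel: these are the only permitted fallbacks. */
static void walk_fallbacks(int migratetype)
{
        for (int i = 0; fallbacks[migratetype][i] != MIGRATE_TYPES; i++)
                printf("type %d may fall back to type %d\n",
                       migratetype, fallbacks[migratetype][i]);
}

int main(void)
{
        /* Prints a single line: unmovable requests may only take MIGRATE_FREE
         * blocks; they no longer steal from movable or reclaimable lists. */
        walk_fallbacks(MIGRATE_UNMOVABLE);
        return 0;
}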
