Skip to content

Commit

Permalink
mm: page_alloc: manage free memory in whole pageblocks
Browse files Browse the repository at this point in the history
Right now, allocation requests only reclaim (and compact) for their
exact order. Since the majority of allocation requests are smaller
than a pageblock, this is likely to result in partial blocks being
freed, and subsequently fragmented by fallbacks. This defeats the
allocator's efforts to group pageblocks by mobility.

Fix this mismatch between the allocator and reclaim/compaction: make
the pageblock the default unit for free memory by enforcing watermarks
against MIGRATE_FREE blocks, and have reclaim/compaction produce them.

Signed-off-by: Johannes Weiner <[email protected]>
  • Loading branch information
hnaz committed Mar 9, 2023
1 parent 156046f commit 61be94a
Show file tree
Hide file tree
Showing 5 changed files with 118 additions and 121 deletions.
1 change: 0 additions & 1 deletion include/linux/compaction.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@ enum compact_priority {
COMPACT_PRIO_SYNC_FULL,
MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_FULL,
COMPACT_PRIO_SYNC_LIGHT,
MIN_COMPACT_COSTLY_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
COMPACT_PRIO_ASYNC,
INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
Expand Down
68 changes: 23 additions & 45 deletions mm/compaction.c
Original file line number Diff line number Diff line change
Expand Up @@ -1784,15 +1784,6 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
return pfn;

/*
* Only allow kcompactd and direct requests for movable pages to
* quickly clear out a MOVABLE pageblock for allocation. This
* reduces the risk that a large movable pageblock is freed for
* an unmovable/reclaimable small allocation.
*/
if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
return pfn;

/*
* When starting the migration scanner, pick any pageblock within the
* first half of the search space. Otherwise try and pick a pageblock
Expand Down Expand Up @@ -2175,8 +2166,7 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,

static enum compact_result __compact_finished(struct compact_control *cc)
{
unsigned int order;
const int migratetype = cc->migratetype;
unsigned long mark;
int ret;

/* Compaction run completes if the migrate and free scanner meet */
Expand Down Expand Up @@ -2230,38 +2220,22 @@ static enum compact_result __compact_finished(struct compact_control *cc)
if (!pageblock_aligned(cc->migrate_pfn))
return COMPACT_CONTINUE;

/* Direct compactor: Is a suitable page free? */
/* Done when watermarks are restored */
ret = COMPACT_NO_SUITABLE_PAGE;
for (order = cc->order; order < MAX_ORDER; order++) {
struct free_area *area = &cc->zone->free_area[order];
bool can_steal;

/* Job done if page is free of the right migratetype */
if (!free_area_empty(area, migratetype))
return COMPACT_SUCCESS;
if (cc->direct_compaction)
mark = wmark_pages(cc->zone, cc->alloc_flags & ALLOC_WMARK_MASK);
else
mark = high_wmark_pages(cc->zone);
if (zone_watermark_ok(cc->zone, cc->order, mark, cc->highest_zoneidx, cc->alloc_flags))
return COMPACT_SUCCESS;

#ifdef CONFIG_CMA
/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
if (migratetype == MIGRATE_MOVABLE &&
!free_area_empty(area, MIGRATE_CMA))
return COMPACT_SUCCESS;
#endif
/*
* Job done if allocation would steal freepages from
* other migratetype buddy lists.
*/
if (find_suitable_fallback(area, order, migratetype,
true, &can_steal) != -1)
/*
* Movable pages are OK in any pageblock. If we are
* stealing for a non-movable allocation, make sure
* we finish compacting the current pageblock first
* (which is assured by the above migrate_pfn align
* check) so it is as free as possible and we won't
* have to steal another one soon.
*/
return COMPACT_SUCCESS;
}
/*
* In the process of neutralizing blocks, compaction reduces
* the amount of migration targets. Re-check availability.
*/
if (compaction_suitable(cc->zone, cc->order,
cc->highest_zoneidx) == COMPACT_SKIPPED)
return COMPACT_SKIPPED;

out:
if (cc->contended || fatal_signal_pending(current))
Expand Down Expand Up @@ -2310,8 +2284,12 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
unsigned long watermark;

/* Allocation can already succeed, nothing to do */
watermark = wmark_pages(cc->zone,
cc->alloc_flags & ALLOC_WMARK_MASK);
if (cc->direct_compaction)
watermark = wmark_pages(cc->zone,
cc->alloc_flags &
ALLOC_WMARK_MASK);
else
watermark = high_wmark_pages(cc->zone);
if (zone_watermark_ok(cc->zone, cc->order, watermark,
cc->highest_zoneidx, cc->alloc_flags))
return COMPACT_SUCCESS;
Expand Down Expand Up @@ -2804,7 +2782,7 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)

/* Allocation can succeed in any zone, done */
if (zone_watermark_ok(zone, pgdat->kcompactd_max_order,
min_wmark_pages(zone),
high_wmark_pages(zone),
highest_zoneidx, 0))
return true;

Expand Down Expand Up @@ -2849,7 +2827,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)

/* Allocation can already succeed, nothing to do */
if (zone_watermark_ok(zone, cc.order,
min_wmark_pages(zone), zoneid, 0))
high_wmark_pages(zone), zoneid, 0))
continue;

if (compaction_suitable(zone, cc.order,
Expand Down
1 change: 1 addition & 0 deletions mm/internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -432,6 +432,7 @@ struct compact_control {
*/
/*
 * Passed from the page allocator into compaction so a just-assembled
 * free page can be handed straight back to the allocating context
 * instead of going through the free lists.
 * NOTE(review): seen here inside a diff of mm/internal.h — confirm
 * field semantics against the full file.
 */
struct capture_control {
struct compact_control *cc;
/* order of the allocation the captured page must satisfy (new in this change) */
int order;
/* migratetype requested by the allocation context */
int migratetype;
/* out: the captured free page, or NULL if none was captured */
struct page *page;
};
Expand Down
Loading

0 comments on commit 61be94a

Please sign in to comment.