mm: compaction: reorder finished and suitable check functions
No functional change.

Signed-off-by: Johannes Weiner <[email protected]>
hnaz committed Mar 9, 2023
1 parent 1d1699a commit 156046f
Showing 1 changed file with 109 additions and 109 deletions.
mm/compaction.c: 109 additions & 109 deletions
@@ -2063,115 +2063,6 @@ static bool should_proactive_compact_node(pg_data_t *pgdat)
        return fragmentation_score_node(pgdat) > wmark_high;
}

static enum compact_result __compact_finished(struct compact_control *cc)
{
        unsigned int order;
        const int migratetype = cc->migratetype;
        int ret;

        /* Compaction run completes if the migrate and free scanner meet */
        if (compact_scanners_met(cc)) {
                /* Let the next compaction start anew. */
                reset_cached_positions(cc->zone);

                /*
                 * Mark that the PG_migrate_skip information should be cleared
                 * by kswapd when it goes to sleep. kcompactd does not set the
                 * flag itself as the decision to be clear should be directly
                 * based on an allocation request.
                 */
                if (cc->direct_compaction)
                        cc->zone->compact_blockskip_flush = true;

                if (cc->whole_zone)
                        return COMPACT_COMPLETE;
                else
                        return COMPACT_PARTIAL_SKIPPED;
        }

        if (cc->proactive_compaction) {
                int score, wmark_low;
                pg_data_t *pgdat;

                pgdat = cc->zone->zone_pgdat;
                if (kswapd_is_running(pgdat))
                        return COMPACT_PARTIAL_SKIPPED;

                score = fragmentation_score_zone(cc->zone);
                wmark_low = fragmentation_score_wmark(pgdat, true);

                if (score > wmark_low)
                        ret = COMPACT_CONTINUE;
                else
                        ret = COMPACT_SUCCESS;

                goto out;
        }

        if (is_via_compact_memory(cc->order))
                return COMPACT_CONTINUE;

        /*
         * Always finish scanning a pageblock to reduce the possibility of
         * fallbacks in the future. This is particularly important when
         * migration source is unmovable/reclaimable but it's not worth
         * special casing.
         */
        if (!pageblock_aligned(cc->migrate_pfn))
                return COMPACT_CONTINUE;

        /* Direct compactor: Is a suitable page free? */
        ret = COMPACT_NO_SUITABLE_PAGE;
        for (order = cc->order; order < MAX_ORDER; order++) {
                struct free_area *area = &cc->zone->free_area[order];
                bool can_steal;

                /* Job done if page is free of the right migratetype */
                if (!free_area_empty(area, migratetype))
                        return COMPACT_SUCCESS;

#ifdef CONFIG_CMA
                /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
                if (migratetype == MIGRATE_MOVABLE &&
                    !free_area_empty(area, MIGRATE_CMA))
                        return COMPACT_SUCCESS;
#endif
                /*
                 * Job done if allocation would steal freepages from
                 * other migratetype buddy lists.
                 */
                if (find_suitable_fallback(area, order, migratetype,
                                        true, &can_steal) != -1)
                        /*
                         * Movable pages are OK in any pageblock. If we are
                         * stealing for a non-movable allocation, make sure
                         * we finish compacting the current pageblock first
                         * (which is assured by the above migrate_pfn align
                         * check) so it is as free as possible and we won't
                         * have to steal another one soon.
                         */
                        return COMPACT_SUCCESS;
        }

out:
        if (cc->contended || fatal_signal_pending(current))
                ret = COMPACT_CONTENDED;

        return ret;
}

static enum compact_result compact_finished(struct compact_control *cc)
{
        int ret;

        ret = __compact_finished(cc);
        trace_mm_compaction_finished(cc->zone, cc->order, ret);
        if (ret == COMPACT_NO_SUITABLE_PAGE)
                ret = COMPACT_CONTINUE;

        return ret;
}

static enum compact_result __compaction_suitable(struct zone *zone, int order,
int highest_zoneidx,
unsigned long wmark_target)
@@ -2282,6 +2173,115 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
        return false;
}

static enum compact_result __compact_finished(struct compact_control *cc)
{
        unsigned int order;
        const int migratetype = cc->migratetype;
        int ret;

        /* Compaction run completes if the migrate and free scanner meet */
        if (compact_scanners_met(cc)) {
                /* Let the next compaction start anew. */
                reset_cached_positions(cc->zone);

                /*
                 * Mark that the PG_migrate_skip information should be cleared
                 * by kswapd when it goes to sleep. kcompactd does not set the
                 * flag itself as the decision to be clear should be directly
                 * based on an allocation request.
                 */
                if (cc->direct_compaction)
                        cc->zone->compact_blockskip_flush = true;

                if (cc->whole_zone)
                        return COMPACT_COMPLETE;
                else
                        return COMPACT_PARTIAL_SKIPPED;
        }

        if (cc->proactive_compaction) {
                int score, wmark_low;
                pg_data_t *pgdat;

                pgdat = cc->zone->zone_pgdat;
                if (kswapd_is_running(pgdat))
                        return COMPACT_PARTIAL_SKIPPED;

                score = fragmentation_score_zone(cc->zone);
                wmark_low = fragmentation_score_wmark(pgdat, true);

                if (score > wmark_low)
                        ret = COMPACT_CONTINUE;
                else
                        ret = COMPACT_SUCCESS;

                goto out;
        }

        if (is_via_compact_memory(cc->order))
                return COMPACT_CONTINUE;

        /*
         * Always finish scanning a pageblock to reduce the possibility of
         * fallbacks in the future. This is particularly important when
         * migration source is unmovable/reclaimable but it's not worth
         * special casing.
         */
        if (!pageblock_aligned(cc->migrate_pfn))
                return COMPACT_CONTINUE;

        /* Direct compactor: Is a suitable page free? */
        ret = COMPACT_NO_SUITABLE_PAGE;
        for (order = cc->order; order < MAX_ORDER; order++) {
                struct free_area *area = &cc->zone->free_area[order];
                bool can_steal;

                /* Job done if page is free of the right migratetype */
                if (!free_area_empty(area, migratetype))
                        return COMPACT_SUCCESS;

#ifdef CONFIG_CMA
                /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
                if (migratetype == MIGRATE_MOVABLE &&
                    !free_area_empty(area, MIGRATE_CMA))
                        return COMPACT_SUCCESS;
#endif
                /*
                 * Job done if allocation would steal freepages from
                 * other migratetype buddy lists.
                 */
                if (find_suitable_fallback(area, order, migratetype,
                                        true, &can_steal) != -1)
                        /*
                         * Movable pages are OK in any pageblock. If we are
                         * stealing for a non-movable allocation, make sure
                         * we finish compacting the current pageblock first
                         * (which is assured by the above migrate_pfn align
                         * check) so it is as free as possible and we won't
                         * have to steal another one soon.
                         */
                        return COMPACT_SUCCESS;
        }

out:
        if (cc->contended || fatal_signal_pending(current))
                ret = COMPACT_CONTENDED;

        return ret;
}

static enum compact_result compact_finished(struct compact_control *cc)
{
        int ret;

        ret = __compact_finished(cc);
        trace_mm_compaction_finished(cc->zone, cc->order, ret);
        if (ret == COMPACT_NO_SUITABLE_PAGE)
                ret = COMPACT_CONTINUE;

        return ret;
}

static enum compact_result
compact_zone(struct compact_control *cc, struct capture_control *capc)
{
