mm: mass constification of folio/page pointers
Now that page_pgoff() takes const pointers, we can constify the pointers
to a lot of functions.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Matthew Wilcox (Oracle) authored and akpm00 committed Nov 7, 2024
1 parent 713da0b commit 68158bf
Showing 7 changed files with 37 additions and 30 deletions.
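
As a rough illustration of the pattern this commit applies (a standalone sketch with made-up struct and helper names, not the kernel's actual definitions): once a low-level accessor accepts a const pointer, read-only callers above it can be constified as well, and the compiler then rejects accidental writes through those pointers.

/* Toy example only: this 'struct folio' and these helpers are hypothetical. */
#include <stdio.h>

struct folio {
        unsigned long flags;
};

/* Read-only accessor: takes a const pointer, so it can be called with
 * either a const or a non-const folio. */
static unsigned long folio_flags_of(const struct folio *folio)
{
        return folio->flags;
}

/* Because the accessor above is const-correct, this read-only caller can
 * also take a const pointer; writing folio->flags here would no longer
 * compile. */
static int folio_bit_zero_set(const struct folio *folio)
{
        return (folio_flags_of(folio) & 1UL) != 0;
}

int main(void)
{
        struct folio f = { .flags = 1 };

        printf("bit zero set: %d\n", folio_bit_zero_set(&f));
        return 0;
}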
include/linux/ksm.h (7 changes: 4 additions & 3 deletions)
@@ -90,7 +90,7 @@ struct folio *ksm_might_need_to_copy(struct folio *folio,

 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
-void collect_procs_ksm(struct folio *folio, struct page *page,
+void collect_procs_ksm(const struct folio *folio, const struct page *page,
         struct list_head *to_kill, int force_early);
 long ksm_process_profit(struct mm_struct *);

@@ -122,8 +122,9 @@ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
 }

-static inline void collect_procs_ksm(struct folio *folio, struct page *page,
-        struct list_head *to_kill, int force_early)
+static inline void collect_procs_ksm(const struct folio *folio,
+        const struct page *page, struct list_head *to_kill,
+        int force_early)
 {
 }

include/linux/rmap.h (10 changes: 5 additions & 5 deletions)
@@ -171,7 +171,7 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
         unlink_anon_vmas(next);
 }

-struct anon_vma *folio_get_anon_vma(struct folio *folio);
+struct anon_vma *folio_get_anon_vma(const struct folio *folio);

 /* RMAP flags, currently only relevant for some anon rmap operations. */
 typedef int __bitwise rmap_t;
@@ -194,8 +194,8 @@ enum rmap_level {
         RMAP_LEVEL_PMD,
 };

-static inline void __folio_rmap_sanity_checks(struct folio *folio,
-        struct page *page, int nr_pages, enum rmap_level level)
+static inline void __folio_rmap_sanity_checks(const struct folio *folio,
+        const struct page *page, int nr_pages, enum rmap_level level)
 {
         /* hugetlb folios are handled separately. */
         VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
@@ -771,14 +771,14 @@ struct rmap_walk_control {
         bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
                 unsigned long addr, void *arg);
         int (*done)(struct folio *folio);
-        struct anon_vma *(*anon_lock)(struct folio *folio,
+        struct anon_vma *(*anon_lock)(const struct folio *folio,
                 struct rmap_walk_control *rwc);
         bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };

 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
-struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
+struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
         struct rmap_walk_control *rwc);

 #else /* !CONFIG_MMU */
mm/internal.h (5 changes: 3 additions & 2 deletions)
@@ -1117,10 +1117,11 @@ void ClearPageHWPoisonTakenOff(struct page *page);
 bool take_page_off_buddy(struct page *page);
 bool put_page_back_buddy(struct page *page);
 struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
-void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
+void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
         struct vm_area_struct *vma, struct list_head *to_kill,
         unsigned long ksm_addr);
-unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+unsigned long page_mapped_in_vma(const struct page *page,
+        struct vm_area_struct *vma);

 #else
 static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
mm/ksm.c (5 changes: 3 additions & 2 deletions)
@@ -1051,7 +1051,8 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma,
         return err;
 }

-static inline struct ksm_stable_node *folio_stable_node(struct folio *folio)
+static inline
+struct ksm_stable_node *folio_stable_node(const struct folio *folio)
 {
         return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
 }
@@ -3067,7 +3068,7 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
 /*
  * Collect processes when the error hit an ksm page.
  */
-void collect_procs_ksm(struct folio *folio, struct page *page,
+void collect_procs_ksm(const struct folio *folio, const struct page *page,
         struct list_head *to_kill, int force_early)
 {
         struct ksm_stable_node *stable_node;
mm/memory-failure.c (24 changes: 13 additions & 11 deletions)
@@ -445,7 +445,7 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
  * Schedule a process for later kill.
  * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
  */
-static void __add_to_kill(struct task_struct *tsk, struct page *p,
+static void __add_to_kill(struct task_struct *tsk, const struct page *p,
         struct vm_area_struct *vma, struct list_head *to_kill,
         unsigned long addr)
 {
@@ -461,7 +461,7 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
         if (is_zone_device_page(p))
                 tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
         else
-                tk->size_shift = page_shift(compound_head(p));
+                tk->size_shift = folio_shift(page_folio(p));

         /*
          * Send SIGKILL if "tk->addr == -EFAULT". Also, as
@@ -486,7 +486,7 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
         list_add_tail(&tk->nd, to_kill);
 }

-static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
+static void add_to_kill_anon_file(struct task_struct *tsk, const struct page *p,
         struct vm_area_struct *vma, struct list_head *to_kill,
         unsigned long addr)
 {
@@ -509,7 +509,7 @@ static bool task_in_to_kill_list(struct list_head *to_kill,
         return false;
 }

-void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
+void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
         struct vm_area_struct *vma, struct list_head *to_kill,
         unsigned long addr)
 {
@@ -606,8 +606,9 @@ struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
 /*
  * Collect processes when the error hit an anonymous page.
  */
-static void collect_procs_anon(struct folio *folio, struct page *page,
-        struct list_head *to_kill, int force_early)
+static void collect_procs_anon(const struct folio *folio,
+        const struct page *page, struct list_head *to_kill,
+        int force_early)
 {
         struct task_struct *tsk;
         struct anon_vma *av;
@@ -643,8 +644,9 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
 /*
  * Collect processes when the error hit a file mapped page.
  */
-static void collect_procs_file(struct folio *folio, struct page *page,
-        struct list_head *to_kill, int force_early)
+static void collect_procs_file(const struct folio *folio,
+        const struct page *page, struct list_head *to_kill,
+        int force_early)
 {
         struct vm_area_struct *vma;
         struct task_struct *tsk;
@@ -680,7 +682,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
 }

 #ifdef CONFIG_FS_DAX
-static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
+static void add_to_kill_fsdax(struct task_struct *tsk, const struct page *p,
         struct vm_area_struct *vma,
         struct list_head *to_kill, pgoff_t pgoff)
 {
@@ -691,7 +693,7 @@ static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
 /*
  * Collect processes when the error hit a fsdax page.
  */
-static void collect_procs_fsdax(struct page *page,
+static void collect_procs_fsdax(const struct page *page,
         struct address_space *mapping, pgoff_t pgoff,
         struct list_head *to_kill, bool pre_remove)
 {
@@ -725,7 +727,7 @@ static void collect_procs_fsdax(struct page *page,
 /*
  * Collect the processes who have the corrupted page mapped to kill.
  */
-static void collect_procs(struct folio *folio, struct page *page,
+static void collect_procs(const struct folio *folio, const struct page *page,
         struct list_head *tokill, int force_early)
 {
         if (!folio->mapping)
mm/page_vma_mapped.c (5 changes: 3 additions & 2 deletions)
@@ -337,9 +337,10 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
  * outside the VMA or not present, returns -EFAULT.
  * Only valid for normal file or anonymous VMAs.
  */
-unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_mapped_in_vma(const struct page *page,
+        struct vm_area_struct *vma)
 {
-        struct folio *folio = page_folio(page);
+        const struct folio *folio = page_folio(page);
         struct page_vma_mapped_walk pvmw = {
                 .pfn = page_to_pfn(page),
                 .nr_pages = 1,
mm/rmap.c (11 changes: 6 additions & 5 deletions)
@@ -496,7 +496,7 @@ void __init anon_vma_init(void)
  * concurrently without folio lock protection). See folio_lock_anon_vma_read()
  * which has already covered that, and comment above remap_pages().
  */
-struct anon_vma *folio_get_anon_vma(struct folio *folio)
+struct anon_vma *folio_get_anon_vma(const struct folio *folio)
 {
         struct anon_vma *anon_vma = NULL;
         unsigned long anon_mapping;
@@ -540,7 +540,7 @@ struct anon_vma *folio_get_anon_vma(struct folio *folio)
  * reference like with folio_get_anon_vma() and then block on the mutex
  * on !rwc->try_lock case.
  */
-struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
+struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
         struct rmap_walk_control *rwc)
 {
         struct anon_vma *anon_vma = NULL;
@@ -1271,8 +1271,9 @@ static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
  * @vma: the vm area in which the mapping is added
  * @address: the user virtual address mapped
  */
-static void __page_check_anon_rmap(struct folio *folio, struct page *page,
-        struct vm_area_struct *vma, unsigned long address)
+static void __page_check_anon_rmap(const struct folio *folio,
+        const struct page *page, struct vm_area_struct *vma,
+        unsigned long address)
 {
         /*
          * The page's anon-rmap details (mapping and index) are guaranteed to
@@ -2569,7 +2570,7 @@ void __put_anon_vma(struct anon_vma *anon_vma)
         anon_vma_free(root);
 }

-static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
+static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio,
         struct rmap_walk_control *rwc)
 {
         struct anon_vma *anon_vma;
