diff --git a/Kconfig.zephyr b/Kconfig.zephyr index 36b753dd4a8e0a..425d79f4e74ec5 100644 --- a/Kconfig.zephyr +++ b/Kconfig.zephyr @@ -262,6 +262,7 @@ config LINKER_USE_PINNED_SECTION config LINKER_USE_ONDEMAND_SECTION bool "Use Evictable Linker Section" + depends on DEMAND_MAPPING depends on !LINKER_USE_PINNED_SECTION depends on !ARCH_MAPS_ALL_RAM help diff --git a/doc/kernel/memory_management/demand_paging.rst b/doc/kernel/memory_management/demand_paging.rst index 1b454e07e04220..db068397a1423d 100644 --- a/doc/kernel/memory_management/demand_paging.rst +++ b/doc/kernel/memory_management/demand_paging.rst @@ -179,6 +179,11 @@ which must be implemented: free a backing store location (the ``location`` token) which can then be used for subsequent page out operation. +* :c:func:`k_mem_paging_backing_store_location_query()` is called to obtain + the ``location`` token corresponding to storage content to be virtually + mapped and paged-in on demand. Most useful with + :kconfig:option:`CONFIG_DEMAND_MAPPING`. + * :c:func:`k_mem_paging_backing_store_page_in()` copies a data page from the backing store location associated with the provided ``location`` token to the page pointed by ``K_MEM_SCRATCH_PAGE``. diff --git a/include/zephyr/kernel/mm/demand_paging.h b/include/zephyr/kernel/mm/demand_paging.h index ab2ca7d0693252..120ee3299a3496 100644 --- a/include/zephyr/kernel/mm/demand_paging.h +++ b/include/zephyr/kernel/mm/demand_paging.h @@ -349,6 +349,22 @@ int k_mem_paging_backing_store_location_get(struct k_mem_page_frame *pf, */ void k_mem_paging_backing_store_location_free(uintptr_t location); +/** + * Obtain persistent location token for on-demand content + * + * Unlike k_mem_paging_backing_store_location_get() this does not allocate + * any backing store space. Instead, it returns a location token corresponding + * to some fixed storage content to be paged in on demand. 
This is expected + * to be used in conjunction with CONFIG_LINKER_USE_ONDEMAND_SECTION and the + * K_MEM_MAP_UNPAGED flag to create demand mappings at boot time. This may + * also be used e.g. to implement file-based mmap(). + * + * @param addr Virtual address to obtain a location token for + * @param [out] location storage location token + * @return 0 for success or negative error code + */ +int k_mem_paging_backing_store_location_query(void *addr, uintptr_t *location); + /** * Copy a data page from K_MEM_SCRATCH_PAGE to the specified location * diff --git a/kernel/mmu.c b/kernel/mmu.c index 7cafdded1fabf9..b03ff978786a48 100644 --- a/kernel/mmu.c +++ b/kernel/mmu.c @@ -1050,6 +1050,34 @@ static void mark_linker_section_pinned(void *start_addr, void *end_addr, } #endif /* CONFIG_LINKER_USE_BOOT_SECTION) || CONFIG_LINKER_USE_PINNED_SECTION */ +#ifdef CONFIG_LINKER_USE_ONDEMAND_SECTION +static void z_paging_ondemand_section_map(void) +{ + uint8_t *addr; + size_t size; + uintptr_t location; + uint32_t flags; + + size = (uintptr_t)lnkr_ondemand_text_size; + flags = K_MEM_MAP_UNPAGED | K_MEM_PERM_EXEC | K_MEM_CACHE_WB; + VIRT_FOREACH(lnkr_ondemand_text_start, size, addr) { + k_mem_paging_backing_store_location_query(addr, &location); + arch_mem_map(addr, location, CONFIG_MMU_PAGE_SIZE, flags); + sys_bitarray_set_region(&virt_region_bitmap, 1, + virt_to_bitmap_offset(addr, CONFIG_MMU_PAGE_SIZE)); + } + + size = (uintptr_t)lnkr_ondemand_rodata_size; + flags = K_MEM_MAP_UNPAGED | K_MEM_CACHE_WB; + VIRT_FOREACH(lnkr_ondemand_rodata_start, size, addr) { + k_mem_paging_backing_store_location_query(addr, &location); + arch_mem_map(addr, location, CONFIG_MMU_PAGE_SIZE, flags); + sys_bitarray_set_region(&virt_region_bitmap, 1, + virt_to_bitmap_offset(addr, CONFIG_MMU_PAGE_SIZE)); + } +} +#endif /* CONFIG_LINKER_USE_ONDEMAND_SECTION */ + void z_mem_manage_init(void) { uintptr_t phys; @@ -1126,6 +1154,11 @@ void z_mem_manage_init(void) } } #endif /* CONFIG_DEMAND_PAGING */ + 
+#ifdef CONFIG_LINKER_USE_ONDEMAND_SECTION + z_paging_ondemand_section_map(); +#endif + #if __ASSERT_ON page_frames_initialized = true; #endif