Commit 040d11c
From patchwork series 435798
Fox Snowpatch committed Dec 9, 2024
1 parent 1ecdccb commit 040d11c
Showing 24 changed files with 472 additions and 81 deletions.
1 change: 1 addition & 0 deletions arch/arm64/Kconfig
@@ -21,6 +21,7 @@ config ARM64
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_CC_PLATFORM
select ARCH_HAS_COPY_MC if ACPI_APEI_GHES
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
31 changes: 26 additions & 5 deletions arch/arm64/include/asm/asm-extable.h
@@ -5,11 +5,13 @@
#include <linux/bits.h>
#include <asm/gpr-num.h>

#define EX_TYPE_NONE 0
#define EX_TYPE_BPF 1
#define EX_TYPE_UACCESS_ERR_ZERO 2
#define EX_TYPE_KACCESS_ERR_ZERO 3
#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
#define EX_TYPE_NONE 0
#define EX_TYPE_BPF 1
#define EX_TYPE_UACCESS_ERR_ZERO 2
#define EX_TYPE_KACCESS_ERR_ZERO 3
#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
/* memory-error-safe kernel access */
#define EX_TYPE_KACCESS_ERR_ZERO_MEM_ERR 5

/* Data fields for EX_TYPE_UACCESS_ERR_ZERO */
#define EX_DATA_REG_ERR_SHIFT 0
@@ -51,6 +53,17 @@
#define _ASM_EXTABLE_UACCESS(insn, fixup) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, wzr, wzr)

#define _ASM_EXTABLE_KACCESS_ERR_ZERO_MEM_ERR(insn, fixup, err, zero) \
__ASM_EXTABLE_RAW(insn, fixup, \
EX_TYPE_KACCESS_ERR_ZERO_MEM_ERR, \
( \
EX_DATA_REG(ERR, err) | \
EX_DATA_REG(ZERO, zero) \
))

#define _ASM_EXTABLE_KACCESS_MEM_ERR(insn, fixup) \
_ASM_EXTABLE_KACCESS_ERR_ZERO_MEM_ERR(insn, fixup, wzr, wzr)

/*
* Create an exception table entry for uaccess `insn`, which will branch to `fixup`
* when an unhandled fault is taken.
@@ -69,6 +82,14 @@
.endif
.endm

/*
* Create an exception table entry for kaccess `insn`, which will branch to
* `fixup` when an unhandled fault is taken.
*/
.macro _asm_extable_kaccess_mem_err, insn, fixup
_ASM_EXTABLE_KACCESS_MEM_ERR(\insn, \fixup)
.endm

#else /* __ASSEMBLY__ */

#include <linux/stringify.h>
4 changes: 4 additions & 0 deletions arch/arm64/include/asm/asm-uaccess.h
@@ -57,6 +57,10 @@ alternative_else_nop_endif
.endm
#endif

#define KERNEL_MEM_ERR(l, x...) \
9999: x; \
_asm_extable_kaccess_mem_err 9999b, l

#define USER(l, x...) \
9999: x; \
_asm_extable_uaccess 9999b, l
1 change: 1 addition & 0 deletions arch/arm64/include/asm/extable.h
@@ -46,4 +46,5 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
#endif /* !CONFIG_BPF_JIT */

bool fixup_exception(struct pt_regs *regs);
bool fixup_exception_me(struct pt_regs *regs);
#endif
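For orientation, here is a minimal sketch of how a fault-handling path could consult the new fixup_exception_me() helper. The wrapper below is hypothetical and not part of this diff; only fixup_exception_me() and the existing pt_regs/user_mode() interfaces are taken as given.

/*
 * Hypothetical wrapper (illustration only): if a kernel access that was
 * marked as memory-error safe takes a memory error, divert execution to
 * the registered fixup instead of treating the error as fatal.
 */
static bool try_fixup_mem_error(struct pt_regs *regs)
{
	if (!user_mode(regs) && fixup_exception_me(regs))
		return true;	/* handled via the new extable entry type */
	return false;		/* fall back to the normal error path */
}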
9 changes: 9 additions & 0 deletions arch/arm64/include/asm/mte.h
@@ -98,6 +98,11 @@ static inline bool try_page_mte_tagging(struct page *page)
void mte_zero_clear_page_tags(void *addr);
void mte_sync_tags(pte_t pte, unsigned int nr_pages);
void mte_copy_page_tags(void *kto, const void *kfrom);

#ifdef CONFIG_ARCH_HAS_COPY_MC
int mte_copy_mc_page_tags(void *kto, const void *kfrom);
#endif

void mte_thread_init_user(void);
void mte_thread_switch(struct task_struct *next);
void mte_cpu_setup(void);
@@ -134,6 +139,10 @@ static inline void mte_sync_tags(pte_t pte, unsigned int nr_pages)
static inline void mte_copy_page_tags(void *kto, const void *kfrom)
{
}
static inline int mte_copy_mc_page_tags(void *kto, const void *kfrom)
{
return 0;
}
static inline void mte_thread_init_user(void)
{
}
10 changes: 10 additions & 0 deletions arch/arm64/include/asm/page.h
@@ -29,6 +29,16 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_highpage(struct page *to, struct page *from);
#define __HAVE_ARCH_COPY_HIGHPAGE

#ifdef CONFIG_ARCH_HAS_COPY_MC
int copy_mc_page(void *to, const void *from);
int copy_mc_highpage(struct page *to, struct page *from);
#define __HAVE_ARCH_COPY_MC_HIGHPAGE

int copy_mc_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_MC_USER_HIGHPAGE
#endif

struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
unsigned long vaddr);
#define vma_alloc_zeroed_movable_folio vma_alloc_zeroed_movable_folio
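To show how the new page-level helpers are expected to compose, here is a simplified sketch of a memory-error-safe tagged-page copy. It is an illustration under assumptions, not code from this series: the wrapper name is made up, and the exact split between copy_mc_page(), mte_copy_mc_page_tags() and copy_mc_highpage() in the real implementation may differ. page_mte_tagged(), set_page_mte_tagged() and system_supports_mte() are existing arm64 helpers.

/* Sketch: copy the page data, then the MTE tags, reporting poison via -EFAULT. */
static int copy_mc_tagged_page(struct page *to, struct page *from)
{
	void *kto = page_address(to);
	void *kfrom = page_address(from);
	int ret;

	ret = copy_mc_page(kto, kfrom);		/* data copy, extable-protected loads */
	if (ret)
		return ret;

	if (system_supports_mte() && page_mte_tagged(from)) {
		ret = mte_copy_mc_page_tags(kto, kfrom);	/* tag copy, also protected */
		if (ret)
			return ret;
		set_page_mte_tagged(to);
	}
	return 0;
}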
5 changes: 5 additions & 0 deletions arch/arm64/include/asm/string.h
@@ -35,6 +35,10 @@ extern void *memchr(const void *, int, __kernel_size_t);
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void *__memcpy(void *, const void *, __kernel_size_t);

#define __HAVE_ARCH_MEMCPY_MC
extern int memcpy_mc(void *, const void *, __kernel_size_t);
extern int __memcpy_mc(void *, const void *, __kernel_size_t);

#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *, const void *, __kernel_size_t);
extern void *__memmove(void *, const void *, __kernel_size_t);
@@ -57,6 +61,7 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt);
*/

#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memcpy_mc(dst, src, len) __memcpy_mc(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

18 changes: 18 additions & 0 deletions arch/arm64/include/asm/uaccess.h
@@ -542,4 +542,22 @@ static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,

#endif /* CONFIG_ARM64_GCS */

#ifdef CONFIG_ARCH_HAS_COPY_MC
/**
* copy_mc_to_kernel - memory copy that handles source exceptions
*
* @to: destination address
* @from: source address
* @size: number of bytes to copy
*
* Return 0 on success, or the number of bytes not copied.
*/
static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
return memcpy_mc(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel
#endif

#endif /* __ASM_UACCESS_H */
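As a usage note, a caller of the new copy_mc_to_kernel() sees either 0 or the number of bytes left uncopied when a memory error is consumed. The helper below is a hypothetical example of such a caller, not part of this patch.

/*
 * Illustration only: read from a kernel mapping that may contain a
 * hardware-poisoned region and report failure instead of letting the
 * error become fatal.
 */
static int read_possibly_poisoned(void *dst, const void *src, size_t len)
{
	unsigned long uncopied = copy_mc_to_kernel(dst, src, len);

	if (uncopied)
		return -EIO;	/* the trailing 'uncopied' bytes were not transferred */
	return 0;
}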
2 changes: 2 additions & 0 deletions arch/arm64/lib/Makefile
@@ -13,6 +13,8 @@ endif

lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o

lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc_page.o memcpy_mc.o

obj-$(CONFIG_CRC32) += crc32.o crc32-glue.o

obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
37 changes: 37 additions & 0 deletions arch/arm64/lib/copy_mc_page.S
@@ -0,0 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0-only */

#include <linux/linkage.h>
#include <linux/const.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-extable.h>
#include <asm/asm-uaccess.h>

/*
* Copy a page from src to dest (both are page aligned), handling memory errors safely
*
* Parameters:
* x0 - dest
* x1 - src
* Returns:
* x0 - 0 on success, or -EFAULT if a memory error is encountered
* while copying.
*/
.macro ldp1 reg1, reg2, ptr, val
KERNEL_MEM_ERR(9998f, ldp \reg1, \reg2, [\ptr, \val])
.endm

SYM_FUNC_START(__pi_copy_mc_page)
#include "copy_page_template.S"

mov x0, #0
ret

9998: mov x0, #-EFAULT
ret

SYM_FUNC_END(__pi_copy_mc_page)
SYM_FUNC_ALIAS(copy_mc_page, __pi_copy_mc_page)
EXPORT_SYMBOL(copy_mc_page)
62 changes: 5 additions & 57 deletions arch/arm64/lib/copy_page.S
@@ -17,65 +17,13 @@
* x0 - dest
* x1 - src
*/
SYM_FUNC_START(__pi_copy_page)
#ifdef CONFIG_AS_HAS_MOPS
.arch_extension mops
alternative_if_not ARM64_HAS_MOPS
b .Lno_mops
alternative_else_nop_endif

mov x2, #PAGE_SIZE
cpypwn [x0]!, [x1]!, x2!
cpymwn [x0]!, [x1]!, x2!
cpyewn [x0]!, [x1]!, x2!
ret
.Lno_mops:
#endif
ldp x2, x3, [x1]
ldp x4, x5, [x1, #16]
ldp x6, x7, [x1, #32]
ldp x8, x9, [x1, #48]
ldp x10, x11, [x1, #64]
ldp x12, x13, [x1, #80]
ldp x14, x15, [x1, #96]
ldp x16, x17, [x1, #112]

add x0, x0, #256
add x1, x1, #128
1:
tst x0, #(PAGE_SIZE - 1)

stnp x2, x3, [x0, #-256]
ldp x2, x3, [x1]
stnp x4, x5, [x0, #16 - 256]
ldp x4, x5, [x1, #16]
stnp x6, x7, [x0, #32 - 256]
ldp x6, x7, [x1, #32]
stnp x8, x9, [x0, #48 - 256]
ldp x8, x9, [x1, #48]
stnp x10, x11, [x0, #64 - 256]
ldp x10, x11, [x1, #64]
stnp x12, x13, [x0, #80 - 256]
ldp x12, x13, [x1, #80]
stnp x14, x15, [x0, #96 - 256]
ldp x14, x15, [x1, #96]
stnp x16, x17, [x0, #112 - 256]
ldp x16, x17, [x1, #112]

add x0, x0, #128
add x1, x1, #128

b.ne 1b

stnp x2, x3, [x0, #-256]
stnp x4, x5, [x0, #16 - 256]
stnp x6, x7, [x0, #32 - 256]
stnp x8, x9, [x0, #48 - 256]
stnp x10, x11, [x0, #64 - 256]
stnp x12, x13, [x0, #80 - 256]
stnp x14, x15, [x0, #96 - 256]
stnp x16, x17, [x0, #112 - 256]
.macro ldp1 reg1, reg2, ptr, val
ldp \reg1, \reg2, [\ptr, \val]
.endm

SYM_FUNC_START(__pi_copy_page)
#include "copy_page_template.S"
ret
SYM_FUNC_END(__pi_copy_page)
SYM_FUNC_ALIAS(copy_page, __pi_copy_page)
70 changes: 70 additions & 0 deletions arch/arm64/lib/copy_page_template.S
@@ -0,0 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 ARM Ltd.
*/

/*
* Copy a page from src to dest (both are page aligned)
*
* Parameters:
* x0 - dest
* x1 - src
*/

#ifdef CONFIG_AS_HAS_MOPS
.arch_extension mops
alternative_if_not ARM64_HAS_MOPS
b .Lno_mops
alternative_else_nop_endif

mov x2, #PAGE_SIZE
cpypwn [x0]!, [x1]!, x2!
cpymwn [x0]!, [x1]!, x2!
cpyewn [x0]!, [x1]!, x2!
ret
.Lno_mops:
#endif
ldp1 x2, x3, x1, #0
ldp1 x4, x5, x1, #16
ldp1 x6, x7, x1, #32
ldp1 x8, x9, x1, #48
ldp1 x10, x11, x1, #64
ldp1 x12, x13, x1, #80
ldp1 x14, x15, x1, #96
ldp1 x16, x17, x1, #112

add x0, x0, #256
add x1, x1, #128
1:
tst x0, #(PAGE_SIZE - 1)

stnp x2, x3, [x0, #-256]
ldp1 x2, x3, x1, #0
stnp x4, x5, [x0, #16 - 256]
ldp1 x4, x5, x1, #16
stnp x6, x7, [x0, #32 - 256]
ldp1 x6, x7, x1, #32
stnp x8, x9, [x0, #48 - 256]
ldp1 x8, x9, x1, #48
stnp x10, x11, [x0, #64 - 256]
ldp1 x10, x11, x1, #64
stnp x12, x13, [x0, #80 - 256]
ldp1 x12, x13, x1, #80
stnp x14, x15, [x0, #96 - 256]
ldp1 x14, x15, x1, #96
stnp x16, x17, [x0, #112 - 256]
ldp1 x16, x17, x1, #112

add x0, x0, #128
add x1, x1, #128

b.ne 1b

stnp x2, x3, [x0, #-256]
stnp x4, x5, [x0, #16 - 256]
stnp x6, x7, [x0, #32 - 256]
stnp x8, x9, [x0, #48 - 256]
stnp x10, x11, [x0, #64 - 256]
stnp x12, x13, [x0, #80 - 256]
stnp x14, x15, [x0, #96 - 256]
stnp x16, x17, [x0, #112 - 256]
10 changes: 5 additions & 5 deletions arch/arm64/lib/copy_to_user.S
@@ -20,31 +20,31 @@
* x0 - bytes not copied
*/
.macro ldrb1 reg, ptr, val
ldrb \reg, [\ptr], \val
KERNEL_MEM_ERR(9998f, ldrb \reg, [\ptr], \val)
.endm

.macro strb1 reg, ptr, val
user_ldst 9998f, sttrb, \reg, \ptr, \val
.endm

.macro ldrh1 reg, ptr, val
ldrh \reg, [\ptr], \val
KERNEL_MEM_ERR(9998f, ldrh \reg, [\ptr], \val)
.endm

.macro strh1 reg, ptr, val
user_ldst 9997f, sttrh, \reg, \ptr, \val
.endm

.macro ldr1 reg, ptr, val
ldr \reg, [\ptr], \val
KERNEL_MEM_ERR(9998f, ldr \reg, [\ptr], \val)
.endm

.macro str1 reg, ptr, val
user_ldst 9997f, sttr, \reg, \ptr, \val
.endm

.macro ldp1 reg1, reg2, ptr, val
ldp \reg1, \reg2, [\ptr], \val
KERNEL_MEM_ERR(9998f, ldp \reg1, \reg2, [\ptr], \val)
.endm

.macro stp1 reg1, reg2, ptr, val
@@ -64,7 +64,7 @@ SYM_FUNC_START(__arch_copy_to_user)
9997: cmp dst, dstin
b.ne 9998f
// Before being absolutely sure we couldn't copy anything, try harder
ldrb tmp1w, [srcin]
KERNEL_MEM_ERR(9998f, ldrb tmp1w, [srcin])
USER(9998f, sttrb tmp1w, [dst])
add dst, dst, #1
9998: sub x0, end, dst // bytes not copied
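With the source-side loads in __arch_copy_to_user() wrapped by KERNEL_MEM_ERR, a memory error on the kernel buffer can surface as a short copy rather than a fatal abort. The snippet below is a hypothetical caller showing how such a short copy would typically be handled; it is not part of this commit.

/*
 * Illustration only: copy a kernel buffer to userspace and translate a
 * short copy (which may indicate a consumed memory error) into an error
 * code for the caller.
 */
static ssize_t read_kernel_buf(char __user *ubuf, const void *kbuf, size_t len)
{
	size_t left = copy_to_user(ubuf, kbuf, len);	/* returns bytes not copied */

	if (left == len)
		return -EIO;		/* nothing was copied at all */
	return len - left;		/* partial progress */
}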
