-
Notifications
You must be signed in to change notification settings - Fork 2
/
sel4_mmap.c
146 lines (116 loc) · 3.08 KB
/
sel4_mmap.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2023, 2024, Technology Innovation Institute
*
*/
#include <linux/mm.h>
#include <linux/version.h>
#include "sel4_virt_drv.h"
/*
 * Map a VMA's page offset to a memory-region index.
 *
 * Only a single region is exposed, so page offset 0 is the sole valid
 * index; any other offset is rejected with -1.
 */
static int sel4_find_mem_index(struct vm_area_struct *vma)
{
	if (vma->vm_pgoff != 0)
		return -1;

	return 0;
}
/*
 * Page-fault handler for VMAs backed by kernel-allocated (kmalloc/vmalloc)
 * guest memory: resolve the faulting offset to the backing struct page,
 * take a reference, and hand it back to the MM core via vmf->page.
 *
 * Returns 0 on success, VM_FAULT_SIGBUS if the VMA's page offset is not a
 * valid region index. Runs with the VM lock held across the lookup.
 */
static vm_fault_t sel4_handle_vma_fault(struct vm_fault *vmf)
{
/* Set up in sel4_vm_mmap() before the VMA goes live. */
struct sel4_mem_map *map = vmf->vma->vm_private_data;
struct page *page;
unsigned long offset;
void *paddr;
vm_fault_t rc = 0;
int index;
unsigned long irqflags;
/* A fault on a VMA without a valid map is a driver bug, not user error. */
BUG_ON(!map || !map->vmm || !map->vmm->vm);
irqflags = sel4_vm_lock(map->vmm->vm);
index = sel4_find_mem_index(vmf->vma);
if (index < 0) {
rc = VM_FAULT_SIGBUS;
goto out_unlock;
}
/* Byte offset of the fault within the region (index is always 0 here). */
offset = (vmf->pgoff - index) << PAGE_SHIFT;
/*
 * NOTE(review): despite the name, map->paddr is treated as a kernel
 * virtual address for LOGICAL/VIRTUAL maps — confirm against the
 * sel4_mem_map producer.
 */
paddr = (void *)(unsigned long)map->paddr + offset;
if (map->type == SEL4_MEM_LOGICAL)
page = virt_to_page(paddr);   /* kmalloc'd: direct-mapped */
else
page = vmalloc_to_page(paddr); /* vmalloc'd: walk page tables */
/* Take a reference; the MM core drops it when the PTE is torn down. */
get_page(page);
vmf->page = page;
out_unlock:
sel4_vm_unlock(map->vmm->vm, irqflags);
return rc;
}
/* VMA ops for kmalloc'd/vmalloc'd regions: pages are faulted in lazily. */
static const struct vm_operations_struct sel4_mmap_logical_vm_ops = {
.fault = sel4_handle_vma_fault,
};
/*
 * Set flags on a VMA portably across kernel versions.
 *
 * Since 6.3.0, vma->vm_flags is exposed read-only and must be modified
 * through the vm_flags_set() accessor (introduced in the same release);
 * older kernels write the field directly. The original check used
 * "<=", which wrongly selected the direct write on exactly 6.3.0.
 */
static inline void sel4_set_vm_flags(struct vm_area_struct *vma, vm_flags_t flags)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(6,3,0)
	vma->vm_flags |= flags;
#else
	vm_flags_set(vma, flags);
#endif
}
/*
 * Prepare a VMA for fault-driven mapping of kernel-allocated memory.
 * Pages are populated on demand by sel4_handle_vma_fault(). Always
 * succeeds; returns 0.
 */
static int sel4_mmap_logical(struct vm_area_struct *vma)
{
	vma->vm_ops = &sel4_mmap_logical_vm_ops;
	sel4_set_vm_flags(vma, VM_DONTDUMP | VM_DONTEXPAND);

	return 0;
}
/*
 * VMA ops for remap_pfn_range()-backed regions: no fault handler is
 * needed (PTEs are installed up front); .access enables ptrace/gdb
 * peeking where the arch supports it.
 */
static const struct vm_operations_struct sel4_mmap_physical_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
.access = generic_access_phys,
#endif
};
/*
 * Map a physical (IOVA) region straight into userspace with
 * remap_pfn_range(). The region must sit at page offset 0, start
 * page-aligned, and be large enough to cover the requested VMA.
 *
 * Returns 0 on success, -EINVAL on a bad VMA/offset/size, -ENODEV on
 * an unaligned backing address, or the remap_pfn_range() error.
 */
static int sel4_mmap_physical(struct vm_area_struct *vma, struct sel4_mem_map *map)
{
	unsigned long len;

	if (!vma || !map)
		return -EINVAL;

	/* Only page offset zero is a valid region index. */
	if (sel4_find_mem_index(vma) != 0)
		return -EINVAL;

	/* The backing address must be page aligned for a PFN remap. */
	if (map->paddr & ~PAGE_MASK)
		return -ENODEV;

	len = vma->vm_end - vma->vm_start;
	if (len > map->size)
		return -EINVAL;

	vma->vm_ops = &sel4_mmap_physical_vm_ops;

	return remap_pfn_range(vma, vma->vm_start, map->paddr >> PAGE_SHIFT,
			       len, vma->vm_page_prot);
}
/*
 * mmap() entry point for the VM memory file: validate the request
 * against the single backing region and dispatch by region type —
 * direct PFN remap for IOVA regions, fault-driven mapping for
 * kmalloc'd/vmalloc'd regions.
 *
 * Returns 0 on success or a negative errno. Validation and dispatch
 * run under the VM lock.
 */
int sel4_vm_mmap(struct file *filp, struct vm_area_struct *vma)
{
/* Stashed on the fd when the memory file was created. */
struct sel4_mem_map *map = filp->private_data;
unsigned long requested_pages, actual_pages;
int rc = 0;
unsigned long irqflags;
/* An mmap on a file without a valid map is a driver bug. */
BUG_ON(!map || !map->vmm || !map->vmm->vm);
if (vma->vm_end < vma->vm_start)
return -EINVAL;
/* Let the fault handler find the region later. */
vma->vm_private_data = map;
irqflags = sel4_vm_lock(map->vmm->vm);
if (sel4_find_mem_index(vma) < 0) {
rc = -EINVAL;
goto out_unlock;
}
requested_pages = vma_pages(vma);
/* Pages needed to cover the region, including any sub-page lead-in. */
actual_pages = ((map->paddr & ~PAGE_MASK) +
map->size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (requested_pages > actual_pages) {
rc = -EINVAL;
goto out_unlock;
}
switch (map->type) {
case SEL4_MEM_IOVA: /* shared memory with guest vmm */
rc = sel4_mmap_physical(vma, map);
break;
case SEL4_MEM_LOGICAL: /* kmalloc'd */
case SEL4_MEM_VIRTUAL: /* vmalloc'd */
rc = sel4_mmap_logical(vma);
break;
default:
rc = -EINVAL;
}
out_unlock:
sel4_vm_unlock(map->vmm->vm, irqflags);
return rc;
}