/*
* Copyright (C) 2015 Imagination Technologies
* Author: Alex Smith <alex.smith@imgtec.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>

#include <asm/abi.h>
#include <asm/mips-cps.h>
#include <asm/page.h>
#include <asm/vdso.h>

/* Kernel-provided data used by the VDSO. */
static union mips_vdso_data vdso_data __page_aligned_data;

/*
* Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as
* what we map and where within the area they are mapped is determined at
* runtime.
*/
static struct page *no_pages[] = { NULL };
static struct vm_special_mapping vdso_vvar_mapping = {
.name = "[vvar]",
.pages = no_pages,
};
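
/*
 * Populate the page array for an image's special mapping with the struct
 * pages backing the kernel's built-in copy of that vDSO image, so the same
 * physical pages can later be mapped into every process.
 */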
static void __init init_vdso_image(struct mips_vdso_image *image)
{
unsigned long num_pages, i;
unsigned long data_pfn;
BUG_ON(!PAGE_ALIGNED(image->data));
BUG_ON(!PAGE_ALIGNED(image->size));
num_pages = image->size / PAGE_SIZE;
data_pfn = __phys_to_pfn(__pa_symbol(image->data));
for (i = 0; i < num_pages; i++)
image->mapping.pages[i] = pfn_to_page(data_pfn + i);
}
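
/*
 * Boot-time initialisation of the vDSO images: the native image always,
 * plus the O32 and N32 compat images when those ABIs are enabled.
 */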
static int __init init_vdso(void)
{
init_vdso_image(&vdso_image);
#ifdef CONFIG_MIPS32_O32
init_vdso_image(&vdso_image_o32);
#endif
#ifdef CONFIG_MIPS32_N32
init_vdso_image(&vdso_image_n32);
#endif
return 0;
}
subsys_initcall(init_vdso);
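
/*
 * Copy the current timekeeping state into the shared vDSO data page. The
 * update is bracketed by vdso_data_write_begin()/vdso_data_write_end() so
 * that userland readers can detect a concurrent update and retry.
 */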
void update_vsyscall(struct timekeeper *tk)
{
vdso_data_write_begin(&vdso_data);
vdso_data.xtime_sec = tk->xtime_sec;
vdso_data.xtime_nsec = tk->tkr_mono.xtime_nsec;
vdso_data.wall_to_mono_sec = tk->wall_to_monotonic.tv_sec;
vdso_data.wall_to_mono_nsec = tk->wall_to_monotonic.tv_nsec;
vdso_data.cs_shift = tk->tkr_mono.shift;
vdso_data.clock_mode = tk->tkr_mono.clock->archdata.vdso_clock_mode;
if (vdso_data.clock_mode != VDSO_CLOCK_NONE) {
vdso_data.cs_mult = tk->tkr_mono.mult;
vdso_data.cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data.cs_mask = tk->tkr_mono.mask;
}
vdso_data_write_end(&vdso_data);
}
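
/*
 * Publish the timezone to the vDSO data page. The fields are only written
 * while a vDSO-capable clocksource is in use; without one the vDSO
 * gettimeofday() falls back to the syscall and never reads them.
 */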
void update_vsyscall_tz(void)
{
if (vdso_data.clock_mode != VDSO_CLOCK_NONE) {
vdso_data.tz_minuteswest = sys_tz.tz_minuteswest;
vdso_data.tz_dsttime = sys_tz.tz_dsttime;
}
}
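
/*
 * Choose a base address for the vDSO area: just above the delay slot
 * emulation page at STACK_TOP, with up to VDSO_RANDOMIZE_SIZE of
 * page-aligned randomisation when address space randomisation is enabled.
 */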
static unsigned long vdso_base(void)
{
unsigned long base;
/* Skip the delay slot emulation page */
base = STACK_TOP + PAGE_SIZE;
if (current->flags & PF_RANDOMIZE) {
base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
base = PAGE_ALIGN(base);
}
return base;
}
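
/*
 * Called from the ELF loader at exec time. Maps the delay slot emulation
 * page at STACK_TOP, then lays out the vDSO area above it: the [vvar]
 * area (an optional GIC user page followed by the vDSO data page),
 * immediately followed by the vDSO image itself.
 */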
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mips_vdso_image *image = current->thread.abi->vdso;
struct mm_struct *mm = current->mm;
unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn;
struct vm_area_struct *vma;
int ret;
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
/* Map delay slot emulation page */
base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
VM_READ | VM_EXEC |
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
0, NULL);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
}
/*
* Determine total area size. This includes the VDSO image itself, the
* data page, and the GIC user page if present. Always create a mapping
* for the GIC user area if the GIC is present, regardless of whether it
* is the current clocksource, in case it comes into use later on. We
* only map a page even though the total area is 64K, as we only need
* the counter registers at the start.
*/
gic_size = mips_gic_present() ? PAGE_SIZE : 0;
vvar_size = gic_size + PAGE_SIZE;
size = vvar_size + image->size;
/*
* Find a region that's large enough for us to perform the
* colour-matching alignment below.
*/
if (cpu_has_dc_aliases)
size += shm_align_mask + 1;
base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
}
/*
* If we suffer from dcache aliasing, ensure that the VDSO data page
* mapping is coloured the same as the kernel's mapping of that memory.
* This ensures that when the kernel updates the VDSO data userland
* will observe it without requiring cache invalidations.
*/
if (cpu_has_dc_aliases) {
base = __ALIGN_MASK(base, shm_align_mask);
base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
}
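/*
 * Lay out the area: the optional GIC user page sits at the start, the
 * data page follows it, and the vDSO image comes immediately after.
 */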
data_addr = base + gic_size;
vdso_addr = data_addr + PAGE_SIZE;
vma = _install_special_mapping(mm, base, vvar_size,
VM_READ | VM_MAYREAD,
&vdso_vvar_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
}
/* Map GIC user page. */
if (gic_size) {
gic_pfn = virt_to_phys(mips_gic_base + MIPS_GIC_USER_OFS) >> PAGE_SHIFT;
ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
pgprot_noncached(PAGE_READONLY));
if (ret)
goto out;
}
/* Map data page. */
ret = remap_pfn_range(vma, data_addr,
virt_to_phys(&vdso_data) >> PAGE_SHIFT,
PAGE_SIZE, PAGE_READONLY);
if (ret)
goto out;
/* Map VDSO image. */
vma = _install_special_mapping(mm, vdso_addr, image->size,
VM_READ | VM_EXEC |
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
&image->mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
}
mm->context.vdso = (void *)vdso_addr;
ret = 0;
out:
up_write(&mm->mmap_sem);
return ret;
}