path: root/arch/tile/kernel/vdso.c
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/vdso.h>
#include <asm/mman.h>
#include <asm/sections.h>

#include <arch/sim.h>

/* The alignment of the vDSO. */
#define VDSO_ALIGNMENT  PAGE_SIZE


static unsigned int vdso_pages;
static struct page **vdso_pagelist;

#ifdef CONFIG_COMPAT
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif
/* Set once the page lists above have been fully initialized. */
static int vdso_ready;

/*
 * The vdso data page.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;

struct vdso_data *vdso_data = &vdso_data_store.data;

/* Controlled by the "vdso=" boot parameter below; enabled by default. */
static unsigned int __read_mostly vdso_enabled = 1;

static struct page **vdso_setup(void *vdso_kbase, unsigned int pages)
{
	int i;
	struct page **pagelist;

	pagelist = kzalloc(sizeof(struct page *) * (pages + 1), GFP_KERNEL);
	BUG_ON(pagelist == NULL);
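	/*
	 * The first "pages - 1" slots map the vDSO image itself; the last
	 * slot is the shared vdso_data page, and the array is
	 * NULL-terminated for install_special_mapping().
	 */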
	for (i = 0; i < pages - 1; i++) {
		struct page *pg = virt_to_page(vdso_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		pagelist[i] = pg;
	}
	pagelist[pages - 1] = virt_to_page(vdso_data);
	pagelist[pages] = NULL;

	return pagelist;
}

static int __init vdso_init(void)
{
	int data_pages = sizeof(vdso_data_store) >> PAGE_SHIFT;

	/*
	 * We can disable vDSO support generally, but we need to retain
	 * one page to support the two-bundle (16-byte) rt_sigreturn path.
	 */
	if (!vdso_enabled) {
		size_t offset = (unsigned long)&__vdso_rt_sigreturn;
		static struct page *sigret_page;
		sigret_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		BUG_ON(sigret_page == NULL);
		vdso_pagelist = &sigret_page;
		vdso_pages = 1;
		BUG_ON(offset >= PAGE_SIZE);
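		/* Copy just the two rt_sigreturn bundles, at their usual offset. */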
		memcpy(page_address(sigret_page) + offset,
		       vdso_start + offset, 16);
#ifdef CONFIG_COMPAT
		vdso32_pages = vdso_pages;
		vdso32_pagelist = vdso_pagelist;
#endif
		vdso_ready = 1;
		return 0;
	}

	/* Normal case: map the whole vDSO image plus the vdso_data page(s). */
	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
	vdso_pages += data_pages;
	vdso_pagelist = vdso_setup(vdso_start, vdso_pages);

#ifdef CONFIG_COMPAT
	vdso32_pages = (vdso32_end - vdso32_start) >> PAGE_SHIFT;
	vdso32_pages += data_pages;
	vdso32_pagelist = vdso_setup(vdso32_start, vdso32_pages);
#endif

	/* Make sure the page lists above are visible before vdso_ready is set. */
	smp_wmb();
	vdso_ready = 1;

	return 0;
}
arch_initcall(vdso_init);

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == VDSO_BASE)
		return "[vdso]";
#ifndef __tilegx__
	if (vma->vm_start == MEM_USER_INTRPT)
		return "[intrpt]";
#endif
	return NULL;
}

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}

int in_gate_area(struct mm_struct *mm, unsigned long address)
{
	return 0;
}

int in_gate_area_no_mm(unsigned long address)
{
	return 0;
}

int setup_vdso_pages(void)
{
	struct page **pagelist;
	unsigned long pages;
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base = 0;
	int retval = 0;

	if (!vdso_ready)
		return 0;

	mm->context.vdso_base = 0;

	pagelist = vdso_pagelist;
	pages = vdso_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		pagelist = vdso32_pagelist;
		pages = vdso32_pages;
	}
#endif

	/*
	 * The vDSO had a problem and was disabled; just don't "enable" it
	 * for this process.
	 */
	if (pages == 0)
		return 0;

	/*
	 * Reserve enough address space for the vDSO pages plus any slack
	 * needed to round the base up to VDSO_ALIGNMENT below.
	 */
	vdso_base = get_unmapped_area(NULL, vdso_base,
				      (pages << PAGE_SHIFT) +
				      ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
				      0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		retval = vdso_base;
		return retval;
	}

	/* Add required alignment. */
	vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	mm->context.vdso_base = vdso_base;

	/*
	 * Our VMA flags don't include VM_WRITE, so by default the process
	 * isn't allowed to write to these pages.
	 * gdb can break that via the ptrace interface and thus trigger COW
	 * on those pages, but it is then your responsibility never to do
	 * that on the "data" page of the vDSO, or you'll stop getting
	 * kernel updates and your nice userland gettimeofday will be
	 * totally dead.  It's fine to use ptrace that way for setting
	 * breakpoints in the vDSO code pages, though.
	 */
	retval = install_special_mapping(mm, vdso_base,
					 pages << PAGE_SHIFT,
					 VM_READ|VM_EXEC |
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
					 pagelist);
	if (retval)
		mm->context.vdso_base = 0;

	return retval;
}

/*
 * Parse the "vdso=" boot parameter; "vdso=0" disables the vDSO except
 * for the single rt_sigreturn page retained by vdso_init().
 */
static __init int vdso_func(char *s)
{
	return kstrtouint(s, 0, &vdso_enabled);
}
__setup("vdso=", vdso_func);