path: root/arch/csky/kernel/vdso.c
blob: abc3dbc658d4979aa9260561a73b60225796561a
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/unistd.h>
#include <linux/uaccess.h>

#include <asm/vdso.h>
#include <asm/cacheflush.h>

static struct page *vdso_page;

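/*
 * Boot-time setup: allocate the vDSO page, install the signal-return
 * trampoline in it and write the data back to memory so user space can
 * execute it from its own mapping.
 */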
static int __init init_vdso(void)
{
	struct csky_vdso *vdso;
	int err = 0;

	vdso_page = alloc_page(GFP_KERNEL);
	if (!vdso_page)
		panic("Cannot allocate vdso");

	vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
	if (!vdso)
		panic("Cannot map vdso");

	clear_page(vdso);

	err = setup_vdso_page(vdso->rt_signal_retcode);
	if (err)
		panic("Cannot set signal return code, err: %x.", err);

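	/*
	 * The trampoline was written through a temporary kernel vmap
	 * alias; write the cache lines covering it back to memory before
	 * user space ever executes it from its own mapping.
	 */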
	dcache_wb_range((unsigned long)vdso, (unsigned long)vdso + 16);

	vunmap(vdso);

	return 0;
}
subsys_initcall(init_vdso);

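/*
 * Called from the ELF loader at exec time: map the vDSO page into the
 * new address space and remember where it landed so the signal code can
 * locate the rt_sigreturn trampoline.
 */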
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long addr;
	struct mm_struct *mm = current->mm;

	mmap_write_lock(mm);

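	/* Find a free page-sized slot, hinted at the top of the user stack. */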
	addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	ret = install_special_mapping(
			mm,
			addr,
			PAGE_SIZE,
			VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
			&vdso_page);
	if (ret)
		goto up_fail;

	mm->context.vdso = (void *)addr;

up_fail:
	mmap_write_unlock(mm);
	return ret;
}

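/* Report the vDSO mapping as "[vdso]", e.g. in /proc/<pid>/maps. */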
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm == NULL)
		return NULL;

	if (vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	else
		return NULL;
}