// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 ARM Ltd.
*/

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/sysreg.h>

void mte_sync_tags(pte_t *ptep, pte_t pte)
{
        struct page *page = pte_page(pte);
        long i, nr_pages = compound_nr(page);

        /* if PG_mte_tagged is set, tags have already been initialised */
        for (i = 0; i < nr_pages; i++, page++) {
                if (!test_and_set_bit(PG_mte_tagged, &page->flags))
                        mte_clear_page_tags(page_address(page));
        }
}
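
/*
 * Illustrative only, not part of the original file: mte_sync_tags() is
 * meant to be invoked from the arch set_pte_at() path when a present,
 * tagged user mapping is installed, roughly along these lines:
 *
 *	if (system_supports_mte() && pte_present(pte) && pte_tagged(pte))
 *		mte_sync_tags(ptep, pte);
 *
 * The exact guard conditions depend on how the pgtable code wires this up.
 */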

int memcmp_pages(struct page *page1, struct page *page2)
{
        char *addr1, *addr2;
        int ret;

        addr1 = page_address(page1);
        addr2 = page_address(page2);
        ret = memcmp(addr1, addr2, PAGE_SIZE);

        if (!system_supports_mte() || ret)
                return ret;

        /*
         * If the page content is identical but at least one of the pages is
         * tagged, return non-zero to avoid KSM merging. If only one of the
         * pages is tagged, set_pte_at() may zero or change the tags of the
         * other page via mte_sync_tags().
         */
        if (test_bit(PG_mte_tagged, &page1->flags) ||
            test_bit(PG_mte_tagged, &page2->flags))
                return addr1 != addr2;

        return ret;
}
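
/*
 * Note (not in the original file): this overrides the weak memcmp_pages()
 * in mm/util.c, so KSM's pages_identical() treats tagged pages as distinct
 * even when their data compares equal.
 */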

static void update_sctlr_el1_tcf0(u64 tcf0)
{
        /* ISB required for the kernel uaccess routines */
        sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
        isb();
}

static void set_sctlr_el1_tcf0(u64 tcf0)
{
        /*
         * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
         * optimisation. Disable preemption so that it does not see
         * the variable update before the SCTLR_EL1.TCF0 one.
         */
        preempt_disable();
        current->thread.sctlr_tcf0 = tcf0;
        update_sctlr_el1_tcf0(tcf0);
        preempt_enable();
}

void flush_mte_state(void)
{
        if (!system_supports_mte())
                return;

        /* clear any pending asynchronous tag fault */
        dsb(ish);
        write_sysreg_s(0, SYS_TFSRE0_EL1);
        clear_thread_flag(TIF_MTE_ASYNC_FAULT);
        /* disable tag checking */
        set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
}
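
/*
 * Illustrative only: flush_mte_state() is expected to be called from the
 * exec path (e.g. flush_thread() in arch/arm64/kernel/process.c) so that a
 * new program starts with tag checking disabled and no stale async faults.
 */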

void mte_thread_switch(struct task_struct *next)
{
        if (!system_supports_mte())
                return;

        /* avoid expensive SCTLR_EL1 accesses if no change */
        if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
                update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
}
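
/*
 * Illustrative only, not part of the original file: this hook is expected
 * to run from the context-switch path before the new task resumes, roughly:
 *
 *	__switch_to(prev, next)
 *	{
 *		...
 *		mte_thread_switch(next);
 *		...
 *	}
 *
 * The set_mte_ctrl()/get_mte_ctrl() helpers below act as the backend for
 * the PR_SET/GET_TAGGED_ADDR_CTRL prctl()s.
 */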

long set_mte_ctrl(unsigned long arg)
{
        u64 tcf0;

        if (!system_supports_mte())
                return 0;

        switch (arg & PR_MTE_TCF_MASK) {
        case PR_MTE_TCF_NONE:
                tcf0 = SCTLR_EL1_TCF0_NONE;
                break;
        case PR_MTE_TCF_SYNC:
                tcf0 = SCTLR_EL1_TCF0_SYNC;
                break;
        case PR_MTE_TCF_ASYNC:
                tcf0 = SCTLR_EL1_TCF0_ASYNC;
                break;
        default:
                return -EINVAL;
        }

        set_sctlr_el1_tcf0(tcf0);

        return 0;
}

long get_mte_ctrl(void)
{
        if (!system_supports_mte())
                return 0;

        switch (current->thread.sctlr_tcf0) {
        case SCTLR_EL1_TCF0_NONE:
                return PR_MTE_TCF_NONE;
        case SCTLR_EL1_TCF0_SYNC:
                return PR_MTE_TCF_SYNC;
        case SCTLR_EL1_TCF0_ASYNC:
                return PR_MTE_TCF_ASYNC;
        }

        return 0;
}
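
/*
 * Illustrative userspace usage, assuming these helpers are wired up to the
 * PR_SET/GET_TAGGED_ADDR_CTRL prctl()s (as in the arm64 tagged address ABI
 * code); the PR_MTE_* constants require uapi headers recent enough to
 * define them:
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL,
 *	      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC,
 *	      0, 0, 0);
 */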