summaryrefslogtreecommitdiffstats
path: root/arch/s390/include/asm/kasan.h
blob: 2768d5db181f89992bdf2f1f9249aa03ad7db4c9 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H

#include <asm/pgtable.h>

#ifdef CONFIG_KASAN

/* One shadow byte tracks 2^3 = 8 bytes of kernel memory. */
#define KASAN_SHADOW_SCALE_SHIFT 3
/*
 * Shadow covers the address range spanned by a region-1 table,
 * scaled down by the shadow ratio (1/8th of that range).
 */
#define KASAN_SHADOW_SIZE						       \
	(_AC(1, UL) << (_REGION1_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
/* Fixed base of the shadow mapping, chosen at kernel configuration time. */
#define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_START	KASAN_SHADOW_OFFSET
#define KASAN_SHADOW_END	(KASAN_SHADOW_START + KASAN_SHADOW_SIZE)

/* Early-boot KASAN setup hooks; stubbed out below when CONFIG_KASAN=n. */
extern void kasan_early_init(void);
extern void kasan_copy_shadow_mapping(void);
extern void kasan_free_early_identity(void);

/*
 * Estimate kasan memory requirements, which it will reserve
 * at the very end of available physical memory. To estimate
 * that, we take into account that kasan would require
 * 1/8 of available physical memory (for shadow memory) +
 * creating page tables for the whole memory + shadow memory
 * region (1 + 1/8). To keep page tables estimates simple take
 * the double of combined ptes size.
 *
 * physmem parameter has to be already adjusted if not entire physical memory
 * would be used (e.g. due to effect of "mem=" option).
 */
static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem)
{
	unsigned long shadow_size;
	unsigned long total_pages;

	/* Shadow memory: one byte per 8 bytes of physical memory. */
	shadow_size = round_up(physmem / 8, PAGE_SIZE);
	/*
	 * Page tables must map both the identity range and the shadow
	 * region; doubling the combined pte size keeps the estimate simple.
	 */
	total_pages = DIV_ROUND_UP(physmem + shadow_size, PAGE_SIZE);

	return shadow_size +
	       DIV_ROUND_UP(total_pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
}
#else
/* CONFIG_KASAN=n: no-op stubs so callers do not need #ifdef guards. */
static inline void kasan_early_init(void) { }
static inline void kasan_copy_shadow_mapping(void) { }
static inline void kasan_free_early_identity(void) { }
static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem) { return 0; }
#endif

#endif