path: root/arch/arm64/include/asm/hardirq.h
blob: 87ad961f3c9753c30a33d1d8c8baac050d75612b
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H

#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/threads.h>
#include <asm/barrier.h>
#include <asm/irq.h>
#include <asm/kvm_arm.h>
#include <asm/sysreg.h>

#define NR_IPI	7

typedef struct {
	unsigned int __softirq_pending;
	unsigned int ipi_irqs[NR_IPI];
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

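/* Per-CPU accessors for the irq_stat counters set up by <linux/irq_cpustat.h>. */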
#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)

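/*
 * Sum of this CPU's IPI counts (see arch/arm64/kernel/smp.c); exposed as
 * arch_irq_stat_cpu for the generic /proc/stat accounting.
 */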
u64 smp_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	smp_irq_stat_cpu

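/* irq_exit() is always entered with interrupts disabled on arm64. */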
#define __ARCH_IRQ_EXIT_IRQS_DISABLED	1

struct nmi_ctx {
	u64 hcr;
};

DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);

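/*
 * With VHE the host runs at EL2 with HCR_EL2.TGE set; TGE is cleared around
 * guest entry/exit. An NMI landing in that window must set TGE again so the
 * handler runs with the host configuration (e.g. for user-space accesses),
 * and the HCR_EL2 value saved in nmi_ctx is restored on the way out.
 */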
#define arch_nmi_enter()							\
	do {									\
		if (is_kernel_in_hyp_mode()) {					\
			struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts);	\
			nmi_ctx->hcr = read_sysreg(hcr_el2);			\
			if (!(nmi_ctx->hcr & HCR_TGE)) {			\
				write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2);	\
				isb();						\
			}							\
		}								\
	} while (0)

#define arch_nmi_exit()								\
	do {									\
		if (is_kernel_in_hyp_mode()) {					\
			struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts);	\
			if (!(nmi_ctx->hcr & HCR_TGE))				\
				write_sysreg(nmi_ctx->hcr, hcr_el2);		\
		}								\
	} while (0)

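/* Called by the core IRQ code for unexpected interrupts; just account them. */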
static inline void ack_bad_irq(unsigned int irq)
{
	extern unsigned long irq_err_count;
	irq_err_count++;
}

#endif /* __ASM_HARDIRQ_H */
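
arch_nmi_enter() and arch_nmi_exit() are not called directly by drivers; the
generic nmi_enter()/nmi_exit() helpers in <linux/hardirq.h> invoke them as
their first and last architecture hooks. Below is a minimal sketch of the
pairing under that assumption; handle_pseudo_nmi() is a hypothetical handler
name used only for illustration, not a function from the kernel tree.

#include <linux/hardirq.h>
#include <asm/ptrace.h>

/* Illustrative only: shows how the arch hooks bracket NMI handling. */
static void handle_pseudo_nmi(struct pt_regs *regs)
{
	/* nmi_enter() runs arch_nmi_enter() first, setting HCR_EL2.TGE if clear */
	nmi_enter();

	/* NMI work goes here; the handler now runs with the host configuration */

	/* nmi_exit() runs arch_nmi_exit() last, restoring the saved HCR_EL2 */
	nmi_exit();
}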