// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>

#include <asm/loongarch.h>
#include <asm/setup.h>

static struct irq_domain *irq_domain;
struct fwnode_handle *cpuintc_handle;
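
/*
 * Fallback used when a GSI cannot be resolved directly: GSIs in the PCH-PIC
 * range are registered as level-triggered, active-high interrupts; every
 * other range returns 0 (no mapping).
 */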
static u32 lpic_gsi_to_irq(u32 gsi)
{
	/* Only pch irqdomain transferring is required for LoongArch. */
	if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)
		return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);

	return 0;
}
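
/*
 * Return the fwnode of the irqdomain that owns @gsi: LIOINTC for CPU GSIs,
 * PCH-LPC for legacy LPC GSIs, and the matching PCH-PIC instance otherwise.
 */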
static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)
{
	int id;
	struct fwnode_handle *domain_handle = NULL;

	switch (gsi) {
	case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ:
		if (liointc_handle)
			domain_handle = liointc_handle;
		break;

	case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ:
		if (pch_lpc_handle)
			domain_handle = pch_lpc_handle;
		break;

	case GSI_MIN_PCH_IRQ ... GSI_MAX_PCH_IRQ:
		id = find_pch_pic(gsi);
		if (id >= 0 && pch_pic_handle[id])
			domain_handle = pch_pic_handle[id];
		break;
	}

	return domain_handle;
}
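
/* Mask/unmask a CPU interrupt line via its bit in the ECFG CSR. */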
static void mask_loongarch_irq(struct irq_data *d)
{
	clear_csr_ecfg(ECFGF(d->hwirq));
}

static void unmask_loongarch_irq(struct irq_data *d)
{
	set_csr_ecfg(ECFGF(d->hwirq));
}

static struct irq_chip cpu_irq_controller = {
	.name		= "CPUINTC",
	.irq_mask	= mask_loongarch_irq,
	.irq_unmask	= unmask_loongarch_irq,
};
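
/*
 * Top-level interrupt dispatcher: read the pending bits from the ESTAT CSR
 * and hand each one to the generic IRQ layer until none remain.
 */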
static void handle_cpu_irq(struct pt_regs *regs)
{
	int hwirq;
	unsigned int estat = read_csr_estat() & CSR_ESTAT_IS;

	while ((hwirq = ffs(estat))) {
		estat &= ~BIT(hwirq - 1);
		generic_handle_domain_irq(irq_domain, hwirq - 1);
	}
}
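
/* Every hwirq in this domain is a per-CPU interrupt handled by cpu_irq_controller. */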
static int loongarch_cpu_intc_map(struct irq_domain *d, unsigned int irq,
				  irq_hw_number_t hwirq)
{
	irq_set_noprobe(irq);
	irq_set_chip_and_handler(irq, &cpu_irq_controller, handle_percpu_irq);

	return 0;
}

static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = {
	.map = loongarch_cpu_intc_map,
	.xlate = irq_domain_xlate_onecell,
};
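
/* MADT callbacks: initialize the LIOINTC/EIOINTC controllers cascaded below the CPU domain. */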
static int __init
liointc_parse_madt(union acpi_subtable_headers *header,
		   const unsigned long end)
{
	struct acpi_madt_lio_pic *liointc_entry = (struct acpi_madt_lio_pic *)header;

	return liointc_acpi_init(irq_domain, liointc_entry);
}

static int __init
eiointc_parse_madt(union acpi_subtable_headers *header,
		   const unsigned long end)
{
	struct acpi_madt_eio_pic *eiointc_entry = (struct acpi_madt_eio_pic *)header;

	return eiointc_acpi_init(irq_domain, eiointc_entry);
}
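
/* Walk the MADT once and probe all controllers cascaded from the CPU interrupt controller. */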
static int __init acpi_cascade_irqdomain_init(void)
{
	acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC,
			      liointc_parse_madt, 0);
	acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
			      eiointc_parse_madt, 0);

	return 0;
}
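
/*
 * Probe entry: mask all interrupts, create the linear irqdomain for the CPU
 * interrupt controller, install the top-level dispatcher, register the LPIC
 * ACPI interrupt model and then bring up the cascaded irqdomains.
 */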
static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
				    const unsigned long end)
{
	if (irq_domain)
		return 0;

	/* Mask interrupts. */
	clear_csr_ecfg(ECFG0_IM);
	clear_csr_estat(ESTATF_IP);

	cpuintc_handle = irq_domain_alloc_fwnode(NULL);
	irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM,
					      &loongarch_cpu_intc_irq_domain_ops, NULL);
	if (!irq_domain)
		panic("Failed to add irqdomain for LoongArch CPU");

	set_handle_irq(&handle_cpu_irq);
	acpi_set_irq_model(ACPI_IRQ_MODEL_LPIC, lpic_get_gsi_domain_id);
	acpi_set_gsi_to_irq_fallback(lpic_gsi_to_irq);
	acpi_cascade_irqdomain_init();

	return 0;
}
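
/* Match the MADT CORE_PIC (version 1) entry so the probe runs at boot. */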
IRQCHIP_ACPI_DECLARE(cpuintc_v1, ACPI_MADT_TYPE_CORE_PIC,
		     NULL, ACPI_MADT_CORE_PIC_VERSION_V1, cpuintc_acpi_init);