Skip to content

Commit b4cbe48

Browse files
andrew-mtklgirdwood
authored and committed
drivers: mtk: Add drivers for mt8365
Add interrupt, timer, and ipc drivers for mt8365. The mt8365 DSP has 25 interrupts. Signed-off-by: Andrew Perepech <andrew.perepech@mediatek.com>
1 parent b482346 commit b4cbe48

7 files changed

Lines changed: 718 additions & 0 deletions

File tree

src/drivers/mediatek/CMakeLists.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,3 +10,6 @@ endif()
1010
if(CONFIG_MT8196)
1111
add_subdirectory(mt8196)
1212
endif()
13+
if(CONFIG_MT8365)
14+
add_subdirectory(mt8365)
15+
endif()
Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
# SPDX-License-Identifier: BSD-3-Clause
2+
3+
add_local_sources(sof ipc.c timer.c interrupt.c)
4+
Lines changed: 225 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,225 @@
1+
// SPDX-License-Identifier: BSD-3-Clause
2+
/*
3+
* Copyright(c) 2024 MediaTek. All rights reserved.
4+
*
5+
* Author: Andrew Perepech <andrew.perepech@mediatek.com>
6+
*/
7+
8+
#include <errno.h>
9+
#include <inttypes.h>
10+
#include <rtos/bit.h>
11+
#include <sof/common.h>
12+
#include <rtos/interrupt.h>
13+
#include <sof/lib/cpu.h>
14+
#include <sof/lib/io.h>
15+
#include <sof/lib/memory.h>
16+
#include <sof/lib/uuid.h>
17+
#include <sof/list.h>
18+
#include <rtos/spinlock.h>
19+
#include <stdbool.h>
20+
#include <stddef.h>
21+
#include <stdint.h>
22+
23+
#define PENDING_IRQ_INDEX_MAX 10
24+
25+
SOF_DEFINE_REG_UUID(interrupt);
26+
27+
DECLARE_TR_CTX(int_tr, SOF_UUID(interrupt_uuid), LOG_LEVEL_INFO);
28+
29+
static void mtk_irq_init(void)
30+
{
31+
/* disable all ADSP IRQ */
32+
io_reg_write(RG_DSP_IRQ_EN, 0x0);
33+
}
34+
35+
static void mtk_irq_mask(struct irq_desc *desc, uint32_t irq, unsigned int core)
36+
{
37+
if (!desc) {
38+
io_reg_update_bits(RG_DSP_IRQ_EN, BIT(irq + IRQ_EXT_BIT_OFFSET), 0);
39+
} else {
40+
switch (desc->irq) {
41+
case IRQ_EXT_GROUP1_BASE:
42+
io_reg_update_bits(RG_DSP_IRQ_EN, BIT(irq + IRQ_EXT_GROUP1_BIT_OFFSET), 0);
43+
break;
44+
default:
45+
tr_err(&int_tr, "Invalid interrupt %d", irq);
46+
return;
47+
}
48+
}
49+
}
50+
51+
static void mtk_irq_unmask(struct irq_desc *desc, uint32_t irq, unsigned int core)
52+
{
53+
if (!desc) {
54+
io_reg_update_bits(RG_DSP_IRQ_EN, BIT(irq + IRQ_EXT_BIT_OFFSET),
55+
BIT(irq + IRQ_EXT_BIT_OFFSET));
56+
} else {
57+
switch (desc->irq) {
58+
case IRQ_EXT_GROUP1_BASE:
59+
io_reg_update_bits(RG_DSP_IRQ_EN, BIT(irq + IRQ_EXT_GROUP1_BIT_OFFSET),
60+
BIT(irq + IRQ_EXT_GROUP1_BIT_OFFSET));
61+
break;
62+
default:
63+
tr_err(&int_tr, "Invalid interrupt %d", irq);
64+
return;
65+
}
66+
}
67+
}
68+
69+
static uint32_t mtk_irq_group_pending_status(uint32_t irq)
70+
{
71+
uint32_t irq_status = 0;
72+
73+
if (irq == IRQ_EXT_GROUP1_BASE) {
74+
irq_status = io_reg_read(RG_DSP_IRQ_STATUS);
75+
irq_status &= IRQ_EXT_MASK;
76+
}
77+
78+
return irq_status;
79+
}
80+
81+
static uint32_t mtk_get_pending_index(uint32_t current, uint32_t *next)
82+
{
83+
uint32_t index;
84+
85+
if (current == 0)
86+
return PENDING_IRQ_INDEX_MAX;
87+
88+
/* ffs returns one plus the index of the least significant 1-bit of input int */
89+
index = ffs(current) - 1;
90+
91+
/* remove the handling index from current pending status */
92+
*next = current & ~(1ull << index);
93+
94+
return index;
95+
}
96+
97+
static inline void mtk_handle_group_pending_irq(struct irq_cascade_desc *cascade,
98+
uint32_t line_index, uint32_t status)
99+
{
100+
int core = cpu_get_id();
101+
struct list_item *clist;
102+
struct irq_desc *child = NULL;
103+
uint32_t idx;
104+
uint32_t next_status;
105+
bool handled;
106+
k_spinlock_key_t key;
107+
108+
idx = mtk_get_pending_index(status, &next_status);
109+
while (idx < PENDING_IRQ_INDEX_MAX) {
110+
handled = false;
111+
112+
key = k_spin_lock(&cascade->lock);
113+
list_for_item(clist, &cascade->child[idx - IRQ_EXT_GROUP1_BIT_OFFSET].list) {
114+
child = container_of(clist, struct irq_desc, irq_list);
115+
116+
if (child->handler && (child->cpu_mask & 1 << core)) {
117+
child->handler(child->handler_arg);
118+
handled = true;
119+
}
120+
}
121+
k_spin_unlock(&cascade->lock, key);
122+
123+
if (!handled) {
124+
tr_err(&int_tr, "Not handle irq %u in group %u",
125+
idx, line_index);
126+
}
127+
128+
idx = mtk_get_pending_index(next_status, &next_status);
129+
}
130+
}
131+
132+
static inline void mtk_irq_group_handler(void *data, uint32_t line_index)
133+
{
134+
struct irq_desc *parent = data;
135+
struct irq_cascade_desc *cascade =
136+
container_of(parent, struct irq_cascade_desc, desc);
137+
uint32_t status;
138+
139+
status = mtk_irq_group_pending_status(line_index);
140+
if (status)
141+
mtk_handle_group_pending_irq(cascade, line_index, status);
142+
else
143+
tr_err(&int_tr, "No pending irq in group %d", line_index);
144+
}
145+
146+
#define DEFINE_IRQ_HANDLER(n) \
147+
static void irqhandler_##n(void *arg) \
148+
{ \
149+
mtk_irq_group_handler(arg, n); \
150+
}
151+
152+
DEFINE_IRQ_HANDLER(1)
153+
154+
static const char mtk_ext_irq[] = "mtk_ext_irq";
155+
156+
static const struct irq_cascade_ops irq_ops = {
157+
.mask = mtk_irq_mask,
158+
.unmask = mtk_irq_unmask,
159+
};
160+
161+
static const struct irq_cascade_tmpl dsp_irq[] = {
162+
{
163+
.name = mtk_ext_irq,
164+
.irq = IRQ_EXT_GROUP1_BASE,
165+
.handler = irqhandler_1,
166+
.ops = &irq_ops,
167+
.global_mask = false,
168+
},
169+
};
170+
171+
int mtk_irq_group_id(uint32_t in_irq)
172+
{
173+
if (in_irq >= PLATFORM_IRQ_HW_NUM)
174+
in_irq -= PLATFORM_IRQ_HW_NUM;
175+
176+
return interrupt_get_irq(in_irq, dsp_irq[0].name);
177+
}
178+
179+
void platform_interrupt_init(void)
180+
{
181+
int i;
182+
183+
mtk_irq_init();
184+
for (i = 0; i < ARRAY_SIZE(dsp_irq); i++)
185+
interrupt_cascade_register(dsp_irq + i);
186+
}
187+
188+
void platform_interrupt_set(uint32_t irq)
189+
{
190+
if (interrupt_is_dsp_direct(irq))
191+
arch_interrupt_set(irq);
192+
}
193+
194+
void platform_interrupt_clear(uint32_t irq, uint32_t mask)
195+
{
196+
if (interrupt_is_dsp_direct(irq))
197+
arch_interrupt_clear(irq);
198+
}
199+
200+
uint32_t platform_interrupt_get_enabled(void)
201+
{
202+
return 0;
203+
}
204+
205+
void interrupt_mask(uint32_t irq, unsigned int cpu)
206+
{
207+
struct irq_cascade_desc *cascade = interrupt_get_parent(irq);
208+
209+
if (cascade && cascade->ops->mask)
210+
cascade->ops->mask(&cascade->desc, irq - cascade->irq_base,
211+
cpu);
212+
else
213+
mtk_irq_mask(NULL, irq, 0);
214+
}
215+
216+
void interrupt_unmask(uint32_t irq, unsigned int cpu)
217+
{
218+
struct irq_cascade_desc *cascade = interrupt_get_parent(irq);
219+
220+
if (cascade && cascade->ops->unmask)
221+
cascade->ops->unmask(&cascade->desc, irq - cascade->irq_base,
222+
cpu);
223+
else
224+
mtk_irq_unmask(NULL, irq, 0);
225+
}

0 commit comments

Comments
 (0)