Skip to content

Commit 11bb0aa

Browse files
author
Daniel Rossier
committed
Alignment with the MICOFE project to make capsules run on RPi4
1 parent ceac389 commit 11bb0aa

27 files changed

Lines changed: 205 additions & 320 deletions

File tree

so3/arch/arm64/Kconfig

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,12 +3,6 @@ if ARCH_ARM64
33

44
menu "Platform"
55

6-
config ARM64VT
7-
depends on AVZ
8-
bool "Virtualization support (ARM64 VT)"
9-
help
10-
Enabling support of CPU virtualization extensions.
11-
126
choice
137
prompt "Target"
148

so3/arch/arm64/Makefile

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11

2-
ifneq ($(CONFIG_ARM64VT),y)
2+
ifneq ($(CONFIG_AVZ),y)
33
obj-y += hyp-stub.o
44
endif
55

@@ -10,16 +10,14 @@ obj-y += backtrace.o backtrace_asm.o
1010
obj-y += cache_v8.o cache.o context.o
1111
obj-y += semihosting.o semicall.o
1212

13-
obj-$(CONFIG_AVZ) += domain.o mmio.o
13+
obj-$(CONFIG_AVZ) += domain.o mmio.o #smmu.o
1414

1515
obj-y += smccc-call.o
1616

1717
obj-y += thread.o
1818

1919
obj-$(CONFIG_MMU) += mmu.o
2020

21-
obj-$(CONFIG_ARM64VT) += #smmu.o
22-
2321
obj-y += lib/
2422

2523
obj-y += $(TARGET)/

so3/arch/arm64/cache_v8.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,7 @@ void mmu_setup(void *pgtable)
101101
invalidate_dcache_all();
102102
__asm_invalidate_tlb_all();
103103

104-
#endif /* !CONFIG_ARM64VT */
104+
#endif /* !CONFIG_AVZ */
105105
}
106106

107107
/*

so3/arch/arm64/domain.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
#include <asm/mmu.h>
2727
#include <asm/processor.h>
2828

29-
#ifdef CONFIG_ARM64VT
29+
#ifdef CONFIG_AVZ
3030
#include <mach/ipamap.h>
3131
#endif
3232

@@ -123,7 +123,7 @@ void __setup_dom_pgtable(struct domain *d, addr_t paddr_start, unsigned long map
123123
/* Map the shared page in the IPA space; the shared page is located right after the domain area
124124
* in the IPA space, and if any, the RT shared page follows the shared page (in IPA space).
125125
*/
126-
__create_mapping(new_pt, memslot[slotID].ipa_addr + map_size, __pa(d->avz_shared), PAGE_SIZE, true, S2);
126+
__create_mapping(new_pt, memslot[slotID].ipa_addr + map_size, __pa(d->avz_shared), PAGE_SIZE, false, S2);
127127

128128
#ifdef CONFIG_SOO
129129
/* Initialize the grant pfn (ipa address) area */

so3/arch/arm64/include/asm/arm_timer.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
#include <asm/processor.h>
2525

2626
/**
27-
* In AVZ and ARM64VT we are using the ARM physical timer. The guest domains will
27+
* In AVZ we are using the ARM physical timer. The guest domains will
2828
* rely on virtual timer where an offset can be added.
2929
*/
3030

@@ -53,7 +53,7 @@
5353
* nicely work out which register we want, and chuck away the rest of
5454
* the code. At least it does so with a recent GCC (4.6.3).
5555
*/
56-
#ifdef CONFIG_ARM64VT
56+
#ifdef CONFIG_AVZ
5757

5858
static inline void arch_timer_reg_write_el2(enum arch_timer_reg reg, u32 val)
5959
{
@@ -161,7 +161,7 @@ static inline u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
161161
return 0;
162162
}
163163

164-
#endif /* CONFIG_ARM64VT */
164+
#endif /* CONFIG_AVZ */
165165

166166
/**
167167
* Get the timer frequency
@@ -183,7 +183,7 @@ static inline u64 arch_counter_get_cntvct(void)
183183
u64 cnt;
184184

185185
isb();
186-
#ifdef CONFIG_ARM64VT
186+
#ifdef CONFIG_AVZ
187187
cnt = read_sysreg(cntpct_el0);
188188
#else
189189
cnt = read_sysreg(cntvct_el0);

so3/arch/arm64/include/asm/mmu.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -523,7 +523,7 @@ typedef enum { S1, S2 } mmu_stage_t;
523523
#define VA2PA_WR "w"
524524
#define va2pa_at(stage, el, rw, va) asm volatile("at " stage el rw ", %0" : : "r"(va) : "memory", "cc");
525525

526-
#ifdef CONFIG_ARM64VT
526+
#ifdef CONFIG_AVZ
527527

528528
typedef struct {
529529
addr_t ipa_addr;
@@ -556,7 +556,7 @@ static inline void set_pte_page_S2(u64 *pte, enum dcache_option option)
556556
*pte |= S2_PTE_FLAG_NORMAL;
557557
}
558558

559-
#endif /* CONFIG_ARM64_VT */
559+
#endif /* CONFIG_AVZ */
560560

561561
static inline void set_pte_table(u64 *pte, enum dcache_option option)
562562
{
@@ -620,7 +620,7 @@ static inline bool user_space_vaddr(addr_t addr)
620620
return true;
621621
}
622622

623-
#ifdef CONFIG_ARM64VT
623+
#ifdef CONFIG_AVZ
624624

625625
static inline unsigned int get_sctlr(void)
626626
{
@@ -637,7 +637,7 @@ static inline void set_sctlr(unsigned int val)
637637
asm volatile("isb");
638638
}
639639

640-
#else
640+
#else /* CONFIG_AVZ */
641641

642642
static inline unsigned int get_sctlr(void)
643643
{
@@ -654,7 +654,7 @@ static inline void set_sctlr(unsigned int val)
654654
asm volatile("isb");
655655
}
656656

657-
#endif
657+
#endif /* !CONFIG_AVZ */
658658

659659
extern addr_t __sys_root_pgtable[], __sys_idmap_l1pgtable[], __sys_linearmap_l1pgtable[], __sys_linearmap_l2pgtable[];
660660

@@ -678,7 +678,7 @@ extern void __mmu_switch_vttbr(void *root_pgtable_phys);
678678

679679
void __mmu_setup(void *pgtable);
680680

681-
#ifdef CONFIG_ARM64VT
681+
#ifdef CONFIG_AVZ
682682
void do_ipamap(void *pgtable, ipamap_t ipamap[], int nbelement);
683683
#endif
684684

so3/arch/arm64/include/asm/processor.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -648,7 +648,7 @@
648648
#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
649649
SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_IESB)
650650

651-
#ifdef CONFIG_ARM64VT
651+
#ifdef CONFIG_AVZ
652652
#define SCTLR_EL2_RES1 (SCTLR_UCI_BIT | SCTLR_nTWE | SCTLR_nTWI \
653653
| SCTLR_UCT_BIT | SCTLR_DZE_BIT)
654654
#else

so3/arch/arm64/mmu.c

Lines changed: 7 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ static void alloc_init_l3(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys,
8888
/* Attach the L2 PTE to this L3 page table */
8989
*l2pte = __pa((addr_t) l3pgtable) & TTB_L2_TABLE_ADDR_MASK;
9090

91-
#ifdef CONFIG_ARM64VT
91+
#ifdef CONFIG_AVZ
9292
if (stage == S1)
9393
set_pte_table(l2pte, (nocache ? DCACHE_OFF : DCACHE_WRITEALLOC));
9494
else
@@ -105,7 +105,7 @@ static void alloc_init_l3(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys,
105105

106106
*l3pte = phys & TTB_L3_PAGE_ADDR_MASK;
107107

108-
#ifdef CONFIG_ARM64VT
108+
#ifdef CONFIG_AVZ
109109
if (stage == S1)
110110
set_pte_page(l3pte, (nocache ? DCACHE_OFF : DCACHE_WRITEALLOC));
111111
else
@@ -161,7 +161,7 @@ static void alloc_init_l2(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys,
161161
/* Attach the L1 PTE to this L2 page table */
162162
*l1pte = __pa((addr_t) l2pgtable) & TTB_L1_TABLE_ADDR_MASK;
163163

164-
#ifdef CONFIG_ARM64VT
164+
#ifdef CONFIG_AVZ
165165
if (stage == S1)
166166
set_pte_table(l1pte, (nocache ? DCACHE_OFF : DCACHE_WRITEALLOC));
167167
else
@@ -183,7 +183,7 @@ static void alloc_init_l2(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys,
183183
if (((addr | next | phys) & ~BLOCK_2M_MASK) == 0) {
184184
*l2pte = phys & TTB_L2_BLOCK_ADDR_MASK;
185185

186-
#ifdef CONFIG_ARM64VT
186+
#ifdef CONFIG_AVZ
187187
if (stage == S1)
188188
set_pte_block(l2pte, (nocache ? DCACHE_OFF : DCACHE_WRITEALLOC));
189189
else
@@ -239,7 +239,7 @@ static void alloc_init_l1(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys,
239239

240240
/* Attach the L0 PTE to this L1 page table */
241241
*l0pte = __pa((addr_t) l1pgtable) & TTB_L0_TABLE_ADDR_MASK;
242-
#ifdef CONFIG_ARM64VT
242+
#ifdef CONFIG_AVZ
243243
if (stage == S1)
244244
set_pte_table(l0pte, (nocache ? DCACHE_OFF : DCACHE_WRITEALLOC));
245245
else
@@ -260,7 +260,7 @@ static void alloc_init_l1(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys,
260260

261261
if (((addr | next | phys) & ~BLOCK_1G_MASK) == 0) {
262262
*l1pte = phys & TTB_L1_BLOCK_ADDR_MASK;
263-
#ifdef CONFIG_ARM64VT
263+
#ifdef CONFIG_AVZ
264264
if (stage == S1)
265265
set_pte_block(l1pte, (nocache ? DCACHE_OFF : DCACHE_WRITEALLOC));
266266
else
@@ -712,9 +712,6 @@ void __mmu_switch_kernel(void *pgtable_paddr, bool vttbr)
712712
if (vttbr)
713713
__mmu_switch_vttbr(pgtable_paddr);
714714
else
715-
#endif
716-
717-
#ifdef CONFIG_ARM64VT
718715
__mmu_switch_ttbr0(pgtable_paddr);
719716
#else
720717
__mmu_switch_ttbr1(pgtable_paddr);
@@ -968,6 +965,7 @@ addr_t virt_to_phys_pt(addr_t vaddr)
968965

969966
l1pte = l1pte_offset(l0pte, vaddr);
970967
BUG_ON(!*l1pte);
968+
971969
#elif CONFIG_VA_BITS_39
972970
if (user_space_vaddr(vaddr))
973971
l1pte = l1pte_offset((u64 *) current_pgtable(), vaddr);
@@ -1000,7 +998,6 @@ addr_t virt_to_phys_pt(addr_t vaddr)
1000998

1001999
#ifdef CONFIG_AVZ
10021000

1003-
#ifdef CONFIG_ARM64VT
10041001
/**
10051002
* Perform a mapping of IPA regions to physical regions
10061003
*
@@ -1017,4 +1014,3 @@ void do_ipamap(void *pgtable, ipamap_t ipamap[], int nbelement)
10171014

10181015
#endif /* CONFIG_AVZ */
10191016

1020-
#endif

so3/arch/arm64/rpi4_64/include/mach/ipamap.h

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,9 @@ ipamap_t agency_ipamap[] = {
2525

2626
/* I/O Memory space*/
2727
{
28-
.ipa_addr = 0xf0000000,
29-
.phys_addr = 0xf0000000,
30-
.size = 0x10000000,
28+
.ipa_addr = 0xfc000000,
29+
.phys_addr = 0xfc000000,
30+
.size = 0x04000000,
3131
},
3232

3333
/* Null pointer exception */
@@ -46,12 +46,13 @@ ipamap_t agency_ipamap[] = {
4646
ipamap_t capsule_ipamap[] = {
4747

4848
{
49-
/* Only mapping the CPU interface to the vGIC CPU interface.
49+
/* Only mapping the CPU interface to the vGIC CPU interface (GICV).
5050
* Access to the distributor must lead to a trap and be handled by the hypervisor.
51+
* BCM2711 GIC-400: GICV (virtual CPU interface) at 0xFF846000.
5152
*/
52-
.ipa_addr = 0x08010000,
53-
.phys_addr = 0x08040000,
54-
.size = 0x10000,
53+
.ipa_addr = 0xff842000,
54+
.phys_addr = 0xff846000,
55+
.size = 0x2000,
5556
},
5657
};
5758

so3/arch/arm64/traps.c

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -78,10 +78,19 @@ void trap_handle_error(addr_t lr)
7878
{
7979
#ifdef CONFIG_AVZ
8080
unsigned long esr = read_sysreg(esr_el2);
81+
unsigned long far = read_sysreg(far_el2);
82+
unsigned long elr = read_sysreg(elr_el2);
83+
unsigned long hpfar = read_sysreg(hpfar_el2);
8184
#else
8285
unsigned long esr = read_sysreg(esr_el1);
86+
unsigned long far = read_sysreg(far_el1);
87+
unsigned long elr = 0;
88+
unsigned long hpfar = 0;
8389
#endif
8490

91+
printk(" FAR: %lx\n", far);
92+
printk(" ELR: %lx\n", elr);
93+
printk(" HPFAR: %lx (IPA: %lx)\n", hpfar, (hpfar >> 4) << 12);
8594
show_invalid_entry_message(ESR_ELx_EC(esr), esr, lr);
8695
}
8796

@@ -123,7 +132,7 @@ void trap_handle(cpu_regs_t *regs)
123132
syscall_args_t sys_args;
124133
#endif
125134

126-
#ifdef CONFIG_ARM64VT
135+
#ifdef CONFIG_AVZ
127136

128137
unsigned long esr = read_sysreg(esr_el2);
129138
unsigned long hvc_code;
@@ -137,7 +146,7 @@ void trap_handle(cpu_regs_t *regs)
137146

138147
#else
139148
unsigned long esr = read_sysreg(esr_el1);
140-
#endif /* CONFIG_ARM64VT */
149+
#endif /* CONFIG_AVZ */
141150

142151
switch (ESR_ELx_EC(esr)) {
143152
case ESR_ELx_EC_DABT_LOW:

0 commit comments

Comments
 (0)