From 51e8fac674a6ca7c8b552611545dd450b7251b4c Mon Sep 17 00:00:00 2001 From: Daniel Oliveira Date: Mon, 6 Nov 2023 15:36:42 +0000 Subject: [PATCH] style: format comment blocks to wrap on 100 col limit Signed-off-by: Daniel Oliveira Signed-off-by: Jose Martins --- src/arch/armv8/aarch32/boot.S | 36 ++--- .../armv8/aarch32/inc/arch/subarch/sysregs.h | 4 +- src/arch/armv8/aarch64/boot.S | 40 +++-- src/arch/armv8/aarch64/inc/arch/spinlock.h | 3 +- src/arch/armv8/aarch64/inc/arch/subarch/vm.h | 3 +- src/arch/armv8/aborts.c | 9 +- src/arch/armv8/armv8-a/aarch32/boot.S | 4 +- src/arch/armv8/armv8-a/aarch32/vmm.c | 3 +- src/arch/armv8/armv8-a/aarch64/boot.S | 18 +-- src/arch/armv8/armv8-a/aarch64/relocate.S | 3 +- src/arch/armv8/armv8-a/aarch64/vmm.c | 12 +- src/arch/armv8/armv8-a/cpu.c | 11 +- src/arch/armv8/armv8-a/inc/arch/page_table.h | 6 +- src/arch/armv8/armv8-a/mem.c | 5 +- src/arch/armv8/armv8-a/page_table.c | 4 +- src/arch/armv8/armv8-a/psci.c | 9 +- src/arch/armv8/armv8-a/smmuv2.c | 29 ++-- src/arch/armv8/armv8-r/aarch32/boot.S | 13 +- src/arch/armv8/armv8-r/aarch64/boot.S | 13 +- src/arch/armv8/armv8-r/inc/arch/profile/cpu.h | 8 +- src/arch/armv8/armv8-r/mpu.c | 81 +++++----- src/arch/armv8/armv8-r/psci.c | 9 +- src/arch/armv8/armv8-r/vmm.c | 4 +- src/arch/armv8/cache.c | 4 +- src/arch/armv8/cpu.c | 6 +- src/arch/armv8/gic.c | 3 +- src/arch/armv8/gicv2.c | 3 +- src/arch/armv8/inc/arch/sysregs.h | 17 +- src/arch/armv8/platform.c | 3 +- src/arch/armv8/psci.c | 31 ++-- src/arch/armv8/vgic.c | 10 +- src/arch/armv8/vgicv3.c | 4 +- src/arch/armv8/vm.c | 7 +- src/arch/riscv/boot.S | 40 +++-- src/arch/riscv/cache.c | 17 +- src/arch/riscv/inc/arch/interrupts.h | 7 +- src/arch/riscv/interrupts.c | 12 +- src/arch/riscv/iommu.c | 33 ++-- src/arch/riscv/irqc/aia/inc/aplic.h | 24 ++- src/arch/riscv/irqc/aia/inc/vaplic.h | 9 +- src/arch/riscv/irqc/aia/vaplic.c | 79 ++++------ src/arch/riscv/irqc/plic/plic.c | 4 +- src/arch/riscv/mem.c | 5 +- src/arch/riscv/relocate.S | 5 +- src/arch/riscv/sbi.c | 4 +- src/arch/riscv/sync_exceptions.c | 20 +-- src/arch/riscv/vmm.c | 13 +- src/core/cpu.c | 5 +- src/core/inc/config.h | 35 ++--- src/core/inc/types.h | 7 +- src/core/interrupts.c | 4 +- src/core/mem.c | 27 ++-- src/core/mmu/io.c | 3 +- src/core/mmu/mem.c | 148 ++++++++---------- src/core/mpu/config.c | 10 +- src/core/mpu/inc/mem_prot/mem.h | 10 +- src/core/mpu/mem.c | 21 ++- src/core/vm.c | 15 +- src/core/vmm.c | 10 +- src/lib/inc/bit.h | 8 +- src/lib/inc/util.h | 6 +- src/lib/printk.c | 15 +- src/platform/drivers/8250_uart/8250_uart.c | 7 +- src/platform/rpi4/rpi4_desc.c | 4 +- src/platform/zcu102/zcu102_desc.c | 18 +-- src/platform/zcu104/zcu104_desc.c | 18 +-- 66 files changed, 456 insertions(+), 602 deletions(-) diff --git a/src/arch/armv8/aarch32/boot.S b/src/arch/armv8/aarch32/boot.S index bb69c9029..0b613c225 100644 --- a/src/arch/armv8/aarch32/boot.S +++ b/src/arch/armv8/aarch32/boot.S @@ -19,15 +19,14 @@ .data .balign 4 /** - * barrier is used to minimal synchronization in boot - other cores wait for - * bsp to set it. + * barrier is used to minimal synchronization in boot - other cores wait for bsp to set it. */ _barrier: .4byte 0 /** - * The following code MUST be at the base of the image, as this is bao's entry - * point. Therefore .boot section must also be the first in the linker script. - * DO NOT implement any code before the _reset_handler in this section. + * The following code MUST be at the base of the image, as this is bao's entry point. 
Therefore + * .boot section must also be the first in the linker script. DO NOT implement any code before the + * _reset_handler in this section. */ .section ".boot", "ax" .globl _reset_handler @@ -57,10 +56,10 @@ _reset_handler: adr r1, _el2_entry /** - * Linearize cpu id according to the number of clusters and processors - * per cluster. We are only considering two levels of affinity. - * TODO: this should be done some other way. We shouldn't depend on the platform - * description this early in the initialization. + * Linearize cpu id according to the number of clusters and processors per cluster. We are only + * considering two levels of affinity. + * TODO: this should be done some other way. We shouldn't depend on the platform description + * this early in the initialization. */ mov r3, r0, lsr #8 and r3, r3, #0xff @@ -88,8 +87,8 @@ _reset_handler: add r0, r0, r7 /* - * Install vector table physical address early, in case exception occurs - * during this initialization. + * Install vector table physical address early, in case exception occurs during this + * initialization. */ get_phys_addr r3, r4, _hyp_vector_table mcr p15, 4, r3, c12, c0, 0 // write HVBAR @@ -103,9 +102,9 @@ _reset_handler: bne 1f #else /** - * If the cpu master is not fixed, for setting it, we assume only one cpu is - * initially activated which later will turn on all the others. Therefore, there - * is no concurrency when setting CPU_MASTER and no atomic operations are needed. + * If the cpu master is not fixed, for setting it, we assume only one cpu is initially activated + * which later will turn on all the others. Therefore, there is no concurrency when setting + * CPU_MASTER and no atomic operations are needed. */ .pushsection .data _master_set: @@ -124,8 +123,8 @@ _set_master_cpu: 1: /** - * TODO: bring the system to a well known state. This includes disabling - * the MPU, all caches, BP and others, and invalidating them. + * TODO: bring the system to a well known state. This includes disabling the MPU, all caches, + * BP and others, and invalidating them. */ /* Clear stack pointer to avoid unaligned SP exceptions during boot */ @@ -182,7 +181,7 @@ _set_master_cpu: /* This point should never be reached */ b . -/***** Helper functions for boot code. ******/ +/***** Helper functions for boot code. ******/ .global boot_clear .func boot_clear @@ -199,8 +198,7 @@ boot_clear: .endfunc /* - * Code adapted from "Application Note Bare-metal Boot Code for ARMv8-A - * Processors - Version 1.0" + * Code adapted from "Application Note Bare-metal Boot Code for ARMv8-A Processors - Version 1.0" * * r0 - cache level to be invalidated (0 - dl1$, 1 - il1$) */ diff --git a/src/arch/armv8/aarch32/inc/arch/subarch/sysregs.h b/src/arch/armv8/aarch32/inc/arch/subarch/sysregs.h index 66c6dd681..512bf6f17 100644 --- a/src/arch/armv8/aarch32/inc/arch/subarch/sysregs.h +++ b/src/arch/armv8/aarch32/inc/arch/subarch/sysregs.h @@ -59,8 +59,8 @@ } /** - * We give aarch32 registers the same name as aarch64's to which they are - * architecturally mapped to, so that we can use the same name in common code. + * We give aarch32 registers the same name as aarch64's to which they are architecturally mapped + * to, so that we can use the same name in common code. 
*/ SYSREG_GEN_ACCESSORS(esr_el2, 4, c5, c2, 0); // hsr SYSREG_GEN_ACCESSORS_BANKED(elr_el2, elr_hyp); diff --git a/src/arch/armv8/aarch64/boot.S b/src/arch/armv8/aarch64/boot.S index 6863ef3e8..5c2a159ca 100644 --- a/src/arch/armv8/aarch64/boot.S +++ b/src/arch/armv8/aarch64/boot.S @@ -12,16 +12,15 @@ .data .align 3 /** - * barrier is used to minimal synchronization in boot - other cores wait for - * bsp to set it. + * barrier is used to minimal synchronization in boot - other cores wait for bsp to set it. */ .global _boot_barrier _boot_barrier: .8byte 0 /** - * The following code MUST be at the base of the image, as this is bao's entry - * point. Therefore .boot section must also be the first in the linker script. - * DO NOT implement any code before the _reset_handler in this section. + * The following code MUST be at the base of the image, as this is bao's entry point. Therefore + * .boot section must also be the first in the linker script. DO NOT implement any code before the + * _reset_handler in this section. */ .section ".boot", "ax" .globl _reset_handler @@ -31,9 +30,8 @@ _reset_handler: /** * TODO: before anything... - * perform sanity checks on ID registers to ensure support for - * VE and TZ, 4K granule and possibly other needed features. - * Also, check current exception level. Act accordingly. + * perform sanity checks on ID registers to ensure support for VE and TZ, 4K granule and + * possibly other needed features. Also, check current exception level. Act accordingly. * However, we expect to be running at EL2 at this point. */ @@ -51,17 +49,17 @@ _reset_handler: adrp x1, _image_start /* - * Install vector table physical address early, in case exception occurs - * during this initialization. + * Install vector table physical address early, in case exception occurs during this + * initialization. */ adr x3, _hyp_vector_table msr VBAR_EL2, x3 /** - * Linearize cpu id according to the number of clusters and processors per - * cluster. We are only considering two levels of affinity. - * TODO: this should be done some other way. We shouldn't depend on the platform - * description this early in the initialization. + * Linearize cpu id according to the number of clusters and processors per cluster. We are + * only considering two levels of affinity. + * TODO: this should be done some other way. We shouldn't depend on the platform description + * this early in the initialization. */ mov x3, x0, lsr #8 @@ -96,9 +94,9 @@ _reset_handler: cbnz x9, 1f #else /** - * If the cpu master is not fixed, for setting it, we assume only one cpu is - * initially activated which later will turn on all the others. Therefore, there - * is no concurrency when setting CPU_MASTER and no atomic operations are needed. + * If the cpu master is not fixed, for setting it, we assume only one cpu is initially activated + * which later will turn on all the others. Therefore, there is no concurrency when setting + * CPU_MASTER and no atomic operations are needed. */ .pushsection .data _master_set: @@ -116,9 +114,8 @@ _set_master_cpu: 1: /** - * TODO: bring the system to a well known state. This includes disabling - * the MMU (done), all caches (missing i$), BP and others... - * and invalidating them. + * TODO: bring the system to a well known state. This includes disabling the MMU (done), + * all caches (missing i$), BP and others... and invalidating them. 
*/ /* boot_clear stack pointer to avoid unaligned SP exceptions during boot */ @@ -195,8 +192,7 @@ boot_clear: .endfunc /* - * Code taken from "Application Note Bare-metal Boot Code for ARMv8-A - * Processors - Version 1.0" + * Code taken from "Application Note Bare-metal Boot Code for ARMv8-A Processors - Version 1.0" * * x0 - cache level to be invalidated (0 - dl1$, 1 - il1$, 2 - l2$) */ diff --git a/src/arch/armv8/aarch64/inc/arch/spinlock.h b/src/arch/armv8/aarch64/inc/arch/spinlock.h index 6915a6ec9..31143a067 100644 --- a/src/arch/armv8/aarch64/inc/arch/spinlock.h +++ b/src/arch/armv8/aarch64/inc/arch/spinlock.h @@ -22,8 +22,7 @@ static inline void spinlock_init(spinlock_t* lock) } /** - * This lock follows the ticket lock algorithm described in Arm's ARM DDI0487I.a - * Appendix K13. + * This lock follows the ticket lock algorithm described in Arm's ARM DDI0487I.a Appendix K13. */ static inline void spin_lock(spinlock_t* lock) diff --git a/src/arch/armv8/aarch64/inc/arch/subarch/vm.h b/src/arch/armv8/aarch64/inc/arch/subarch/vm.h index 49ff98a39..7f6704de8 100644 --- a/src/arch/armv8/aarch64/inc/arch/subarch/vm.h +++ b/src/arch/armv8/aarch64/inc/arch/subarch/vm.h @@ -12,7 +12,6 @@ struct arch_regs { uint64_t x[31]; uint64_t elr_el2; uint64_t spsr_el2; -} __attribute__((aligned(16))); // makes size always aligned to 16 to respect - // stack alignment +} __attribute__((aligned(16))); // makes size always aligned to 16 to respect stack alignment #endif /* VM_SUBARCH_H */ diff --git a/src/arch/armv8/aborts.c b/src/arch/armv8/aborts.c index 39bc06cfa..825a24842 100644 --- a/src/arch/armv8/aborts.c +++ b/src/arch/armv8/aborts.c @@ -37,8 +37,7 @@ void aborts_data_lower(unsigned long iss, unsigned long far, unsigned long il, u emul.reg_width = 4 + (4 * bit64_extract(iss, ESR_ISS_DA_SF_OFF, ESR_ISS_DA_SF_LEN)); emul.sign_ext = bit64_extract(iss, ESR_ISS_DA_SSE_OFF, ESR_ISS_DA_SSE_LEN); - // TODO: check if the access is aligned. If not, inject an exception in - // the vm + // TODO: check if the access is aligned. If not, inject an exception in the vm if (handler(&emul)) { unsigned long pc_step = 2 + (2 * il); @@ -101,9 +100,9 @@ void smc_handler(unsigned long iss, unsigned long far, unsigned long il, unsigne syscall_handler(iss, far, il, ec); /** - * Since SMCs are trapped due to setting hcr_el2.tsc, the "preferred - * exception return address" is the address of the actual smc instruction. - * Thus, we need to adjust it to the next instruction. + * Since SMCs are trapped due to setting hcr_el2.tsc, the "preferred exception return address" + * is the address of the actual smc instruction. Thus, we need to adjust it to the next + * instruction. */ vcpu_writepc(cpu()->vcpu, vcpu_readpc(cpu()->vcpu) + 4); } diff --git a/src/arch/armv8/armv8-a/aarch32/boot.S b/src/arch/armv8/armv8-a/aarch32/boot.S index 48ceb0698..65420a275 100644 --- a/src/arch/armv8/armv8-a/aarch32/boot.S +++ b/src/arch/armv8/armv8-a/aarch32/boot.S @@ -24,8 +24,8 @@ boot_arch_profile_init: mov r13, lr /* - * Register r12 contains the size of the allocated physical memory between - * the loadable sections of the image and the non-loadable. + * Register r12 contains the size of the allocated physical memory between the loadable + * sections of the image and the non-loadable. 
*/ ldr r10, =extra_allocated_phys_mem diff --git a/src/arch/armv8/armv8-a/aarch32/vmm.c b/src/arch/armv8/armv8-a/aarch32/vmm.c index 232ce24fa..134cee29d 100644 --- a/src/arch/armv8/armv8-a/aarch32/vmm.c +++ b/src/arch/armv8/armv8-a/aarch32/vmm.c @@ -10,8 +10,7 @@ void vmm_arch_init_tcr() { if (cpu()->id == CPU_MASTER) { - /* Despite LPAE, we only support 32-bit guest physical address spaces. - */ + /* Despite LPAE, we only support 32-bit guest physical address spaces. */ parange = PAR_32BIT; } diff --git a/src/arch/armv8/armv8-a/aarch64/boot.S b/src/arch/armv8/armv8-a/aarch64/boot.S index 6f77a0bba..708d6ba14 100644 --- a/src/arch/armv8/armv8-a/aarch64/boot.S +++ b/src/arch/armv8/armv8-a/aarch64/boot.S @@ -18,8 +18,8 @@ boot_arch_profile_init: mov x20, x30 /* - * Register x18 contains the size of the allocated physical memory between - * the loadable sections of the image and the non-loadable. + * Register x18 contains the size of the allocated physical memory between the loadable + * sections of the image and the non-loadable. */ ldr x18, =extra_allocated_phys_mem @@ -172,8 +172,8 @@ map_cpu: setup_cpu: /** - * The operation is purposely commented out. - * We are assuming monitor code already enabled smp coherency. + * The operation is purposely commented out. We are assuming monitor code already enabled smp + * coherency. */ /* setup translation configurations */ @@ -195,14 +195,12 @@ setup_cpu: msr TTBR0_EL2, x3 /** - * TODO: set implementation defined registers such as ACTLR or AMAIR. - * Maybe define a macro for this in a implementation oriented directory - * inside arch. + * TODO: set implementation defined registers such as ACTLR or AMAIR. Maybe define a macro for + * this in a implementation oriented directory inside arch. */ /** - * TODO: invalidate caches, TLBs and branch prediction. - * Need for barriers? + * TODO: invalidate caches, TLBs and branch prediction. Need for barriers? */ ldr x5, =_enter_vas @@ -248,7 +246,7 @@ warm_boot: /* save x0 which contains pointer to saved state psci context */ mov x19, x0 - /* invalidate l1$ */ + /* invalidate l1$ */ mov x0, #0 bl boot_cache_invalidate diff --git a/src/arch/armv8/armv8-a/aarch64/relocate.S b/src/arch/armv8/armv8-a/aarch64/relocate.S index b10139224..cf45a2abe 100644 --- a/src/arch/armv8/armv8-a/aarch64/relocate.S +++ b/src/arch/armv8/armv8-a/aarch64/relocate.S @@ -50,8 +50,7 @@ memcpy: switch_space: /** - * update flat maping page table entry to feature new physical address space - * entry page + * update flat maping page table entry to feature new physical address space entry page */ adr x3, _image_start PTE_INDEX_ASM x4, x3, 1 diff --git a/src/arch/armv8/armv8-a/aarch64/vmm.c b/src/arch/armv8/armv8-a/aarch64/vmm.c index d2fc7e7d9..6e9c17149 100644 --- a/src/arch/armv8/armv8-a/aarch64/vmm.c +++ b/src/arch/armv8/armv8-a/aarch64/vmm.c @@ -10,14 +10,12 @@ void vmm_arch_init_tcr() { /** - * Check available physical address range which will limit - * IPA size. Patch 2-stage page table descriptors if this forces - * the initial lookup to level 1. + * Check available physical address range which will limit IPA size. Patch 2-stage page table + * descriptors if this forces the initial lookup to level 1. * - * In multi-cluster heterogenous we only support the minimum parange - * for a vm's physicall adress space. - * TODO: we could make this more dynamic and adapt it to each virtual - * machine. + * In multi-cluster heterogenous we only support the minimum parange for a vm's physicall + * adress space. 
+ * TODO: we could make this more dynamic and adapt it to each virtual machine. */ static size_t min_parange = 0b111; diff --git a/src/arch/armv8/armv8-a/cpu.c b/src/arch/armv8/armv8-a/cpu.c index 79352d709..f715b986d 100644 --- a/src/arch/armv8/armv8-a/cpu.c +++ b/src/arch/armv8/armv8-a/cpu.c @@ -11,8 +11,8 @@ void cpu_arch_profile_init(cpuid_t cpuid, paddr_t load_addr) { if (cpuid == CPU_MASTER) { - /* power on necessary, but still sleeping, secondary cpu cores - * Assumes CPU zero is doing this */ + /* power on necessary, but still sleeping, secondary cpu cores Assumes CPU zero is doing + * this */ for (size_t cpu_core_id = 0; cpu_core_id < platform.cpu_num; cpu_core_id++) { if (cpu_core_id == cpuid) { continue; @@ -34,8 +34,7 @@ void cpu_arch_profile_idle() switch (err) { case PSCI_E_NOT_SUPPORTED: /** - * If power down is not supported let's just wait for an - * interrupt + * If power down is not supported let's just wait for an interrupt */ asm volatile("wfi"); break; @@ -45,7 +44,7 @@ void cpu_arch_profile_idle() } /** - * Power down was sucessful but did not jump to requested entry - * point. Just return to the architectural + * Power down was sucessful but did not jump to requested entry point. Just return to the + * architectural */ } diff --git a/src/arch/armv8/armv8-a/inc/arch/page_table.h b/src/arch/armv8/armv8-a/inc/arch/page_table.h index a9b576f31..bd955b4cf 100644 --- a/src/arch/armv8/armv8-a/inc/arch/page_table.h +++ b/src/arch/armv8/armv8-a/inc/arch/page_table.h @@ -15,9 +15,9 @@ #define PTE_INDEX_SHIFT(LEVEL) ((9 * (3 - LEVEL)) + 12) #define PTE_INDEX(LEVEL, ADDR) ((ADDR >> PTE_INDEX_SHIFT(LEVEL)) & (0x1FF)) -// We turn clang-format off at this point since this is an assembly macro -// and thus is incorrectly formatted. Despite this being assembly we keep -// this macro here so that is next to its C macro counter-part defined above. +// We turn clang-format off at this point since this is an assembly macro and thus is incorrectly +// formatted. Despite this being assembly we keep this macro here so that is next to its C macro +// counter-part defined above. // clang-format off .macro PTE_INDEX_ASM index, addr, level lsr \index, \addr, #PTE_INDEX_SHIFT(\level) diff --git a/src/arch/armv8/armv8-a/mem.c b/src/arch/armv8/armv8-a/mem.c index b3a62063a..8728ea17a 100644 --- a/src/arch/armv8/armv8-a/mem.c +++ b/src/arch/armv8/armv8-a/mem.c @@ -13,9 +13,8 @@ void as_arch_init(struct addr_space* as) size_t index; /* - * If the address space is a copy of an existing hypervisor space it's not - * possible to use the PT_CPU_REC index to navigate it, so we have to use - * the PT_VM_REC_IND. + * If the address space is a copy of an existing hypervisor space it's not possible to use the + * PT_CPU_REC index to navigate it, so we have to use the PT_VM_REC_IND. */ if (as->type == AS_HYP_CPY || as->type == AS_VM) { index = PT_VM_REC_IND; diff --git a/src/arch/armv8/armv8-a/page_table.c b/src/arch/armv8/armv8-a/page_table.c index 6e76227f3..e1370b9d6 100644 --- a/src/arch/armv8/armv8-a/page_table.c +++ b/src/arch/armv8/armv8-a/page_table.c @@ -34,8 +34,8 @@ struct page_table_dscr armv8_pt_dscr = { }; /** - * This might be modified at initialization depending on the - * value of parange and consequently SL0 in VTCR_EL2. + * This might be modified at initialization depending on the value of parange and consequently SL0 + * in VTCR_EL2. 
*/ struct page_table_dscr armv8_pt_s2_dscr = { .lvls = 4, diff --git a/src/arch/armv8/armv8-a/psci.c b/src/arch/armv8/armv8-a/psci.c index 4689cc2d7..aff0b8e07 100644 --- a/src/arch/armv8/armv8-a/psci.c +++ b/src/arch/armv8/armv8-a/psci.c @@ -26,9 +26,9 @@ static void psci_save_state(enum wakeup_reason wakeup_reason) cpu()->arch.profile.psci_off_state.wakeup_reason = wakeup_reason; /** - * Although the real PSCI implementation is responsible for managing cache - * state, make sure the saved state is in memory as we'll use this on wake - * up before enabling cache to restore basic processor state. + * Although the real PSCI implementation is responsible for managing cache state, make sure the + * saved state is in memory as we'll use this on wake up before enabling cache to restore basic + * processor state. */ cache_flush_range((vaddr_t)&cpu()->arch.profile.psci_off_state, sizeof(cpu()->arch.profile.psci_off_state)); @@ -39,8 +39,7 @@ static void psci_save_state(enum wakeup_reason wakeup_reason) static void psci_restore_state() { /** - * The majority of the state is already restored in assembly routine - * psci_boot_entry. + * The majority of the state is already restored in assembly routine psci_boot_entry. */ gicc_restore_state(&cpu()->arch.profile.psci_off_state.gicc_state); diff --git a/src/arch/armv8/armv8-a/smmuv2.c b/src/arch/armv8/armv8-a/smmuv2.c index 1f44adf67..34fab7eca 100644 --- a/src/arch/armv8/armv8-a/smmuv2.c +++ b/src/arch/armv8/armv8-a/smmuv2.c @@ -89,10 +89,9 @@ static void smmu_check_features() } /** - * TODO: the most common smmuv2 implementation (mmu-500) does not provide - * ptw coherency. So we must add some mechanism software-managed - * coherency mechanism for the vms using the smmu according to the - * result of this feature test. + * TODO: the most common smmuv2 implementation (mmu-500) does not provide ptw coherency. So we + * must add some mechanism software-managed coherency mechanism for the vms using the smmu + * according to the result of this feature test. */ if (!(smmu.hw.glbl_rs0->IDR0 & SMMUV2_IDR0_CTTW_BIT)) { WARNING("smmuv2 does not support coherent page table walks"); @@ -122,8 +121,7 @@ void smmu_init() /* * Alloc pages for global address space. * - * Map the first 4k so we can read all the info we need to further - * allocate smmu registers. + * Map the first 4k so we can read all the info we need to further allocate smmu registers. */ vaddr_t smmu_glbl_rs0 = mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, platform.arch.smmu.base, NUM_PAGES(sizeof(struct smmu_glbl_rs0_hw))); @@ -223,9 +221,8 @@ void smmu_write_ctxbnk(size_t ctx_id, paddr_t root_pt, asid_t vm_id) smmu.hw.glbl_rs1->CBA2R[ctx_id] = SMMUV2_CBAR_VA64; /** - * This should closely match to the VTCR configuration set up in - * vmm_arch_init as we're sharing page table between the VM and its - * smmu context. + * This should closely match to the VTCR configuration set up in vmm_arch_init as we're + * sharing page table between the VM and its smmu context. */ uint32_t tcr = ((parange << SMMUV2_TCR_PS_OFF) & SMMUV2_TCR_PS_MSK); size_t t0sz = 64 - parange_table[parange]; @@ -265,13 +262,12 @@ ssize_t smmu_alloc_sme() * 1. sme is a group; * 2. sme is a device. * - * Groups can be merged together if one is found to be inclusive or equal of - * the other. + * Groups can be merged together if one is found to be inclusive or equal of the other. * * Devices can be added (i.e. merged into) a group, but not together. 
* - * This function searches for existing smes that are compatible for merging - * with the new sme, raising an ERROR when conflicting attributes are found. + * This function searches for existing smes that are compatible for merging with the new sme, + * raising an ERROR when conflicting attributes are found. */ bool smmu_compatible_sme_exists(streamid_t mask, streamid_t id, size_t ctx, bool group) { @@ -291,10 +287,9 @@ bool smmu_compatible_sme_exists(streamid_t mask, streamid_t id, size_t ctx, bool ctx == smmu_sme_get_ctx(sme)) { /* Compatible entry found. * - * If the new entry includes an existing one, there is the - * possibility that it will include other existing entries, it - * is therefore necessary to remove the existing entry and keep - * searching. + * If the new entry includes an existing one, there is the possibility that it will + * include other existing entries, it is therefore necessary to remove the existing + * entry and keep searching. */ if (mask > sme_mask) { bitmap_clear(smmu.sme_bitmap, sme); diff --git a/src/arch/armv8/armv8-r/aarch32/boot.S b/src/arch/armv8/armv8-r/aarch32/boot.S index 7393f00f0..8a69d18a6 100644 --- a/src/arch/armv8/armv8-r/aarch32/boot.S +++ b/src/arch/armv8/armv8-r/aarch32/boot.S @@ -7,10 +7,10 @@ #include #include -/* In Armv8-R there is no virtual address space (VAS). Notwithstanding, - * we define VAS as an identity map of the PAS with MPU rules enforced - * using an equivalent "page size" of (at least) 64 bytes (minimal MPU - * granularity). +/** + * In Armv8-R there is no virtual address space (VAS). Notwithstanding, we define VAS as an + * identity map of the PAS with MPU rules enforced using an equivalent "page size" of (at least) 64 + * bytes (minimal MPU granularity). */ .section ".boot", "ax" .global boot_arch_profile_init @@ -52,9 +52,8 @@ boot_arch_profile_init: /** * Map loadable image (and possibly unloadable) - * If the vm image section is used and has built-in vm images, we need - * to map the loadble and non-loadble region of the image separately. - * Otherwise we can map it as a single region. + * If the vm image section is used and has built-in vm images, we need to map the loadble and + * non-loadble region of the image separately. Otherwise we can map it as a single region. */ add r4, r4, #1 mcr p15, 4, r4, c6, c2, 1 // HPRSELR diff --git a/src/arch/armv8/armv8-r/aarch64/boot.S b/src/arch/armv8/armv8-r/aarch64/boot.S index 08fb5e4af..75a90f1af 100644 --- a/src/arch/armv8/armv8-r/aarch64/boot.S +++ b/src/arch/armv8/armv8-r/aarch64/boot.S @@ -7,10 +7,10 @@ #include #include -/* In Armv8-R there is no virtual address space (VAS). Notwithstanding, - * we define VAS as an identity map of the PAS with MPU rules enforced - * using an equivalent "page size" of (at least) 64 bytes (minimal MPU - * granularity). +/** + * In Armv8-R there is no virtual address space (VAS). Notwithstanding, we define VAS as an + * identity map of the PAS with MPU rules enforced using an equivalent "page size" of (at least) 64 + * bytes (minimal MPU granularity). */ .section ".boot", "ax" .global boot_arch_profile_init @@ -50,9 +50,8 @@ boot_arch_profile_init: /** * Map loadable image (and possibly unloadable) - * If the vm image section is used and has built-in vm images, we need - * to map the loadble and non-loadble region of the image separately. - * Otherwise we can map it as a single region. + * If the vm image section is used and has built-in vm images, we need to map the loadble and + * non-loadble region of the image separately. 
Otherwise we can map it as a single region. */ msr prselr_el2, x4 isb diff --git a/src/arch/armv8/armv8-r/inc/arch/profile/cpu.h b/src/arch/armv8/armv8-r/inc/arch/profile/cpu.h index f4e2a6050..996c49a18 100644 --- a/src/arch/armv8/armv8-r/inc/arch/profile/cpu.h +++ b/src/arch/armv8/armv8-r/inc/arch/profile/cpu.h @@ -17,8 +17,7 @@ struct cpu_arch_profile { struct { BITMAP_ALLOC(bitmap, MPU_ARCH_MAX_NUM_ENTRIES); /** - * A locked region means that it can never be removed from the MPU. - * For example, + * A locked region means that it can never be removed from the MPU. For example, */ BITMAP_ALLOC(locked, MPU_ARCH_MAX_NUM_ENTRIES); struct mpu_perms { @@ -26,9 +25,8 @@ struct cpu_arch_profile { perms_t el1; } perms[MPU_ARCH_MAX_NUM_ENTRIES]; /** - * We maintain an ordered list of the regions currently in the mpu - * to simplify the merging algorithm when mapping an overllaping - * region. + * We maintain an ordered list of the regions currently in the mpu to simplify the merging + * algorithm when mapping an overllaping region. */ struct { struct list list; diff --git a/src/arch/armv8/armv8-r/mpu.c b/src/arch/armv8/armv8-r/mpu.c index 79d057020..70d33f5cb 100644 --- a/src/arch/armv8/armv8-r/mpu.c +++ b/src/arch/armv8/armv8-r/mpu.c @@ -111,8 +111,7 @@ static inline perms_t mem_vmpu_entry_perms(struct mp_region* mpr) static inline void mpu_entry_set_perms(struct mp_region* mpr, struct mpu_perms mpu_perms) { - // TODO: should we check this is following the allowed permission - // combinations? + // TODO: should we check this is following the allowed permission combinations? bool el1_priv = mpu_perms.el1 != PERM_NONE; perms_t perms = mpu_perms.el1 | mpu_perms.el2; @@ -201,10 +200,9 @@ bool mpu_map(priv_t priv, struct mp_region* mpr) while (size_left > 0 && !failed) { /** - * Since we'll be checking for overlapping regions in order, there - * will be at most two regions to map in a given iteration. This - * happens when the previous iteration found an overlapping region - * that is fully contained by the new region. + * Since we'll be checking for overlapping regions in order, there will be at most two + * regions to map in a given iteration. This happens when the previous iteration found an + * overlapping region that is fully contained by the new region. */ struct mp_region* new_reg; @@ -216,17 +214,15 @@ bool mpu_map(priv_t priv, struct mp_region* mpr) break; } - // As Armv8-R does not allow overlapping regions, we must first check - // if usch regions already exist. Specifically, for the case where the - // regions has hypervisor permissions only, and this is a map - // targetting a guest mpu, we just need to flip the guest permission - // bit. This will allow us to share regions between guest and hypevisor - // to, for example, (i) share the use of a peripheral (mainly uart for - // debugging purposes), or (ii) share a RW page between hypervisor and - // guest. Although having a RO page for guest while RW for the - // hypervisor is highly useful, this MPU does not allow it. That said, - // in the case we need it in the future, we'll have to implement a - // mechanism for that based on traps. + // As Armv8-R does not allow overlapping regions, we must first check if usch regions + // already exist. Specifically, for the case where the regions has hypervisor permissions + // only, and this is a map targetting a guest mpu, we just need to flip the guest + // permission bit. 
This will allow us to share regions between guest and hypevisor to, for + // example, (i) share the use of a peripheral (mainly uart for debugging purposes), or (ii) + // share a RW page between hypervisor and guest. Although having a RO page for guest while + // RW for the hypervisor is highly useful, this MPU does not allow it. That said, in the + // case we need it in the future, we'll have to implement a mechanism for that based on + // traps. bool overlaped = false; perms_t new_perms = mem_vmpu_entry_perms(new_reg); @@ -249,26 +245,24 @@ bool mpu_map(priv_t priv, struct mp_region* mpr) } if (!mem_regions_overlap(new_reg, &overlapped_reg)) { - // If we are not overlapping, continue to search for overlapped - // regions until we check all entries. This should be the most - // frequent case, so the overhead for the checks on overllap - // will rarely execute. + // If we are not overlapping, continue to search for overlapped regions until we + // check all entries. This should be the most frequent case, so the overhead for + // the checks on overllap will rarely execute. prev = mpid; continue; } overlaped = true; if (mpu_entry_has_priv(mpid, priv)) { - // We don't allow overlapping regions of the same privilege. - // This is something that should be checked at the vmpu level, - // but we re-check it here anyway. + // We don't allow overlapping regions of the same privilege. This is something that + // should be checked at the vmpu level, but we re-check it here anyway. failed = true; break; } - // We only allow to bump up permissions if the overlapped region - // is a RO hypervisor region. Otherwise permissions have to be - // RW in both regions. We don't allow to overlap executable regions. + // We only allow to bump up permissions if the overlapped region is a RO hypervisor + // region. Otherwise permissions have to be RW in both regions. We don't allow to + // overlap executable regions. struct mpu_perms overlapped_perms = cpu()->arch.profile.mpu.perms[mpid]; struct mpu_perms overlap_perms = overlapped_perms; priv_t overlapped_priv; @@ -285,23 +279,22 @@ bool mpu_map(priv_t priv, struct mp_region* mpr) if (((overlap_perms.el1 & PERM_RW) == PERM_R) && ((overlap_perms.el2 & PERM_W) != PERM_NONE)) { - // We allow promoting read/write privielges of the hypervisor - // region to match the guest's. However, this combination - // promotes the guest privielges, which we don't allow. + // We allow promoting read/write privielges of the hypervisor region to match the + // guest's. However, this combination promotes the guest privielges, which we don't + // allow. failed = true; break; } if ((overlap_perms.el1 & PERM_X) != (overlap_perms.el2 & PERM_X)) { - // Unless explicitly mapped, we don't promote execution - // privileges. + // Unless explicitly mapped, we don't promote execution privileges. failed = true; break; } - // The Armv8-R MPU does not allow us to have different permissions - // for hypervisor and guest. So we must fail if asked to add an - // overlapping mapping with different permissions or attributes + // The Armv8-R MPU does not allow us to have different permissions for hypervisor and + // guest. So we must fail if asked to add an overlapping mapping with different + // permissions or attributes if (mpu_entry_attrs(new_reg) != mpu_entry_attrs(&overlapped_reg)) { failed = true; break; @@ -408,10 +401,9 @@ bool mpu_map(priv_t priv, struct mp_region* mpr) } /** - * Check if we can merge the current region with the region - * right before and/or right after. 
This can only be done if - * they are adjacent and have the same exect flags (i.e. - * permissions and memory attribtues). + * Check if we can merge the current region with the region right before and/or right + * after. This can only be done if they are adjacent and have the same exect flags + * (i.e. permissions and memory attribtues). */ if ((prev != INVALID_MPID) && !mpu_entry_locked(prev)) { @@ -442,8 +434,7 @@ bool mpu_map(priv_t priv, struct mp_region* mpr) } /** - * If we can merge the region do it. Otherwise, allocate a new - * entry and set it. + * If we can merge the region do it. Otherwise, allocate a new entry and set it. */ if (merge_mpid != INVALID_MPID) { mpu_entry_update_priv_perms(priv, merge_mpid, new_perms); @@ -544,8 +535,8 @@ bool mpu_unmap(priv_t priv, struct mp_region* mpr) size_left -= overlap_size; } - // TODO: check if we can merge new regions after unmapping a given - // privilege from a shared region + // TODO: check if we can merge new regions after unmapping a given privilege from a shared + // region return size_left == 0; } @@ -563,8 +554,8 @@ void mpu_init() bitmap_set(cpu()->arch.profile.mpu.locked, mpid); /** - * We are assuming all initial regions have all hyp perms. - * This might change in the future. + * We are assuming all initial regions have all hyp perms. This might change in the + * future. */ cpu()->arch.profile.mpu.perms[mpid].el1 = PERM_NONE; cpu()->arch.profile.mpu.perms[mpid].el2 = PERM_RWX; diff --git a/src/arch/armv8/armv8-r/psci.c b/src/arch/armv8/armv8-r/psci.c index e69770e1e..6e13691bf 100644 --- a/src/arch/armv8/armv8-r/psci.c +++ b/src/arch/armv8/armv8-r/psci.c @@ -6,11 +6,10 @@ #include /** - * In Armv8-R systems there is no standard firmware readily available that - * implements PSCI for each platform. Therefore, we provide a minimal - * implementation of the necessary PSCI functions. - * Note this might change in the future, or we might decide to implement - * PSCI in Bao itself for each platform. + * In Armv8-R systems there is no standard firmware readily available that implements PSCI for each + * platform. Therefore, we provide a minimal implementation of the necessary PSCI functions. Note + * this might change in the future, or we might decide to implement PSCI in Bao itself for each + * platform. */ int32_t psci_standby() diff --git a/src/arch/armv8/armv8-r/vmm.c b/src/arch/armv8/armv8-r/vmm.c index 4a078f623..17498f958 100644 --- a/src/arch/armv8/armv8-r/vmm.c +++ b/src/arch/armv8/armv8-r/vmm.c @@ -16,8 +16,8 @@ void vmm_arch_profile_init() { if (cpu()->id == CPU_MASTER) { /** - * Since there is no firmware in cortex-r platforms, we need to - * initialize the system counter. + * Since there is no firmware in cortex-r platforms, we need to initialize the system + * counter. */ volatile struct generic_timer_cntctrl* timer_ctl; timer_ctl = (struct generic_timer_cntctrl*)mem_alloc_map_dev(&cpu()->as, SEC_HYP_PRIVATE, diff --git a/src/arch/armv8/cache.c b/src/arch/armv8/cache.c index 868cf5df2..82bb8b2b4 100644 --- a/src/arch/armv8/cache.c +++ b/src/arch/armv8/cache.c @@ -13,8 +13,8 @@ void cache_arch_enumerate(struct cache* dscrp) { if (platform.cache.lvls != 0) { /** - * No need to probe cache registers, cache topology is described - * in the platform descrption. + * No need to probe cache registers, cache topology is described in the platform + * descrption. 
*/ *dscrp = platform.cache; } diff --git a/src/arch/armv8/cpu.c b/src/arch/armv8/cpu.c index 972d4b93b..5dc834002 100644 --- a/src/arch/armv8/cpu.c +++ b/src/arch/armv8/cpu.c @@ -27,9 +27,9 @@ void cpu_arch_idle() cpu_arch_profile_idle(); /* - * In case the profile implementation does not jump to a predefined wake-up - * point and just returns from the profile, manually rewind stack and jump - * to idle wake up. Therefore, we should not return after this point. + * In case the profile implementation does not jump to a predefined wake-up point and just + * returns from the profile, manually rewind stack and jump to idle wake up. Therefore, we + * should not return after this point. */ asm volatile("mov sp, %0\n\r" "b cpu_idle_wakeup\n\r" ::"r"(&cpu()->stack[STACK_SIZE])); diff --git a/src/arch/armv8/gic.c b/src/arch/armv8/gic.c index 5a6d897a9..e4217e137 100644 --- a/src/arch/armv8/gic.c +++ b/src/arch/armv8/gic.c @@ -29,8 +29,7 @@ void gicd_init() /* Bring distributor to known state */ for (size_t i = GIC_NUM_PRIVINT_REGS; i < GIC_NUM_INT_REGS(int_num); i++) { /** - * Make sure all interrupts are not enabled, non pending, - * non active. + * Make sure all interrupts are not enabled, non pending, non active. */ gicd->IGROUPR[i] = -1; gicd->ICENABLER[i] = -1; diff --git a/src/arch/armv8/gicv2.c b/src/arch/armv8/gicv2.c index 3fbbc3349..e5aa98ecd 100644 --- a/src/arch/armv8/gicv2.c +++ b/src/arch/armv8/gicv2.c @@ -94,8 +94,7 @@ void gic_cpu_init() { for (size_t i = 0; i < GIC_NUM_INT_REGS(GIC_CPU_PRIV); i++) { /** - * Make sure all private interrupts are not enabled, non pending, - * non active. + * Make sure all private interrupts are not enabled, non pending, non active. */ gicd->ICENABLER[i] = -1; gicd->ICPENDR[i] = -1; diff --git a/src/arch/armv8/inc/arch/sysregs.h b/src/arch/armv8/inc/arch/sysregs.h index b14666357..8d8414fc7 100644 --- a/src/arch/armv8/inc/arch/sysregs.h +++ b/src/arch/armv8/inc/arch/sysregs.h @@ -111,8 +111,8 @@ #define TCR_TBI (1 << 20) /** - * Default hypervisor translation control - * The PS field must be filled at runtime by first reading parange + * Default hypervisor translation control. The PS field must be filled at runtime by first reading + * parange */ #define TCR_EL2_DFLT \ (TCR_RES1 | TCR_TG0_4K | TCR_PS_48B | TCR_ORGN0_WB_RA_WA | TCR_IRGN0_WB_RA_WA | TCR_T0SZ(16) | \ @@ -196,10 +196,8 @@ #define MAIR_IWA (0x1 << 0) /** - * Default hypervisor memory attributes - * 0 -> Device-nGnRnE - * 1 -> Normal, Inner/Outer WB/WA/RA - * 2 -> Device-nGnRE + * Default hypervisor memory attributes 0 -> Device-nGnRnE 1 -> Normal, Inner/Outer WB/WA/RA 2 -> + * Device-nGnRE */ #define MAIR_EL2_DFLT \ (((MAIR_OWBNT | MAIR_ORA | MAIR_OWA | MAIR_IWBNT | MAIR_IRA | MAIR_IWA) << MAIR_ATTR_WIDTH) | \ @@ -274,8 +272,8 @@ #define CCSIDR_NUMSETS_LEN 15 /** - * Below are platform implementation registers related to a53. - * TODO: move them to a a53 specific file. + * Below are platform implementation registers related to a53. TODO: move them to a a53 specific + * file. 
*/ /* CPUECTLR_EL1 - CPU Extended Control Register */ @@ -454,8 +452,7 @@ #define ICC_SGI1R_CASE (0x18) #define ICC_SGI1R_ADDR (0x3A3016) -// #define ICH_AP0R_EL2 S3_4_C12_C8 _0-3 -// #define ICH_AP1R_EL2 S3_4_C12_C9 _0-3 +// #define ICH_AP0R_EL2 S3_4_C12_C8 _0-3 #define ICH_AP1R_EL2 S3_4_C12_C9 _0-3 #define ICH_HCR_EL2 S3_4_C12_C11_0 #define ICH_VTR_EL2 S3_4_C12_C11_1 #define ICH_MISR_EL2 S3_4_C12_C11_2 diff --git a/src/arch/armv8/platform.c b/src/arch/armv8/platform.c index bce489b13..0d751da09 100644 --- a/src/arch/armv8/platform.c +++ b/src/arch/armv8/platform.c @@ -9,8 +9,7 @@ unsigned long platform_arch_cpuid_to_mpidr(const struct platform* plat, cpuid_t cpuid) { if (cpuid > plat->cpu_num) { - return ~(~MPIDR_RES1 & MPIDR_RES0_MSK); // return an invlid mpidr by - // inverting res bits + return ~(~MPIDR_RES1 & MPIDR_RES0_MSK); // return an invlid mpidr by inverting res bits } unsigned long mpidr = 0; diff --git a/src/arch/armv8/psci.c b/src/arch/armv8/psci.c index a76a728c8..ccb04c4ea 100644 --- a/src/arch/armv8/psci.c +++ b/src/arch/armv8/psci.c @@ -49,9 +49,8 @@ int32_t psci_cpu_suspend_handler(uint32_t power_state, unsigned long entrypoint, unsigned long context_id) { /** - * !! Ignoring the rest of the requested powerstate for now. - * This might be a problem howwver since powerlevel and stateid are - * implementation defined. + * !! Ignoring the rest of the requested powerstate for now. This might be a problem howwver + * since powerlevel and stateid are implementation defined. */ uint32_t state_type = power_state & PSCI_STATE_TYPE_BIT; int32_t ret; @@ -66,11 +65,9 @@ int32_t psci_cpu_suspend_handler(uint32_t power_state, unsigned long entrypoint, } else { // PSCI_STATE_TYPE_STANDBY: /** - * TODO: ideally we would emmit a standby request to PSCI - * (currently, ATF), but when we do, we do not wake up on interrupts - * on the current development target zcu104. - * We should understand why. To circunvent this, we directly emmit a - * wfi + * TODO: ideally we would emmit a standby request to PSCI (currently, ATF), but when we + * do, we do not wake up on interrupts on the current development target zcu104. We should + * understand why. To circunvent this, we directly emmit a wfi */ // ret = psci_standby(); asm volatile("wfi\n\r"); @@ -83,9 +80,8 @@ int32_t psci_cpu_suspend_handler(uint32_t power_state, unsigned long entrypoint, int32_t psci_cpu_off_handler(void) { /** - * Right now we only support one vcpu por cpu, so passthrough the request - * directly to the monitor psci implementation. Later another vcpu, will - * call cpu_on on this vcpu()-> + * Right now we only support one vcpu por cpu, so passthrough the request directly to the + * monitor psci implementation. Later another vcpu, will call cpu_on on this vcpu()-> */ spin_lock(&cpu()->vcpu->arch.psci_ctx.lock); @@ -142,15 +138,14 @@ int32_t psci_cpu_on_handler(unsigned long target_cpu, unsigned long entrypoint, int32_t psci_affinity_info_handler(unsigned long target_affinity, uint32_t lowest_affinity_level) { - /* return ON, if at least one core in the affinity instance: has been - enabled with a call to CPU_ON, and that core has not called CPU_OFF */ + /* return ON, if at least one core in the affinity instance: has been enabled with a call to + CPU_ON, and that core has not called CPU_OFF */ - /* return off if all of the cores in the affinity instance have called - CPU_OFF and each of these calls has been processed by the PSCI - implementation. 
*/ + /* return off if all of the cores in the affinity instance have called CPU_OFF and each of + these calls has been processed by the PSCI implementation. */ - /* return ON_PENDING if at least one core in the affinity instance is in - the ON_PENDING state */ + /* return ON_PENDING if at least one core in the affinity instance is in the ON_PENDING state + */ /** * TODO diff --git a/src/arch/armv8/vgic.c b/src/arch/armv8/vgic.c index e2f57e9c8..b306a06b0 100644 --- a/src/arch/armv8/vgic.c +++ b/src/arch/armv8/vgic.c @@ -931,8 +931,7 @@ void vgic_ipi_handler(uint32_t event, uint64_t data) if (vm_id != cpu()->vcpu->vm->id) { ERROR("received vgic3 msg target to another vcpu"); - // TODO: need to fetch vcpu from other vm if the taget vm for this - // is not active + // TODO: need to fetch vcpu from other vm if the taget vm for this is not active } switch (event) { @@ -1107,10 +1106,9 @@ void gic_maintenance_handler(irqid_t irq_id) size_t vgic_get_itln(const struct vgic_dscrp* vgic_dscrp) { /** - * By default the guest sees the real platforms interrupt line number - * in the virtual gic. However a user can control this using the - * interrupt_num in the platform description configuration which be at - * least the number of ppis and a multiple of 32. + * By default the guest sees the real platforms interrupt line number in the virtual gic. + * However a user can control this using the interrupt_num in the platform description + * configuration which be at least the number of ppis and a multiple of 32. */ size_t vtyper_itln = bit32_extract(gicd->TYPER, GICD_TYPER_ITLN_OFF, GICD_TYPER_ITLN_LEN); diff --git a/src/arch/armv8/vgicv3.c b/src/arch/armv8/vgicv3.c index 4a104399b..8a20e8028 100644 --- a/src/arch/armv8/vgicv3.c +++ b/src/arch/armv8/vgicv3.c @@ -264,8 +264,8 @@ bool vgic_icc_sgir_handler(struct emul_access* acc) trgtlist = cpu()->vcpu->vm->cpus & ~(1U << cpu()->vcpu->phys_id); } else { /** - * TODO: we are assuming the vm has a single cluster. Change this - * when adding virtual cluster support. + * TODO: we are assuming the vm has a single cluster. Change this when adding virtual + * cluster support. */ trgtlist = vm_translate_to_pcpu_mask(cpu()->vcpu->vm, ICC_SGIR_TRGLSTFLT(sgir), cpu()->vcpu->vm->cpu_num); diff --git a/src/arch/armv8/vm.c b/src/arch/armv8/vm.c index ad04ed340..317a876b4 100644 --- a/src/arch/armv8/vm.c +++ b/src/arch/armv8/vm.c @@ -68,16 +68,15 @@ void vcpu_arch_reset(struct vcpu* vcpu, vaddr_t entry) sysreg_cntvoff_el2_write(0); /** - * See ARMv8-A ARM section D1.9.1 for registers that must be in a known - * state at reset. + * See ARMv8-A ARM section D1.9.1 for registers that must be in a known state at reset. */ sysreg_sctlr_el1_write(SCTLR_RES1); sysreg_cntkctl_el1_write(0); sysreg_pmcr_el0_write(0); /** - * TODO: ARMv8-A ARM mentions another implementation optional registers - * that reset to a known value. + * TODO: ARMv8-A ARM mentions another implementation optional registers that reset to a known + * value. */ } diff --git a/src/arch/riscv/boot.S b/src/arch/riscv/boot.S index b486d4a6a..0dbdb505f 100644 --- a/src/arch/riscv/boot.S +++ b/src/arch/riscv/boot.S @@ -14,8 +14,8 @@ #define PTE_INDEX_SHIFT(LEVEL) ((9 * (PT_LVLS - 1 - (LEVEL))) + 12) /** - * Calculates the index or offset of a page table entry for given virtual - * address(addr) at a given level of page table. + * Calculates the index or offset of a page table entry for given virtual address(addr) at a given + * level of page table. 
*/ .macro PTE_INDEX_ASM index, addr, level srl \index, \addr, PTE_INDEX_SHIFT(\level) @@ -25,8 +25,8 @@ .endm /** - * Calculates the pointer to a pte given the page table pointer(pt), - * the page table level (levle) and the target virtual address (va) + * Calculates the pointer to a pte given the page table pointer(pt), the page table level (levle) + * and the target virtual address (va) */ .macro PTE_PTR pte, pt, level, va PTE_INDEX_ASM s1, \va, \level @@ -34,8 +34,7 @@ .endm /** - * Creates a page table entry (pte) for a given physical address (pa) - * and set of flags. + * Creates a page table entry (pte) for a given physical address (pa) and set of flags. */ .macro PTE_FILL pte, pa, flags srl \pte, \pa, 2 @@ -61,15 +60,14 @@ _bss_end_sym: .8byte _bss_end .data .align 3 /** - * barrier is used to minimal synchronization in boot - other cores wait for - * bsp to set it. + * barrier is used to minimal synchronization in boot - other cores wait for bsp to set it. */ _barrier: .8byte 0 /** - * The following code MUST be at the base of the image, as this is bao's entry - * point. Therefore .boot section must also be the first in the linker script. - * DO NOT implement any code before the _reset_handler in this section. + * The following code MUST be at the base of the image, as this is bao's entrypoint. Therefore + * .boot section must also be the first in the linker script. DO NOT implement any code before the + * _reset_handler in this section. */ .section ".boot", "ax" .globl _reset_handler @@ -77,19 +75,17 @@ _barrier: .8byte 0 _reset_handler: /** - * The previous boot stage should pass the following arguments: + * The previous boot stage should pass the following arguments: * a0 -> hart id * a1 -> config binary load addr - * The following registers are reserved to be passed to init function - * as arguments: + * The following registers are reserved to be passed to init function as arguments: * a0 -> hart id * a1 -> contains image base load address * a2 -> config binary load address (originally passed in a1) * - * The remaining code must use t0-t6 as scratchpad registers in the main - * flow and s0-s5 in auxiliary routines. s6-s11 are used to hold constants - * a3-a7 are used as arguments and return values (can be also corrputed in - * auxiliary routines). + * The remaining code must use t0-t6 as scratchpad registers in the main flow and s0-s5 in + * auxiliary routines. s6-s11 are used to hold constants a3-a7 are used as arguments and return + * values (can be also corrputed in auxiliary routines). */ mv a2, a1 @@ -97,8 +93,8 @@ _reset_handler: la s6, extra_allocated_phys_mem /** - * Setup stvec early. In case of we cause an exception in this boot code - * we end up at a known place. + * Setup stvec early. In case of we cause an exception in this boot code we end up at a known + * place. */ la t0, _hyp_trap_vector and t0, t0, ~STVEC_MODE_MSK @@ -106,8 +102,8 @@ _reset_handler: csrw stvec, t0 /** - * Bring processor to known supervisor state: make sure interrupts - * and memory translation are disabled. + * Bring processor to known supervisor state: make sure interrupts and memory translation are + * disabled. */ csrw sstatus, zero diff --git a/src/arch/riscv/cache.c b/src/arch/riscv/cache.c index c860c99a4..621e99129 100644 --- a/src/arch/riscv/cache.c +++ b/src/arch/riscv/cache.c @@ -7,18 +7,16 @@ #include /** - * The riscv spec does not include cache maintenance. 
There are current - * efforts to define and standardize a set of cache management instructions, - * but for now this is platform dependent. + * The riscv spec does not include cache maintenance. There are current efforts to define and + * standardize a set of cache management instructions, but for now this is platform dependent. */ void cache_arch_enumerate(struct cache* dscrp) { /** - * Currently the typical of way for system software to discover cache - * topology is to read it of a dtb passed by the bootloader. As we are not - * implementing an fdt parser, a platform port must add it to the platform - * description. + * Currently the typical of way for system software to discover cache topology is to read it of + * a dtb passed by the bootloader. As we are not implementing an fdt parser, a platform port + * must add it to the platform description. */ *dscrp = platform.cache; } @@ -26,9 +24,8 @@ void cache_arch_enumerate(struct cache* dscrp) __attribute__((weak)) void cache_flush_range(vaddr_t base, size_t size) { /** - * A platform must define its custom cache flush operation, otherwise - * certain mechanisms such as coloring and hypervisor relocation will - * most probably fail. + * A platform must define its custom cache flush operation, otherwise certain mechanisms such + * as coloring and hypervisor relocation will most probably fail. */ WARNING("trying to flush caches but the operation is not defined for this " "platform"); diff --git a/src/arch/riscv/inc/arch/interrupts.h b/src/arch/riscv/inc/arch/interrupts.h index db04c8ba0..aba7d60ed 100644 --- a/src/arch/riscv/inc/arch/interrupts.h +++ b/src/arch/riscv/inc/arch/interrupts.h @@ -13,10 +13,9 @@ #define APLIC (2) /** - * In riscv, the ipi (software interrupt) and timer interrupts dont actually - * have an ID as their are treated differently from external interrupts - * routed by the external interrupt controller, the PLIC. - * Will define their ids as the ids after the maximum possible in the PLIC. + * In riscv, the ipi (software interrupt) and timer interrupts dont actually have an ID as their + * are treated differently from external interrupts routed by the external interrupt controller, + * the PLIC. Will define their ids as the ids after the maximum possible in the PLIC. */ #define SOFT_INT_ID (IRQC_MAX_INTERRUPTS + 1) #define TIMR_INT_ID (IRQC_MAX_INTERRUPTS + 2) diff --git a/src/arch/riscv/interrupts.c b/src/arch/riscv/interrupts.c index 2fe8616f7..dffb1364a 100644 --- a/src/arch/riscv/interrupts.c +++ b/src/arch/riscv/interrupts.c @@ -77,13 +77,11 @@ void interrupts_arch_handle() case SCAUSE_CODE_STI: interrupts_handle(TIMR_INT_ID); /** - * Clearing the timer pending bit actually has no effect. We could - * re-program the timer to "infinity" but we don't know if the - * handler itself re-programed the timer with a new event. - * Therefore, at this point, we must trust the handler either - * correctly re-programms the timer or disables the interrupt so the - * cpu is not starved by continously triggering the timer interrupt - * (spoiler alert, it does!) + * Clearing the timer pending bit actually has no effect. We could re-program the timer + * to "infinity" but we don't know if the handler itself re-programed the timer with a + * new event. Therefore, at this point, we must trust the handler either correctly + * re-programms the timer or disables the interrupt so the cpu is not starved by + * continously triggering the timer interrupt (spoiler alert, it does!) 
*/ break; case SCAUSE_CODE_SEI: diff --git a/src/arch/riscv/iommu.c b/src/arch/riscv/iommu.c index 007f892e3..085233f30 100644 --- a/src/arch/riscv/iommu.c +++ b/src/arch/riscv/iommu.c @@ -301,23 +301,19 @@ void rv_iommu_init(void) // Read and check caps rv_iommu_check_features(); - // Set fctl.WSI - // We will be first using WSI as IOMMU interrupt mechanism. Then MSIs will - // be included + // Set fctl.WSI We will be first using WSI as IOMMU interrupt mechanism. Then MSIs will be + // included rv_iommu.hw.reg_ptr->fctl = RV_IOMMU_FCTL_DEFAULT; - // Configure interrupt vectors (icvec) - // We use a different vector for each interrupt source CQ and FQ by now + // Configure interrupt vectors (icvec) We use a different vector for each interrupt source CQ + // and FQ by now rv_iommu.hw.reg_ptr->icvec = RV_IOMMU_ICVEC_DEFAULT; // Clear all IP flags (ipsr) rv_iommu.hw.reg_ptr->ipsr = RV_IOMMU_IPSR_CLEAR; - // TODO - // Allocate memory for CQ - // Configure cqb with queue size and base address. Clear cqt - // Allocate IRQ for CQ - // Enable CQ (cqcsr) + // TODO Allocate memory for CQ Configure cqb with queue size and base address. Clear cqt + // Allocate IRQ for CQ Enable CQ (cqcsr) // Allocate memory for FQ (aligned to 4kiB) vaddr_t fq_vaddr = (vaddr_t)mem_alloc_page(NUM_PAGES(sizeof(struct fq_entry) * FQ_N_ENTRIES), @@ -386,8 +382,8 @@ bool rv_iommu_alloc_did(deviceid_t dev_id) } /** - * Program DDT entry with base address of the root PT, VMID and translation - * configuration. Enable DC. + * Program DDT entry with base address of the root PT, VMID and translation configuration. Enable + * DC. * * @dev_id: device_id to index DDT * @vm: VM to which the device is being assigned @@ -410,9 +406,8 @@ void rv_iommu_write_ddt(deviceid_t dev_id, struct vm* vm, paddr_t root_pt) iohgatp |= RV_IOMMU_IOHGATP_SV39X4; rv_iommu.hw.ddt[dev_id].iohgatp = iohgatp; - // TODO: - // Configure first-stage translation. Second-stage only by now - // Configure MSI translation + // TODO: Configure first-stage translation. Second-stage only by now Configure MSI + // translation } spin_unlock(&rv_iommu.ddt_lock); } @@ -426,8 +421,7 @@ void rv_iommu_write_ddt(deviceid_t dev_id, struct vm* vm, paddr_t root_pt) */ bool iommu_arch_init() { - // By checking platform.arch.iommu.base we verify if an IOMMU is present in - // the platform + // By checking platform.arch.iommu.base we verify if an IOMMU is present in the platform if (cpu()->id == CPU_MASTER && platform.arch.iommu.base) { rv_iommu_init(); return true; @@ -437,9 +431,8 @@ bool iommu_arch_init() } /** - * Initialize the DDT entry indexed by device_id for the given VM - * Configure corresponding DDT entry with root PT base addr, VMID (GSCID) and - * device config + * Initialize the DDT entry indexed by device_id for the given VM Configure corresponding DDT entry + * with root PT base addr, VMID (GSCID) and device config * * @vm: VM struct to which the device will be assigned. * @dev_id: device_id of the device to be added. diff --git a/src/arch/riscv/irqc/aia/inc/aplic.h b/src/arch/riscv/irqc/aia/inc/aplic.h index c0c518932..618ab197f 100644 --- a/src/arch/riscv/irqc/aia/inc/aplic.h +++ b/src/arch/riscv/irqc/aia/inc/aplic.h @@ -135,8 +135,8 @@ uint32_t aplic_get_sourcecfg(irqid_t intp_id); void aplic_set_pend(irqid_t intp_id); /** - * @brief Potentially modifies the pending bits for interrupt - * sources reg_indx × 32 through reg_indx × 32 + 31. + * @brief Potentially modifies the pending bits for interrupt sources reg_indx × 32 through + * reg_indx × 32 + 31. 
* * @param reg_indx register index * @param reg_val register value to be written. @@ -153,8 +153,7 @@ void aplic_set_pend_reg(size_t reg_indx, uint32_t reg_val); bool aplic_get_pend(irqid_t intp_id); /** - * @brief Reads the pending bits for interrupt sources - * reg_indx × 32 through reg_indx × 32 + 31. + * @brief Reads the pending bits for interrupt sources reg_indx × 32 through reg_indx × 32 + 31. * * @param reg_indx register index * @return a 32 bit value containing interrupts pending state for reg_indx. @@ -169,8 +168,7 @@ uint32_t aplic_get_pend_reg(size_t reg_indx); void aplic_clr_pend(irqid_t intp_id); /** - * @brief Modifies the pending bits for interrupt - * sources reg_indx × 32 through reg_indx × 32 + 31. + * @brief Modifies the pending bits for interrupt sources reg_indx × 32 through reg_indx × 32 + 31. * * @param reg_indx register index * @return register value to be written. @@ -178,8 +176,8 @@ void aplic_clr_pend(irqid_t intp_id); void aplic_clr_pend_reg(size_t reg_indx, uint32_t reg_val); /** - * @brief Read the current rectified value for interrupt sources - * reg_indx × 32 through reg_indx × 32 + 31. + * @brief Read the current rectified value for interrupt sources reg_indx × 32 through reg_indx × + * 32 + 31. * * @param reg_indx register index * @return a 32 bit value containing interrupts rectified state for reg_indx. @@ -194,8 +192,7 @@ uint32_t aplic_get_inclrip_reg(size_t reg_indx); void aplic_set_enbl(irqid_t intp_id); /** - * @brief Modifies the enable bits for interrupt - * sources reg_indx × 32 through reg_indx × 32 + 31. + * @brief Modifies the enable bits for interrupt sources reg_indx × 32 through reg_indx × 32 + 31. * * @param reg_indx register index * @param reg_val register value to be written. @@ -219,8 +216,7 @@ bool aplic_get_enbl(irqid_t intp_id); void aplic_clr_enbl(irqid_t intp_id); /** - * @brief Modifies the enable bits for interrupt - * sources reg_indx × 32 through reg_indx × 32 + 31. + * @brief Modifies the enable bits for interrupt sources reg_indx × 32 through reg_indx × 32 + 31. * * @param reg_indx register index * @param reg_val register value to be written. @@ -262,8 +258,8 @@ cpuid_t aplic_get_target_hart(irqid_t intp_id); /** * @brief Returns the highest pending and enabled interrupt id. * - * Claimi has the same value as topi. However, reading claimi has the side - * effect of clearing the pending bit for the reported interrupt identity. + * Claimi has the same value as topi. However, reading claimi has the side effect of clearing the + * pending bit for the reported interrupt identity. * * @param idc_id IDC to read and clear the pending-bit the highest-priority * @return uint32_t returns the interrupt identity and interrupt priority. diff --git a/src/arch/riscv/irqc/aia/inc/vaplic.h b/src/arch/riscv/irqc/aia/inc/vaplic.h index 30fba4d54..f8083f30b 100644 --- a/src/arch/riscv/irqc/aia/inc/vaplic.h +++ b/src/arch/riscv/irqc/aia/inc/vaplic.h @@ -49,15 +49,14 @@ void vaplic_init(struct vm* vm, const union vm_irqc_dscrp* vm_irqc_dscrp); * @param vcpu Virtual CPU that will inject the interrupt * @param id interrupt identification * - * The virtual CPU passed by arg. may not be the CPU to which the interrupt - * is associated. In that case the vcpu will send a msg to the target cpu. + * The virtual CPU passed by arg. may not be the CPU to which the interrupt is associated. In that + * case the vcpu will send a msg to the target cpu. 
*/ void vaplic_inject(struct vcpu* vcpu, irqid_t id); /** - * @brief For a given virtual machine and an interrupt, associate this - * interrupt with the physical one. Thus, interrupt id is mapped - * to the physical id source. + * @brief For a given virtual machine and an interrupt, associate this interrupt with the physical + * one. Thus, interrupt id is mapped to the physical id source. * * @param vm Virtual machine to associate the 1-1 virt/phys interrupt * @param id interrupt identification to associate. diff --git a/src/arch/riscv/irqc/aia/vaplic.c b/src/arch/riscv/irqc/aia/vaplic.c index b8893b783..321d84640 100644 --- a/src/arch/riscv/irqc/aia/vaplic.c +++ b/src/arch/riscv/irqc/aia/vaplic.c @@ -144,8 +144,7 @@ static bool vaplic_get_active(struct vcpu* vcpu, irqid_t intp_id) /** * @brief Set a given interrupt as pending * - * @pre This function should only be called by a function that - * has taken the lock. + * @pre This function should only be called by a function that has taken the lock. * * @param vcpu virtual cpu * @param intp_id interrupt id @@ -166,8 +165,7 @@ static bool vaplic_set_pend(struct vcpu* vcpu, irqid_t intp_id) } /** - * @brief Updates the topi register with with the - * highest pend & en interrupt id + * @brief Updates the topi register with with the highest pend & en interrupt id * * @param vcpu virtual cpu * @return true if topi was updated, requiring the handling of the interrupt @@ -231,9 +229,8 @@ static void vaplic_update_hart_line(struct vcpu* vcpu, vcpuid_t vhart_index) cpuid_t pcpu_id = vaplic_vcpuid_to_pcpuid(vcpu, vhart_index); /** - * If the current cpu is the targeting cpu, signal the intp - * to the hart - * Else, send a mensage to the targeting cpu + * If the current cpu is the targeting cpu, signal the intp to the hart. Else, send a mensage + * to the targeting cpu */ if (pcpu_id == cpu()->id) { if (vaplic_update_topi(vcpu)) { @@ -251,9 +248,9 @@ static void vaplic_update_hart_line(struct vcpu* vcpu, vcpuid_t vhart_index) * @brief Triggers the hart/harts interrupt line update. * * @param vcpu virtual cpu - * @param vhart_index virtual hart to update the interrupt line. - * If UPDATE_ALL_HARTS were passed, this function will trigger - * the interrupt line update to all virtual harts running in this vm. + * @param vhart_index virtual hart to update the interrupt line. If UPDATE_ALL_HARTS were passed, + * this function will trigger the interrupt line update to all virtual harts running in this + * vm. */ static void vaplic_update_hart(struct vcpu* vcpu, int16_t vhart_index) { @@ -346,8 +343,8 @@ static void vaplic_set_sourcecfg(struct vcpu* vcpu, irqid_t intp_id, uint32_t ne spin_lock(&vaplic->lock); if (intp_id > 0 && intp_id < APLIC_MAX_INTERRUPTS && vaplic_get_sourcecfg(vcpu, intp_id) != new_val) { - /** If intp is being delegated make whole reg 0. - * This happens because a S domain is always a leaf. */ + /** If intp is being delegated make whole reg 0. This happens because a S domain is always + * a leaf. */ new_val &= (new_val & APLIC_SRCCFG_D) ? 0 : APLIC_SRCCFG_SM; /** If SM is reserved make intp inactive */ @@ -652,10 +649,8 @@ static void vaplic_set_target(struct vcpu* vcpu, irqid_t intp_id, uint32_t new_v spin_lock(&vaplic->lock); if (pcpu_id == INVALID_CPUID) { - /** If the hart index is invalid, make it vcpu = 0 - * and read the new pcpu. - * Software should not write anything other than legal - * values to such a field */ + /** If the hart index is invalid, make it vcpu = 0 and read the new pcpu. 
Software should + * not write anything other than legal values to such a field */ hart_index = 0; pcpu_id = vm_translate_to_pcpuid(vcpu->vm, hart_index); } @@ -832,8 +827,8 @@ static uint32_t vaplic_get_topi(struct vcpu* vcpu, idcid_t idc_id) /** * @brief Returns the highest pending and enabled interrupt. * - * Claimi has the same value as topi. However, reading claimi has the side - * effect of clearing the pending bit for the reported interrupt identity. + * Claimi has the same value as topi. However, reading claimi has the side effect of clearing the + * pending bit for the reported interrupt identity. * * @param vcpu virtual CPU * @param idc_id idc identifier @@ -862,8 +857,7 @@ static uint32_t vaplic_get_claimi(struct vcpu* vcpu, idcid_t idc_id) * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_domaincfg_access(struct emul_access* acc) { @@ -879,8 +873,7 @@ static void vaplic_emul_domaincfg_access(struct emul_access* acc) * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_srccfg_access(struct emul_access* acc) { @@ -897,8 +890,7 @@ static void vaplic_emul_srccfg_access(struct emul_access* acc) * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_setip_access(struct emul_access* acc) { @@ -915,8 +907,7 @@ static void vaplic_emul_setip_access(struct emul_access* acc) * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_setipnum_access(struct emul_access* acc) { @@ -930,8 +921,7 @@ static void vaplic_emul_setipnum_access(struct emul_access* acc) * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_in_clrip_access(struct emul_access* acc) { @@ -948,8 +938,7 @@ static void vaplic_emul_in_clrip_access(struct emul_access* acc) * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_clripnum_access(struct emul_access* acc) { @@ -963,8 +952,7 @@ static void vaplic_emul_clripnum_access(struct emul_access* acc) * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. 
*/ static void vaplic_emul_setie_access(struct emul_access* acc) { @@ -981,8 +969,7 @@ static void vaplic_emul_setie_access(struct emul_access* acc) * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_setienum_access(struct emul_access* acc) { @@ -996,8 +983,7 @@ static void vaplic_emul_setienum_access(struct emul_access* acc) * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_clrie_access(struct emul_access* acc) { @@ -1012,8 +998,7 @@ static void vaplic_emul_clrie_access(struct emul_access* acc) * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_clrienum_access(struct emul_access* acc) { @@ -1027,8 +1012,7 @@ static void vaplic_emul_clrienum_access(struct emul_access* acc) * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_target_access(struct emul_access* acc) { @@ -1045,8 +1029,7 @@ static void vaplic_emul_target_access(struct emul_access* acc) * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_idelivery_access(struct emul_access* acc, idcid_t idc_id) { @@ -1062,8 +1045,7 @@ static void vaplic_emul_idelivery_access(struct emul_access* acc, idcid_t idc_id * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_iforce_access(struct emul_access* acc, idcid_t idc_id) { @@ -1079,8 +1061,7 @@ static void vaplic_emul_iforce_access(struct emul_access* acc, idcid_t idc_id) * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_ithreshold_access(struct emul_access* acc, idcid_t idc_id) { @@ -1096,8 +1077,7 @@ static void vaplic_emul_ithreshold_access(struct emul_access* acc, idcid_t idc_i * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_topi_access(struct emul_access* acc, idcid_t idc_id) { @@ -1111,8 +1091,7 @@ static void vaplic_emul_topi_access(struct emul_access* acc, idcid_t idc_id) * * @param acc access information * - * It determines whether it needs to call the write or read funcion - * for the choosen register. 
+ * It determines whether it needs to call the write or read funcion for the choosen register. */ static void vaplic_emul_claimi_access(struct emul_access* acc, idcid_t idc_id) { diff --git a/src/arch/riscv/irqc/plic/plic.c b/src/arch/riscv/irqc/plic/plic.c index f597f6fb9..2b41cc3a5 100644 --- a/src/arch/riscv/irqc/plic/plic.c +++ b/src/arch/riscv/irqc/plic/plic.c @@ -149,8 +149,8 @@ void plic_handle() } /** - * Context organization is spec-out by the vendor, this is the default - * mapping found in sifive's plic. + * Context organization is spec-out by the vendor, this is the default mapping found in sifive's + * plic. */ __attribute__((weak)) int plic_plat_cntxt_to_id(struct plic_cntxt cntxt) diff --git a/src/arch/riscv/mem.c b/src/arch/riscv/mem.c index ea3d3dacc..bb5eb8a2e 100644 --- a/src/arch/riscv/mem.c +++ b/src/arch/riscv/mem.c @@ -16,9 +16,8 @@ static inline void as_map_physical_identity(struct addr_space* as) pte_t* pt = as->pt.root; /** - * Create identity mapping of existing physical memory regions using - * the largest pages possible pte (in riscv this is always at level 0 - * pt). + * Create identity mapping of existing physical memory regions using the largest pages + * possible pte (in riscv this is always at level 0 pt). */ for (size_t i = 0; i < platform.region_num; i++) { diff --git a/src/arch/riscv/relocate.S b/src/arch/riscv/relocate.S index 4d1a5d943..fbe0c94a2 100644 --- a/src/arch/riscv/relocate.S +++ b/src/arch/riscv/relocate.S @@ -60,9 +60,8 @@ switch_space: csrw satp, a1 /** - * Invalidate TLB: we can do this directly here without sbi support - * because we don't really need any shootdown as all harts must go - * through here. + * Invalidate TLB: we can do this directly here without sbi support because we don't really + * need any shootdown as all harts must go through here. */ sfence.vma diff --git a/src/arch/riscv/sbi.c b/src/arch/riscv/sbi.c index 4936bb525..e880d4118 100644 --- a/src/arch/riscv/sbi.c +++ b/src/arch/riscv/sbi.c @@ -41,8 +41,8 @@ #define SBI_REMOTE_HFENCE_VVMA_ASID_FID (6) /** - * For now we're defining bao specific ecalls, ie, hypercall, under the - * experimental extension id space. + * For now we're defining bao specific ecalls, ie, hypercall, under the experimental extension id + * space. */ #define SBI_EXTID_BAO (0x08000ba0) diff --git a/src/arch/riscv/sync_exceptions.c b/src/arch/riscv/sync_exceptions.c index 0f0746381..28debf687 100644 --- a/src/arch/riscv/sync_exceptions.c +++ b/src/arch/riscv/sync_exceptions.c @@ -30,8 +30,8 @@ static uint32_t read_ins(uintptr_t ins_addr) } /** - * Read 16 bits at a time to make sure the access is aligned. If - * the instruction is not compressed, read the following 16-bits. + * Read 16 bits at a time to make sure the access is aligned. If the instruction is not + * compressed, read the following 16-bits. */ ins = hlvxhu(ins_addr); if ((ins & 0b11) == 3) { @@ -88,9 +88,8 @@ size_t guest_page_fault_handler() size_t ins_size; if (ins == 0) { /** - * If htinst does not provide information about the trap, - * we must read the instruction from the guest's memory - * manually. + * If htinst does not provide information about the trap, we must read the instruction + * from the guest's memory manually. 
*/ vaddr_t ins_addr = CSRR(sepc); ins = read_ins(ins_addr); @@ -100,9 +99,8 @@ size_t guest_page_fault_handler() ERROR("fault on 1st stage page table walk"); } else { /** - * If htinst is valid and is not a pseudo isntruction make sure - * the opcode is valid even if it was a compressed instruction, - * but before save the real instruction size. + * If htinst is valid and is not a pseudo isntruction make sure the opcode is valid + * even if it was a compressed instruction, but before save the real instruction size. */ ins_size = TINST_INS_SIZE(ins); ins = ins | 0b10; @@ -115,8 +113,7 @@ size_t guest_page_fault_handler() emul.addr = addr; /** - * TODO: check if the access is aligned. - * If not, inject an exception in the vm. + * TODO: check if the access is aligned. If not, inject an exception in the vm. */ if (handler(&emul)) { @@ -146,8 +143,7 @@ void sync_exception_handler() internal_exception_handler(&cpu()->vcpu->regs.x[0]); } - // TODO: Do we need to check call comes from VS-mode and not VU-mode - // or U-mode ? + // TODO: Do we need to check call comes from VS-mode and not VU-mode or U-mode ? if (_scause < sync_handler_table_size && sync_handler_table[_scause]) { pc_step = sync_handler_table[_scause](); diff --git a/src/arch/riscv/vmm.c b/src/arch/riscv/vmm.c index e7fc40f9d..45826ee04 100644 --- a/src/arch/riscv/vmm.c +++ b/src/arch/riscv/vmm.c @@ -9,22 +9,19 @@ void vmm_arch_init() { /** - * At this point, we should make sure misa's H bit is set (at least by - * reading it). However, current SBI does not allow us to even read it. - * So we assume it is set - if not, the first acess to an hypervisor - * register will set an illegal inst fault. + * At this point, we should make sure misa's H bit is set (at least by reading it). However, + * current SBI does not allow us to even read it. So we assume it is set - if not, the first + * acess to an hypervisor register will set an illegal inst fault. */ /** - * Delegate all interrupts and exceptions not meant to be dealt by - * the hypervisor + * Delegate all interrupts and exceptions not meant to be dealt by the hypervisor */ CSRW(CSR_HIDELEG, HIDELEG_VSSI | HIDELEG_VSTI | HIDELEG_VSEI); CSRW(CSR_HEDELEG, HEDELEG_ECU | HEDELEG_IPF | HEDELEG_LPF | HEDELEG_SPF); /** - * TODO: consider delegating other exceptions e.g. breakpoint or ins - * misaligned + * TODO: consider delegating other exceptions e.g. breakpoint or ins misaligned */ } diff --git a/src/core/cpu.c b/src/core/cpu.c index 3d0eafa9c..aa6bfe8e8 100644 --- a/src/core/cpu.c +++ b/src/core/cpu.c @@ -93,9 +93,8 @@ void cpu_idle() cpu_arch_idle(); /** - * Should not return here. - * cpu should "wake up" from idle in cpu_idle_wakeup - * with a rewinded stack. + * Should not return here. cpu should "wake up" from idle in cpu_idle_wakeup with a rewinded + * stack. 
*/ ERROR("Spurious idle wake up"); } diff --git a/src/core/inc/config.h b/src/core/inc/config.h index 28eee1f53..bd8c0d8ad 100644 --- a/src/core/inc/config.h +++ b/src/core/inc/config.h @@ -12,9 +12,8 @@ #include #ifndef GENERATING_DEFS -// clang-format wont correctly recognize the syntax of assembly strings -// interleaved with stringified tokens via XSTR and will format it in an -// unreadable way +// clang-format wont correctly recognize the syntax of assembly strings interleaved with +// stringified tokens via XSTR and will format it in an unreadable way // clang-format off #define VM_IMAGE(img_name, img_path) \ extern uint8_t _##img_name##_vm_size; \ @@ -55,8 +54,8 @@ struct vm_config { /** - * To setup the image field either the VM_IMAGE_BUILTIN or VM_IMAGE_LOADED - * macros should be used. + * To setup the image field either the VM_IMAGE_BUILTIN or VM_IMAGE_LOADED macros should be + * used. */ struct { /* Image load address in VM's address space */ @@ -66,8 +65,7 @@ struct vm_config { /* Image size */ size_t size; /** - * Informs the hypervisor if the VM image is to be loaded - * separately by a bootloader. + * Informs the hypervisor if the VM image is to be loaded separately by a bootloader. */ bool separately_loaded; /* Dont copy the image */ @@ -77,21 +75,21 @@ struct vm_config { /* Entry point address in VM's address space */ vaddr_t entry; /** - * A bitmap signaling the preferred physical cpus assigned to the VM. - * If this value is each mutual exclusive for all the VMs, this field - * allows to direcly assign specific physical cpus to the VM. + * A bitmap signaling the preferred physical cpus assigned to the VM. If this value is each + * mutual exclusive for all the VMs, this field allows to direcly assign specific physical cpus + * to the VM. */ cpumap_t cpu_affinity; /** - * A bitmap for the assigned colors of the VM. This value is truncated - * depending on the number of available colors calculated at runtime + * A bitmap for the assigned colors of the VM. This value is truncated depending on the number + * of available colors calculated at runtime */ colormap_t colors; /** - * A description of the virtual platform available to the guest, i.e., - * the virtual machine itself. + * A description of the virtual platform available to the guest, i.e., the virtual machine + * itself. */ struct vm_platform platform; @@ -100,11 +98,10 @@ struct vm_config { extern struct config { struct { /** - * Only meaningful for MPU-based platforms. The hypervisor base address - * will default to the platform's base address, i.e., the base address - * of the first region defined in the target platform's description. - * If the user wishes to relocate it to another address, they must set - * relocate to true and provide the new base address. + * Only meaningful for MPU-based platforms. The hypervisor base address will default to the + * platform's base address, i.e., the base address of the first region defined in the + * target platform's description. If the user wishes to relocate it to another address, + * they must set relocate to true and provide the new base address. */ bool relocate; paddr_t base_addr; diff --git a/src/core/inc/types.h b/src/core/inc/types.h index 10288b032..99ff33de5 100644 --- a/src/core/inc/types.h +++ b/src/core/inc/types.h @@ -12,10 +12,9 @@ #include /** - * We assume LP64 and ILP32 for 64- and 32-bit architectures, respectively, as - * throughout the code `unsigned long` is the type used for values of the - * architecture's word width. 
This is just a sanity check to verify this is the - * ABI the compiler is effectively using. + * We assume LP64 and ILP32 for 64- and 32-bit architectures, respectively, as throughout the code + * `unsigned long` is the type used for values of the architecture's word width. This is just a + * sanity check to verify this is the ABI the compiler is effectively using. */ #if UINTPTR_WIDTH != ULONG_WIDTH #error "Unsigned long type width is not the same as the architecture´s word with" diff --git a/src/core/interrupts.c b/src/core/interrupts.c index e74e3d21c..c9120430d 100644 --- a/src/core/interrupts.c +++ b/src/core/interrupts.c @@ -55,8 +55,8 @@ static inline bool interrupt_assigned_to_hyp(irqid_t int_id) } /** - * @brief For a given interrupt intp_id, return if this interrupt - * is already reserved by VMM or any VM + * @brief For a given interrupt intp_id, return if this interrupt is already reserved by VMM or any + * VM * * @param int_id interrupt ID * @return true if interrupt is reserved diff --git a/src/core/mem.c b/src/core/mem.c index aab94c166..34f1d4a01 100644 --- a/src/core/mem.c +++ b/src/core/mem.c @@ -32,8 +32,7 @@ bool pp_alloc(struct page_pool* pool, size_t num_pages, bool aligned, struct ppa spin_lock(&pool->lock); /** - * If we need a contigous segment aligned to its size, lets start - * at an already aligned index. + * If we need a contigous segment aligned to its size, lets start at an already aligned index. */ size_t start = aligned ? pool->base / PAGE_SIZE % num_pages : 0; size_t curr = pool->last + ((pool->last + start) % num_pages); @@ -49,8 +48,8 @@ bool pp_alloc(struct page_pool* pool, size_t num_pages, bool aligned, struct ppa if (bit < 0) { /** - * No num_page page sement was found. If this is the first - * iteration set position to 0 to start next search from index + * No num_page page sement was found. If this is the first iteration set position + * to 0 to start next search from index * 0. */ size_t next_aligned = @@ -59,15 +58,14 @@ bool pp_alloc(struct page_pool* pool, size_t num_pages, bool aligned, struct ppa break; } else if (aligned && (((bit + start) % num_pages) != 0)) { /** - * If we're looking for an aligned segment and the found - * contigous segment is not aligned, start the search again - * from the last aligned index + * If we're looking for an aligned segment and the found contigous segment is not + * aligned, start the search again from the last aligned index */ curr = bit + ((bit + start) % num_pages); } else { /** - * We've found our pages. Fill output argument info, mark - * them as allocated, and update page pool bookkeeping. + * We've found our pages. Fill output argument info, mark them as allocated, and + * update page pool bookkeeping. */ ppages->base = pool->base + (bit * PAGE_SIZE); ppages->num_pages = num_pages; @@ -280,12 +278,11 @@ bool mem_reserve_physical_memory(struct page_pool* pool) size_t n_pg = NUM_PAGES(vm_cfg->image.size); struct ppages ppages = mem_ppages_get(vm_cfg->image.load_addr, n_pg); - // If the vm image is part of a statically allocated region of the same - // vm, we defer the reservation of this memory to when we reserve the - // physical region below. Note that this not allow partial overlaps. If - // the image must be entirely inside a statically allocated region, or - // completely outside of it. This avoid overcamplicating the reservation - // logic while still covering all the useful use cases. 
+ // If the vm image is part of a statically allocated region of the same vm, we defer the + // reservation of this memory to when we reserve the physical region below. Note that this + // not allow partial overlaps. If the image must be entirely inside a statically allocated + // region, or completely outside of it. This avoid overcamplicating the reservation logic + // while still covering all the useful use cases. if (mem_vm_img_in_phys_rgn(vm_cfg)) { continue; } diff --git a/src/core/mmu/io.c b/src/core/mmu/io.c index 2b0f75a54..15b8f147f 100644 --- a/src/core/mmu/io.c +++ b/src/core/mmu/io.c @@ -28,8 +28,7 @@ bool io_vm_add_device(struct vm* vm, deviceid_t dev_id) bool res = false; /* - * If dev_id == 0 assume global mask includes - * the relevant devices for this VM. + * If dev_id == 0 assume global mask includes the relevant devices for this VM. * * Assume there's no device id = 0 */ if (dev_id != 0) { diff --git a/src/core/mmu/mem.c b/src/core/mmu/mem.c index 225c30929..1473d2ad7 100644 --- a/src/core/mmu/mem.c +++ b/src/core/mmu/mem.c @@ -22,8 +22,8 @@ extern uint8_t _image_start, _image_load_end, _image_end, _dmem_phys_beg, _dmem_ void switch_space(struct cpu*, paddr_t); /** - * An important note about sections its that they must have diferent entries - * at the root page table. + * An important note about sections its that they must have diferent entries at the root page + * table. */ struct section { @@ -106,16 +106,15 @@ bool pp_alloc_clr(struct page_pool* pool, size_t n, colormap_t colors, struct pp spin_lock(&pool->lock); /** - * Lets start the search at the first available color after the last - * known free position to the top of the pool. + * Lets start the search at the first available color after the last known free position to the + * top of the pool. */ size_t index = pp_next_clr(pool->base, pool->last, colors); size_t top = pool->size; /** - * Two iterations. One starting from the last known free page, - * other starting from the beggining of page pool to the start of the - * previous iteration. + * Two iterations. One starting from the last known free page, other starting from the + * beggining of page pool to the start of the previous iteration. */ for (size_t i = 0; i < 2 && !ok; i++) { while ((allocated < n) && (index < top)) { @@ -128,9 +127,8 @@ bool pp_alloc_clr(struct page_pool* pool, size_t n, colormap_t colors, struct pp first_index = index; /** - * Count the number of free pages contigous on the target - * color segement until n pages are found or we reach top page - * of the search. + * Count the number of free pages contigous on the target color segement until n pages + * are found or we reach top page of the search. */ while ((index < top) && (bitmap_get(pool->bitmap, index) == 0) && (allocated < n)) { allocated++; @@ -142,9 +140,8 @@ bool pp_alloc_clr(struct page_pool* pool, size_t n, colormap_t colors, struct pp if (allocated == n) { /** - * We've found n contigous free pages that fit the color pattern, - * Fill the output ppage arg, mark the pages as allocated and - * update page pool internal state. + * We've found n contigous free pages that fit the color pattern, Fill the output ppage + * arg, mark the pages as allocated and update page pool internal state. 
*/ ppages->num_pages = n; ppages->base = pool->base + (first_index * PAGE_SIZE); @@ -158,9 +155,8 @@ bool pp_alloc_clr(struct page_pool* pool, size_t n, colormap_t colors, struct pp break; } else { /** - * If this is the first iteration, setup index and top to search - * from base of the page pool until the previous iteration start - * point + * If this is the first iteration, setup index and top to search from base of the page + * pool until the previous iteration start point */ index = 0; } @@ -234,8 +230,7 @@ static void mem_expand_pte(struct addr_space* as, vaddr_t va, size_t lvl) pte_t* pte = pt_get_pte(&as->pt, lvl, va); /** - * only can expand if the pte exists and it isnt pointing to - * a next level table already. + * only can expand if the pte exists and it isnt pointing to a next level table already. */ if (pte != NULL && !pte_table(&as->pt, pte, lvl)) { pte_t pte_val = *pte; // save the original pte @@ -245,23 +240,20 @@ static void mem_expand_pte(struct addr_space* as, vaddr_t va, size_t lvl) if (vld || rsv) { /** - * If this was valid before and it wasn't a table, it must - * have been a superpage, so fill the new expanded table to - * have the same mappings; + * If this was valid before and it wasn't a table, it must have been a superpage, so + * fill the new expanded table to have the same mappings; */ /** - * Invalidate the old TLB entries with superpage entries. - * This means that from now on to the end of the function, - * the original spaced mapped by the entry will be unmaped. - * Therefore this function cannot be call on the entry mapping - * hypervisor code or data used in it (including stack). + * Invalidate the old TLB entries with superpage entries. This means that from now on + * to the end of the function, the original spaced mapped by the entry will be unmaped. + * Therefore this function cannot be call on the entry mapping hypervisor code or data + * used in it (including stack). */ tlb_inv_va(&cpu()->as, va); /** - * Now traverse the new next level page table to replicate the - * original mapping. + * Now traverse the new next level page table to replicate the original mapping. */ lvl++; @@ -293,8 +285,8 @@ static void mem_inflate_pt(struct addr_space* as, vaddr_t va, size_t length) /* Must have lock on as and va section to call */ /** - * For each level in the pt, expand each entry in the specified range - * as a next level page table. + * For each level in the pt, expand each entry in the specified range as a next level page + * table. */ for (size_t lvl = 0; lvl < as->pt.dscr->lvls - 1; lvl++) { vaddr_t vaddr = va; @@ -341,9 +333,9 @@ vaddr_t mem_alloc_vpage(struct addr_space* as, enum AS_SEC section, vaddr_t at, } while (count < n && !failed) { - // Check if there is still enough space in the address space. - // The corner case of top being the highest address in the address - // space and the target address being 0 is handled separate + // Check if there is still enough space in the address space. The corner case of top being + // the highest address in the address space and the target address being 0 is handled + // separate size_t full_as = (addr == 0) && (top == MAX_VA); if (!full_as && (((top + 1 - addr) / PAGE_SIZE) < n)) { vpage = INVALID_VA; @@ -478,8 +470,7 @@ void mem_unmap(struct addr_space* as, vaddr_t at, size_t num_pages, bool free_pp } /** - * TODO: check if the current pt is now empty and if so, - * free it too up to the root. + * TODO: check if the current pt is now empty and if so, free it too up to the root. 
*/ } } @@ -510,8 +501,7 @@ bool mem_map(struct addr_space* as, vaddr_t va, struct ppages* ppages, size_t nu } /** - * TODO check if entry is reserved. Unrolling mapping if something - * goes wrong. + * TODO check if entry is reserved. Unrolling mapping if something goes wrong. */ struct ppages temp_ppages; @@ -603,9 +593,8 @@ bool mem_map_reclr(struct addr_space* as, vaddr_t va, struct ppages* ppages, siz } /** - * Count how many pages are not colored in original images. - * Allocate the necessary colored pages. - * Mapped onto hypervisor address space. + * Count how many pages are not colored in original images. Allocate the necessary colored + * pages. Mapped onto hypervisor address space. */ size_t reclrd_num = num_pages / (COLOR_NUM * COLOR_SIZE) * COLOR_SIZE * bit_count(~(as->colors & BIT_MASK(0, COLOR_NUM))); @@ -617,8 +606,8 @@ bool mem_map_reclr(struct addr_space* as, vaddr_t va, struct ppages* ppages, siz } /** - * If the address space was not assigned any specific color, - * or there are no pages to recolor defer to vanilla mapping. + * If the address space was not assigned any specific color, or there are no pages to recolor + * defer to vanilla mapping. */ if (all_clrs(as->colors) || (reclrd_num == 0)) { return mem_map(as, va, ppages, num_pages, flags); @@ -642,8 +631,8 @@ bool mem_map_reclr(struct addr_space* as, vaddr_t va, struct ppages* ppages, siz size_t index = 0; /** - * Inflate reserved page tables to the last level. This assumes - * coloring always needs the finest grained mapping possible. + * Inflate reserved page tables to the last level. This assumes coloring always needs the + * finest grained mapping possible. */ mem_inflate_pt(as, vaddr, num_pages * PAGE_SIZE); @@ -651,8 +640,8 @@ bool mem_map_reclr(struct addr_space* as, vaddr_t va, struct ppages* ppages, siz pte = pt_get_pte(&as->pt, as->pt.dscr->lvls - 1, vaddr); /** - * If image page is already color, just map it. - * Otherwise first copy it to the previously allocated pages. + * If image page is already color, just map it. Otherwise first copy it to the previously + * allocated pages. */ if (bit_get(as->colors, ((i + clr_offset) / COLOR_SIZE % COLOR_NUM))) { pte_set(pte, paddr, PTE_PAGE, flags); @@ -671,8 +660,8 @@ bool mem_map_reclr(struct addr_space* as, vaddr_t va, struct ppages* ppages, siz } /** - * Flush the newly allocated colored pages to which parts of the - * image was copied, and might stayed in the cache system. + * Flush the newly allocated colored pages to which parts of the image was copied, and might + * stayed in the cache system. */ cache_flush_range(reclrd_va_base, reclrd_num * PAGE_SIZE); @@ -734,16 +723,16 @@ void* copy_space(void* base, const size_t size, struct ppages* pages) } /** - * To have the true benefits of coloring it's necessary that not only the guest - * images, but also the hypervisor itself, are colored. + * To have the true benefits of coloring it's necessary that not only the guest images, but also + * the hypervisor itself, are colored. * - * Bao is coloring itself by copying everything that has been allocated until - * this point in a new colored space, jumping into this new region and then - * then deleting all that was allocated before. + * Bao is coloring itself by copying everything that has been allocated until this point in a new + * colored space, jumping into this new region and then then deleting all that was allocated + * before. 
* - * Some regions need to be aligned due to some ARM restraint with the pagetable - * structure, so true coloring is actually never achieved. The drawbacks of - * this limitation are yet to be seen, and are in need of more testing. + * Some regions need to be aligned due to some ARM restraint with the pagetable structure, so true + * coloring is actually never achieved. The drawbacks of this limitation are yet to be seen, and + * are in need of more testing. */ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_region) { @@ -771,9 +760,8 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_regio /* * Copy the CPU space into a colored region. * - * It's not possible to simply copy the CPU space as-is, since there are a - * few pointers and structures that would point to the old, non-colored - * data. + * It's not possible to simply copy the CPU space as-is, since there are a few pointers and + * structures that would point to the old, non-colored data. * * the new CPU region is created, cleaned, prepared and finally mapped. */ @@ -787,10 +775,9 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_regio mem_map(&cpu_new->as, va, &p_cpu, NUM_PAGES(sizeof(struct cpu)), PTE_HYP_FLAGS); /* - * Also, map the root page table in the new address space and keep both the - * virtual address and physical address in local variables as they will be - * needed later to perform the address space switch and new address space - * initialization. + * Also, map the root page table in the new address space and keep both the virtual address and + * physical address in local variables as they will be needed later to perform the address + * space switch and new address space initialization. */ paddr_t p_root_pt_addr; vaddr_t v_root_pt_addr; @@ -804,12 +791,11 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_regio mem_map(&cpu_new->as, v_root_pt_addr, &p_root_pt_pages, root_pt_num_pages, PTE_HYP_FLAGS); /* - * Copy the Hypervisor image and root page pool bitmap into a colored - * region. + * Copy the Hypervisor image and root page pool bitmap into a colored region. * - * CPU_MASTER allocates, copies and maps the image and the root page pool - * bitmap on a shared space, whilst other CPUs only have to copy the image - * from the CPU_MASTER in order to be able to access it. + * CPU_MASTER allocates, copies and maps the image and the root page pool bitmap on a shared + * space, whilst other CPUs only have to copy the image from the CPU_MASTER in order to be able + * to access it. */ if (cpu()->id == CPU_MASTER) { copy_space(&_image_start, image_size, &p_image); @@ -833,12 +819,10 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_regio cpu_sync_barrier(&cpu_glb_sync); /* - * CPU_MASTER will also take care of mapping the configuration onto the new - * space. + * CPU_MASTER will also take care of mapping the configuration onto the new space. * - * The root page pool bitmap tracks all the physical allocation, so it - * needs to be the last thing to be copied, as after that, no physical - * allocation will be tracked. + * The root page pool bitmap tracks all the physical allocation, so it needs to be the last + * thing to be copied, as after that, no physical allocation will be tracked. 
*/ if (cpu()->id == CPU_MASTER) { /* Copy root pool bitmap */ @@ -857,21 +841,19 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_regio switch_space(cpu_new, p_root_pt_addr); /** - * Make sure the new physical pages containing image and cpu are flushed - * to main memmory + * Make sure the new physical pages containing image and cpu are flushed to main memmory */ cache_flush_range((vaddr_t)&_image_start, image_size); cache_flush_range((vaddr_t)&_cpu_private_beg, sizeof(struct cpu)); /** - * Bao's code from here's on still uses the static global variables, so - * they need to be updated. + * Bao's code from here's on still uses the static global variables, so they need to be + * updated. * - * The synchronization objects are in an inconsistent state, and they need - * to be re-initialized before they get used again, so CPUs need a way to - * communicate between themselves without an explicit barrier. To - * accomplish this a static global variable is used. + * The synchronization objects are in an inconsistent state, and they need to be re-initialized + * before they get used again, so CPUs need a way to communicate between themselves without an + * explicit barrier. To accomplish this a static global variable is used. */ if (cpu()->id == CPU_MASTER) { cpu_sync_init(&cpu_glb_sync, platform.cpu_num); @@ -885,8 +867,8 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_regio /* * Clear the old region that have been copied. * - * CPU space regions and Hypervisor image region are contingent, starting - * from `load_addr`. The bitmap region is on top of the root pool region. + * CPU space regions and Hypervisor image region are contingent, starting from `load_addr`. The + * bitmap region is on top of the root pool region. */ if (cpu()->id == CPU_MASTER) { p_image = mem_ppages_get(load_addr, NUM_PAGES(image_load_size)); diff --git a/src/core/mpu/config.c b/src/core/mpu/config.c index 7f2f5407d..231830288 100644 --- a/src/core/mpu/config.c +++ b/src/core/mpu/config.c @@ -11,8 +11,7 @@ void config_mem_prot_init(paddr_t load_addr) for (size_t i = 0; i < config.vmlist_size; i++) { for (size_t j = 0; j < config.vmlist[i].platform.region_num; j++) { /** - * On MPU systems all VM regions must be physical regions with - * 1-to-1 mapping. + * On MPU systems all VM regions must be physical regions with 1-to-1 mapping. */ config.vmlist[i].platform.regions[j].place_phys = true; vaddr_t region_base = config.vmlist[i].platform.regions[j].base; @@ -21,8 +20,8 @@ void config_mem_prot_init(paddr_t load_addr) for (size_t j = 0; j < config.vmlist[i].platform.ipc_num; j++) { /** - * In MPU-based systems, the address of the VM's IPC object and - * the used must follow a 1-1 mapping. + * In MPU-based systems, the address of the VM's IPC object and the used must follow a + * 1-1 mapping. */ size_t shmem_id = config.vmlist[i].platform.ipcs[j].shmem_id; vaddr_t ipc_base_addr = config.vmlist[i].platform.ipcs[j].base; @@ -35,8 +34,7 @@ void config_mem_prot_init(paddr_t load_addr) for (size_t i = 0; i < config.shmemlist_size; i++) { /** - * On MPU systems all shared memory regions must be physical - * regions with 1-to-1 mapping. + * On MPU systems all shared memory regions must be physical regions with 1-to-1 mapping. 
*/ config.shmemlist[i].place_phys = true; } diff --git a/src/core/mpu/inc/mem_prot/mem.h b/src/core/mpu/inc/mem_prot/mem.h index a1b5ffb93..e8b7b83db 100644 --- a/src/core/mpu/inc/mem_prot/mem.h +++ b/src/core/mpu/inc/mem_prot/mem.h @@ -40,11 +40,11 @@ static inline bool mem_regions_overlap(struct mp_region* reg1, struct mp_region* } /** - * This functions must be defined for the physical MPU. The abstraction provided - * by the physical MPU layer is minimal. Besides initialization: - * i) It must provide the view of a separate physical MPU for each privilege; - * ii) It must allow the mapping and unmapping of regions on these MPUs, - * returning a binary return success value. + * This functions must be defined for the physical MPU. The abstraction provided by the physical + * MPU layer is minimal. Besides initialization: + * i) It must provide the view of a separate physical MPU for each privilege; + * ii) It must allow the mapping and unmapping of regions on these MPUs,returning a binary return + * success value. */ void mpu_init(); bool mpu_map(priv_t priv, struct mp_region* mem); diff --git a/src/core/mpu/mem.c b/src/core/mpu/mem.c index d9f541f9a..96981a612 100644 --- a/src/core/mpu/mem.c +++ b/src/core/mpu/mem.c @@ -226,10 +226,9 @@ static cpumap_t mem_section_shared_cpus(struct addr_space* as, as_sec_t section) cpus = BIT_MASK(0, PLAT_CPU_NUM); } else if (section == SEC_HYP_VM) { /** - * If we don't have a valid vcpu at this point, it means we are - * creating this region before even having a vm. Therefore, the - * sharing of the region must be guaranteed by other means (e.g. - * vmm_vm_install) + * If we don't have a valid vcpu at this point, it means we are creating this region + * before even having a vm. Therefore, the sharing of the region must be guaranteed by + * other means (e.g. vmm_vm_install) */ if (cpu()->vcpu != NULL) { cpus = cpu()->vcpu->vm->cpus; @@ -416,10 +415,9 @@ bool mem_unmap_range(struct addr_space* as, vaddr_t vaddr, size_t size, bool bro mpid_t mpid = mem_vmpu_find_overlapping_region(as, ®); if (mpid == INVALID_MPID) { /** - * FIXME: right now we are ignoring the fact that the range or - * parts of it might not be mapped. This is in line to what the MMU - * mem_unmap function does. We should change this to only go ahead - * with the unpamming if the full range is indeed mapped. + * FIXME: right now we are ignoring the fact that the range or parts of it might not + * be mapped. This is in line to what the MMU mem_unmap function does. We should change + * this to only go ahead with the unpamming if the full range is indeed mapped. */ break; } @@ -475,10 +473,9 @@ vaddr_t mem_map_cpy(struct addr_space* ass, struct addr_space* asd, vaddr_t vas, vaddr_t va_res = INVALID_VA; if ((ass != asd) && (vad == INVALID_VA || vad == vas)) { - // In mpu-based systems, we can only copy mappings between address - // spaces, as copying a mapping in a single address space would overlap - // the orignal mapping. Also because only identify mappings are - // supported, the source va must equal the destination va, or be an + // In mpu-based systems, we can only copy mappings between address spaces, as copying a + // mapping in a single address space would overlap the orignal mapping. Also because only + // identify mappings are supported, the source va must equal the destination va, or be an // invalid va. This still covers the most useful uses cases. 
        spin_lock(&ass->lock);
diff --git a/src/core/vm.c b/src/core/vm.c
index 96a032469..4069229fc 100644
--- a/src/core/vm.c
+++ b/src/core/vm.c
@@ -115,10 +115,9 @@ static void vm_install_image(struct vm* vm, struct vm_mem_region* reg)
     }

     if (range_overlap_range(img_base, img_sz, img_load_pa, img_sz)) {
-        // We impose an image load region cannot overlap its runtime region.
-        // This both simplifies the copying procedure as well as avoids
-        // limitations of mpu-based memory management which does not allow
-        // overlapping mappings on the same address space.
+        // We impose that an image load region cannot overlap its runtime region. This both
+        // simplifies the copying procedure as well as avoids limitations of mpu-based memory
+        // management which does not allow overlapping mappings on the same address space.
         ERROR("failed installing vm image. Image load region overlaps with"
             " image runtime region");
     }
@@ -257,15 +256,13 @@ struct vm* vm_init(struct vm_allocation* vm_alloc, const struct vm_config* confi
     cpu_sync_barrier(&vm->sync);

     /**
-     * Perform architecture dependent initializations. This includes,
-     * for example, setting the page table pointer and other virtualization
-     * extensions specifics.
+     * Perform architecture dependent initializations. This includes, for example, setting the page
+     * table pointer and other virtualization extension specifics.
      */
     vm_arch_init(vm, config);

     /**
-     * Create the VM's address space according to configuration and where
-     * its image was loaded.
+     * Create the VM's address space according to configuration and where its image was loaded.
      */
     if (master) {
         vm_init_mem_regions(vm, config);
diff --git a/src/core/vmm.c b/src/core/vmm.c
index 808dbbbd2..0866b32d9 100644
--- a/src/core/vmm.c
+++ b/src/core/vmm.c
@@ -78,12 +78,10 @@ static bool vmm_assign_vcpu(bool* master, vmid_t* vm_id)
 static bool vmm_alloc_vm(struct vm_allocation* vm_alloc, struct vm_config* config)
 {
     /**
-     * We know that we will allocate a block aligned to the PAGE_SIZE, which
-     * is guaranteed to fulfill the alignment of all types.
-     * However, to guarantee the alignment of all fields, when we calculate
-     * the size of a field in the vm_allocation struct, we must align the
-     * previous total size calculated until that point, to the alignment of
-     * the type of the next field.
+     * We know that we will allocate a block aligned to the PAGE_SIZE, which is guaranteed to
+     * fulfill the alignment of all types. However, to guarantee the alignment of all fields, when
+     * we calculate the size of a field in the vm_allocation struct, we must align the previous
+     * total size calculated until that point, to the alignment of the type of the next field.
      */
     size_t total_size = sizeof(struct vm);
diff --git a/src/lib/inc/bit.h b/src/lib/inc/bit.h
index 7e28efc79..27ab1c4b3 100644
--- a/src/lib/inc/bit.h
+++ b/src/lib/inc/bit.h
@@ -9,10 +9,10 @@
 #include

 /**
- * The extra shift is because both arm and riscv logical shift instructions
- * support a maximum of machine word length minus one bit shits. This covers
- * the corner case of runtime full machine word length masks with the cost of
- * an extra shift instruction. For static masks, there should be no extra costs.
+ * The extra shift is because both arm and riscv logical shift instructions support a maximum of
+ * machine word length minus one bit shifts. This covers the corner case of runtime full machine
+ * word length masks with the cost of an extra shift instruction. For static masks, there should be
+ * no extra costs.
 */
#define BIT32_MASK(OFF, LEN) ((((UINT32_C(1) << ((LEN)-1)) << 1) - 1) << (OFF))
#define BIT64_MASK(OFF, LEN) ((((UINT64_C(1) << ((LEN)-1)) << 1) - 1) << (OFF))
diff --git a/src/lib/inc/util.h b/src/lib/inc/util.h
index efa6bcb8d..61d203797 100644
--- a/src/lib/inc/util.h
+++ b/src/lib/inc/util.h
@@ -72,9 +72,9 @@ static inline bool range_in_range(unsigned long base1, unsigned long size1, unsi

 #define in_range(_addr, _base, _size) range_in_range(_addr, 0, _base, _size)

 /**
- * Check if a given macro was defined. Note it only works wither if the macro
- * is undefined or defined to the value 1. If the macro is defined with any
- * other value it will fail recognizing its defined.
+ * Check if a given macro was defined. Note it only works if the macro is either undefined or
+ * defined to the value 1. If the macro is defined with any other value it will fail recognizing
+ * it is defined.
 */
 #define DEFINED(MACRO) _DEFINED(MACRO)
 #define _DEFINED_1 0,
diff --git a/src/lib/printk.c b/src/lib/printk.c
index 1c2dc1f36..9e88a1bbe 100644
--- a/src/lib/printk.c
+++ b/src/lib/printk.c
@@ -82,15 +82,14 @@ static size_t vprintd(char** buf, unsigned int flags, va_list* args)
 }

 /**
- * This is a limited printf implementation. The format string only supports
- * integer, string and char arguments. That is, 'd', 'u' or 'x', 's' and 'c'
- * specifiers, respectively. For integers, it only supports the none and 'l'
- * lengths. It does not support any flags, width or precision fields. If
- * present, this fields are ignored.
+ * This is a limited printf implementation. The format string only supports integer, string and
+ * char arguments. That is, 'd', 'u' or 'x', 's' and 'c' specifiers, respectively. For integers, it
+ * only supports the none and 'l' lengths. It does not support any flags, width or precision
+ * fields. If present, these fields are ignored.
 *
- * Note this does not follow the C lib vsnprintf specification. It returns the
- * numbers of characters written to the buffer, and changes fmt to point to the
- * first character that was not printed.
+ * Note this does not follow the C lib vsnprintf specification. It returns the number of
+ * characters written to the buffer, and changes fmt to point to the first character that was not
+ * printed.
 */
 size_t vsnprintk(char* buf, size_t buf_size, const char** fmt, va_list* args)
 {
diff --git a/src/platform/drivers/8250_uart/8250_uart.c b/src/platform/drivers/8250_uart/8250_uart.c
index f12344198..538eba499 100644
--- a/src/platform/drivers/8250_uart/8250_uart.c
+++ b/src/platform/drivers/8250_uart/8250_uart.c
@@ -10,10 +10,9 @@ void uart_init(volatile struct uart8250_hw* uart)
     /* set baudrate */
     uart->lcr |= UART8250_LCR_DLAB;
     /**
-     * should set dll and dlh,
-     * to simplify instead lets assume the firmware did this for us.
-     * TODO: we should add uart clk and baudrate info to platform descrption
-     * and use this to calculate this values in runtime.
+     * We should set dll and dlh; to simplify, we instead assume the firmware did this for us.
+     * TODO: we should add uart clk and baudrate info to platform description and use this to
+     * calculate these values at runtime.
*/ uart->lcr &= ~UART8250_LCR_DLAB; diff --git a/src/platform/rpi4/rpi4_desc.c b/src/platform/rpi4/rpi4_desc.c index 0248dcc2e..f6d0bceb3 100644 --- a/src/platform/rpi4/rpi4_desc.c +++ b/src/platform/rpi4/rpi4_desc.c @@ -12,8 +12,8 @@ struct platform platform = { { /* * - 0x8000 at the bottom reserved for atf - * - 0x4c00000 (76 MiB) at the top reserved for gpu (depends on - * gpu_mem in config.txt. this is the default) + * - 0x4c00000 (76 MiB) at the top reserved for gpu (depends on gpu_mem in config.txt. + * this is the default) */ .base = 0x80000, .size = 0x40000000 - 0x80000 - 0x4c00000, diff --git a/src/platform/zcu102/zcu102_desc.c b/src/platform/zcu102/zcu102_desc.c index 5f974ce06..cf599a6fe 100644 --- a/src/platform/zcu102/zcu102_desc.c +++ b/src/platform/zcu102/zcu102_desc.c @@ -11,18 +11,16 @@ struct platform platform = { .regions = (struct mem_region[]) { { /** - * The Arm Trusted Firmware shipped in the default Xilinx BOOT.BIN - * is loaded in a non secure zone, more specifically at the end of - * the first memory bank. Being in a non-secure zone means that can - * be easily overwritten. + * The Arm Trusted Firmware shipped in the default Xilinx BOOT.BIN is loaded in a non + * secure zone, more specifically at the end of the first memory bank. Being in a + * non-secure zone means that can be easily overwritten. * - * The memory size is therefore shrunk to take this into account and - * avoid memory corruption. + * The memory size is therefore shrunk to take this into account and avoid memory + * corruption. * - * Note that if the ATF is compiled with debug symbols or with a - * custom SPD service, then it gets loaded at the *beginning* of the - * first memory bank, in that case the base address should be - * changed to 0x80000, and the size shrunk accorindgly. + * Note that if the ATF is compiled with debug symbols or with a custom SPD service, + * then it gets loaded at the *beginning* of the first memory bank, in that case the + * base address should be changed to 0x80000, and the size shrunk accorindgly. */ .base = 0x00000000, .size = 0x80000000 - 0x16000, diff --git a/src/platform/zcu104/zcu104_desc.c b/src/platform/zcu104/zcu104_desc.c index 1cb38dc92..4baa5abaa 100644 --- a/src/platform/zcu104/zcu104_desc.c +++ b/src/platform/zcu104/zcu104_desc.c @@ -11,18 +11,16 @@ struct platform platform = { .regions = (struct mem_region[]) { { /** - * The Arm Trusted Firmware shipped in the default Xilinx BOOT.BIN - * is loaded in a non secure zone, more specifically at the end of - * the first memory bank. Being in a non-secure zone means that can - * be easily overwritten. + * The Arm Trusted Firmware shipped in the default Xilinx BOOT.BIN is loaded in a non + * secure zone, more specifically at the end of the first memory bank. Being in a + * non-secure zone means that can be easily overwritten. * - * The memory size is therefore shrunk to take this into account and - * avoid memory corruption. + * The memory size is therefore shrunk to take this into account and avoid memory + * corruption. * - * Note that if the ATF is compiled with debug symbols or with a - * custom SPD service, then it gets loaded at the *beginning* of the - * first memory bank, in that case the base address should be - * changed to 0x80000, and the size shrunk accorindgly. 
+ * Note that if the ATF is compiled with debug symbols or with a custom SPD service,
+ * then it gets loaded at the *beginning* of the first memory bank; in that case, the
+ * base address should be changed to 0x80000, and the size shrunk accordingly.
 */
 .base = 0x00080000,
 .size = 0x7FF00000 - 0x16000,