style: format comment blocks to wrap on 100 col limit
Signed-off-by: Daniel Oliveira <[email protected]>
Signed-off-by: Jose Martins <[email protected]>
danielRep committed Nov 13, 2023
1 parent 4d4c183 commit 51e8fac
Showing 66 changed files with 456 additions and 602 deletions.
36 changes: 17 additions & 19 deletions src/arch/armv8/aarch32/boot.S
@@ -19,15 +19,14 @@
.data
.balign 4
/**
* barrier is used to minimal synchronization in boot - other cores wait for
* bsp to set it.
* barrier is used to minimal synchronization in boot - other cores wait for bsp to set it.
*/
_barrier: .4byte 0

/**
* The following code MUST be at the base of the image, as this is bao's entry
* point. Therefore .boot section must also be the first in the linker script.
* DO NOT implement any code before the _reset_handler in this section.
* The following code MUST be at the base of the image, as this is bao's entry point. Therefore
* .boot section must also be the first in the linker script. DO NOT implement any code before the
* _reset_handler in this section.
*/
.section ".boot", "ax"
.globl _reset_handler
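The _barrier word above is the hand-off the comment describes: secondary cores spin on it until the bootstrap core (bsp) stores a non-zero value. A minimal C sketch of the same idea, using hypothetical helper names rather than Bao's actual symbols:

#include <stdint.h>

/* Hypothetical C rendering of the boot barrier: the bootstrap core publishes a
 * non-zero value once early init is done; every other core spins until then. */
static volatile uint32_t boot_barrier_sketch = 0;

static void barrier_release(void) /* bootstrap core only */
{
    __atomic_store_n(&boot_barrier_sketch, 1, __ATOMIC_RELEASE);
}

static void barrier_wait(void) /* all other cores */
{
    while (__atomic_load_n(&boot_barrier_sketch, __ATOMIC_ACQUIRE) == 0) {
        /* spin; a real port would add wfe/sev to save power */
    }
}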
@@ -57,10 +56,10 @@ _reset_handler:
adr r1, _el2_entry

/**
* Linearize cpu id according to the number of clusters and processors
* per cluster. We are only considering two levels of affinity.
* TODO: this should be done some other way. We shouldn't depend on the platform
* description this early in the initialization.
* Linearize cpu id according to the number of clusters and processors per cluster. We are only
* considering two levels of affinity.
* TODO: this should be done some other way. We shouldn't depend on the platform description
* this early in the initialization.
*/
mov r3, r0, lsr #8
and r3, r3, #0xff
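The linearization described in the comment above turns the two MPIDR affinity fields into a sequential id. A C sketch under the assumption of a simple platform description (the structure and field names are illustrative, not Bao's):

#include <stddef.h>
#include <stdint.h>

/* Aff0 (bits [7:0]) is the core number inside a cluster, Aff1 (bits [15:8]) is
 * the cluster number; the platform description supplies cores per cluster. */
struct plat_desc_sketch {
    size_t cluster_num;
    size_t cores_per_cluster[8]; /* illustrative fixed bound */
};

static size_t linear_cpu_id(uint64_t mpidr, const struct plat_desc_sketch* plat)
{
    size_t aff0 = (size_t)(mpidr & 0xff);
    size_t aff1 = (size_t)((mpidr >> 8) & 0xff);
    size_t id = aff0;
    for (size_t c = 0; c < aff1 && c < plat->cluster_num; c++) {
        id += plat->cores_per_cluster[c]; /* cores in all preceding clusters */
    }
    return id;
}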
@@ -88,8 +87,8 @@ _reset_handler:
add r0, r0, r7

/*
* Install vector table physical address early, in case exception occurs
* during this initialization.
* Install vector table physical address early, in case exception occurs during this
* initialization.
*/
get_phys_addr r3, r4, _hyp_vector_table
mcr p15, 4, r3, c12, c0, 0 // write HVBAR
@@ -103,9 +102,9 @@ _reset_handler:
bne 1f
#else
/**
* If the cpu master is not fixed, for setting it, we assume only one cpu is
* initially activated which later will turn on all the others. Therefore, there
* is no concurrency when setting CPU_MASTER and no atomic operations are needed.
* If the cpu master is not fixed, for setting it, we assume only one cpu is initially activated
* which later will turn on all the others. Therefore, there is no concurrency when setting
* CPU_MASTER and no atomic operations are needed.
*/
.pushsection .data
_master_set:
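The claim in the comment above (no atomics needed for CPU_MASTER) holds because exactly one core is executing this code before the others are released. A C sketch of that reasoning, with hypothetical names:

#include <stdbool.h>
#include <stddef.h>

static bool master_set = false; /* counterpart of _master_set in .data */
static size_t cpu_master_sketch;

static void set_master_if_unset(size_t cpuid)
{
    /* Safe without atomics only under the stated assumption: no other core is
     * running yet, so this test-and-set cannot race. */
    if (!master_set) {
        master_set = true;
        cpu_master_sketch = cpuid;
    }
}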
@@ -124,8 +123,8 @@ _set_master_cpu:
1:

/**
* TODO: bring the system to a well known state. This includes disabling
* the MPU, all caches, BP and others, and invalidating them.
* TODO: bring the system to a well known state. This includes disabling the MPU, all caches,
* BP and others, and invalidating them.
*/

/* Clear stack pointer to avoid unaligned SP exceptions during boot */
Expand Down Expand Up @@ -182,7 +181,7 @@ _set_master_cpu:
/* This point should never be reached */
b .

/***** Helper functions for boot code. ******/
/***** Helper functions for boot code. ******/

.global boot_clear
.func boot_clear
@@ -199,8 +198,7 @@ boot_clear:
.endfunc

/*
* Code adapted from "Application Note Bare-metal Boot Code for ARMv8-A
* Processors - Version 1.0"
* Code adapted from "Application Note Bare-metal Boot Code for ARMv8-A Processors - Version 1.0"
*
* r0 - cache level to be invalidated (0 - dl1$, 1 - il1$)
*/
4 changes: 2 additions & 2 deletions src/arch/armv8/aarch32/inc/arch/subarch/sysregs.h
@@ -59,8 +59,8 @@
}

/**
* We give aarch32 registers the same name as aarch64's to which they are
* architecturally mapped to, so that we can use the same name in common code.
* We give aarch32 registers the same name as aarch64's to which they are architecturally mapped
* to, so that we can use the same name in common code.
*/
SYSREG_GEN_ACCESSORS(esr_el2, 4, c5, c2, 0); // hsr
SYSREG_GEN_ACCESSORS_BANKED(elr_el2, elr_hyp);
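For context, a generator like SYSREG_GEN_ACCESSORS plausibly expands to a read/write pair around the aarch32 coprocessor instructions; the sketch below is an assumption about the shape of that expansion, not Bao's exact macro body:

#define SYSREG_GEN_ACCESSORS_SKETCH(name, op1, crn, crm, op2)           \
    static inline unsigned long sysreg_##name##_read(void)              \
    {                                                                    \
        unsigned long val;                                               \
        asm volatile("mrc p15, " #op1 ", %0, " #crn ", " #crm ", " #op2 \
                     : "=r"(val));                                       \
        return val;                                                      \
    }                                                                    \
    static inline void sysreg_##name##_write(unsigned long val)         \
    {                                                                    \
        asm volatile("mcr p15, " #op1 ", %0, " #crn ", " #crm ", " #op2 \
                     : : "r"(val));                                      \
    }

/* With this shape, SYSREG_GEN_ACCESSORS(esr_el2, 4, c5, c2, 0) reads/writes
 * the aarch32 HSR register under the aarch64 name esr_el2, which is exactly
 * the name mapping the comment describes. */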
40 changes: 18 additions & 22 deletions src/arch/armv8/aarch64/boot.S
@@ -12,16 +12,15 @@
.data
.align 3
/**
* barrier is used to minimal synchronization in boot - other cores wait for
* bsp to set it.
* barrier is used to minimal synchronization in boot - other cores wait for bsp to set it.
*/
.global _boot_barrier
_boot_barrier: .8byte 0

/**
* The following code MUST be at the base of the image, as this is bao's entry
* point. Therefore .boot section must also be the first in the linker script.
* DO NOT implement any code before the _reset_handler in this section.
* The following code MUST be at the base of the image, as this is bao's entry point. Therefore
* .boot section must also be the first in the linker script. DO NOT implement any code before the
* _reset_handler in this section.
*/
.section ".boot", "ax"
.globl _reset_handler
@@ -31,9 +30,8 @@ _reset_handler:

/**
* TODO: before anything...
* perform sanity checks on ID registers to ensure support for
* VE and TZ, 4K granule and possibly other needed features.
* Also, check current exception level. Act accordingly.
* perform sanity checks on ID registers to ensure support for VE and TZ, 4K granule and
* possibly other needed features. Also, check current exception level. Act accordingly.
* However, we expect to be running at EL2 at this point.
*/

@@ -51,17 +49,17 @@ _reset_handler:
adrp x1, _image_start

/*
* Install vector table physical address early, in case exception occurs
* during this initialization.
* Install vector table physical address early, in case exception occurs during this
* initialization.
*/
adr x3, _hyp_vector_table
msr VBAR_EL2, x3

/**
* Linearize cpu id according to the number of clusters and processors per
* cluster. We are only considering two levels of affinity.
* TODO: this should be done some other way. We shouldn't depend on the platform
* description this early in the initialization.
* Linearize cpu id according to the number of clusters and processors per cluster. We are
* only considering two levels of affinity.
* TODO: this should be done some other way. We shouldn't depend on the platform description
* this early in the initialization.
*/

mov x3, x0, lsr #8
@@ -96,9 +94,9 @@ _reset_handler:
cbnz x9, 1f
#else
/**
* If the cpu master is not fixed, for setting it, we assume only one cpu is
* initially activated which later will turn on all the others. Therefore, there
* is no concurrency when setting CPU_MASTER and no atomic operations are needed.
* If the cpu master is not fixed, for setting it, we assume only one cpu is initially activated
* which later will turn on all the others. Therefore, there is no concurrency when setting
* CPU_MASTER and no atomic operations are needed.
*/
.pushsection .data
_master_set:
@@ -116,9 +114,8 @@ _set_master_cpu:
1:

/**
* TODO: bring the system to a well known state. This includes disabling
* the MMU (done), all caches (missing i$), BP and others...
* and invalidating them.
* TODO: bring the system to a well known state. This includes disabling the MMU (done),
* all caches (missing i$), BP and others... and invalidating them.
*/

/* boot_clear stack pointer to avoid unaligned SP exceptions during boot */
@@ -195,8 +192,7 @@ boot_clear:
.endfunc

/*
* Code taken from "Application Note Bare-metal Boot Code for ARMv8-A
* Processors - Version 1.0"
* Code taken from "Application Note Bare-metal Boot Code for ARMv8-A Processors - Version 1.0"
*
* x0 - cache level to be invalidated (0 - dl1$, 1 - il1$, 2 - l2$)
*/
3 changes: 1 addition & 2 deletions src/arch/armv8/aarch64/inc/arch/spinlock.h
@@ -22,8 +22,7 @@ static inline void spinlock_init(spinlock_t* lock)
}

/**
* This lock follows the ticket lock algorithm described in Arm's ARM DDI0487I.a
* Appendix K13.
* This lock follows the ticket lock algorithm described in Arm's ARM DDI0487I.a Appendix K13.
*/

static inline void spin_lock(spinlock_t* lock)
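The ticket lock referenced in the spinlock.h comment above can be summarized in portable C11; this is a sketch of the algorithm only, not Bao's LDAXR/STXR-based implementation:

#include <stdatomic.h>
#include <stdint.h>

typedef struct {
    atomic_uint_fast32_t next;  /* next ticket to hand out */
    atomic_uint_fast32_t owner; /* ticket currently being served */
} ticket_lock_sketch_t;

static void ticket_lock(ticket_lock_sketch_t* lock)
{
    uint_fast32_t t = atomic_fetch_add_explicit(&lock->next, 1, memory_order_relaxed);
    while (atomic_load_explicit(&lock->owner, memory_order_acquire) != t) {
        /* spin until our ticket is served; the assembly version waits with wfe */
    }
}

static void ticket_unlock(ticket_lock_sketch_t* lock)
{
    atomic_fetch_add_explicit(&lock->owner, 1, memory_order_release);
}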
3 changes: 1 addition & 2 deletions src/arch/armv8/aarch64/inc/arch/subarch/vm.h
@@ -12,7 +12,6 @@ struct arch_regs {
uint64_t x[31];
uint64_t elr_el2;
uint64_t spsr_el2;
} __attribute__((aligned(16))); // makes size always aligned to 16 to respect
// stack alignment
} __attribute__((aligned(16))); // makes size always aligned to 16 to respect stack alignment

#endif /* VM_SUBARCH_H */
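The aligned(16) attribute in the hunk above matters because AArch64 requires SP to stay 16-byte aligned; rounding the struct size up guarantees that pushing one register frame preserves that. A small self-contained check (the struct name here is illustrative):

#include <stdint.h>

struct arch_regs_sketch {
    uint64_t x[31];
    uint64_t elr_el2;
    uint64_t spsr_el2;
} __attribute__((aligned(16))); /* size is rounded up to a multiple of 16 */

_Static_assert(sizeof(struct arch_regs_sketch) % 16 == 0,
               "register frame must preserve 16-byte stack alignment");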
9 changes: 4 additions & 5 deletions src/arch/armv8/aborts.c
@@ -37,8 +37,7 @@ void aborts_data_lower(unsigned long iss, unsigned long far, unsigned long il, u
emul.reg_width = 4 + (4 * bit64_extract(iss, ESR_ISS_DA_SF_OFF, ESR_ISS_DA_SF_LEN));
emul.sign_ext = bit64_extract(iss, ESR_ISS_DA_SSE_OFF, ESR_ISS_DA_SSE_LEN);

// TODO: check if the access is aligned. If not, inject an exception in
// the vm
// TODO: check if the access is aligned. If not, inject an exception in the vm

if (handler(&emul)) {
unsigned long pc_step = 2 + (2 * il);
@@ -101,9 +100,9 @@ void smc_handler(unsigned long iss, unsigned long far, unsigned long il, unsigne
syscall_handler(iss, far, il, ec);

/**
* Since SMCs are trapped due to setting hcr_el2.tsc, the "preferred
* exception return address" is the address of the actual smc instruction.
* Thus, we need to adjust it to the next instruction.
* Since SMCs are trapped due to setting hcr_el2.tsc, the "preferred exception return address"
* is the address of the actual smc instruction. Thus, we need to adjust it to the next
* instruction.
*/
vcpu_writepc(cpu()->vcpu, vcpu_readpc(cpu()->vcpu) + 4);
}
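On the alignment TODO in aborts_data_lower above: the data-abort ISS encodes the access size in its SAS field (bits [23:22]) as 2^SAS bytes, so the check amounts to testing the fault address against that size. A hedged sketch using raw bit positions instead of Bao's ESR_ISS_* macros:

#include <stdbool.h>
#include <stdint.h>

static bool data_access_is_aligned(uint64_t iss, uint64_t far)
{
    uint64_t sas = (iss >> 22) & 0x3;   /* 0=byte, 1=half, 2=word, 3=dword */
    uint64_t size = UINT64_C(1) << sas; /* access size in bytes */
    return (far & (size - 1)) == 0;     /* aligned iff the low bits are clear */
}

/* If this returns false, the handler would inject an alignment-fault exception
 * into the VM instead of emulating the access. */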
4 changes: 2 additions & 2 deletions src/arch/armv8/armv8-a/aarch32/boot.S
@@ -24,8 +24,8 @@ boot_arch_profile_init:
mov r13, lr

/*
* Register r12 contains the size of the allocated physical memory between
* the loadable sections of the image and the non-loadable.
* Register r12 contains the size of the allocated physical memory between the loadable
* sections of the image and the non-loadable.
*/
ldr r10, =extra_allocated_phys_mem

3 changes: 1 addition & 2 deletions src/arch/armv8/armv8-a/aarch32/vmm.c
@@ -10,8 +10,7 @@
void vmm_arch_init_tcr()
{
if (cpu()->id == CPU_MASTER) {
/* Despite LPAE, we only support 32-bit guest physical address spaces.
*/
/* Despite LPAE, we only support 32-bit guest physical address spaces. */
parange = PAR_32BIT;
}

18 changes: 8 additions & 10 deletions src/arch/armv8/armv8-a/aarch64/boot.S
@@ -18,8 +18,8 @@ boot_arch_profile_init:
mov x20, x30

/*
* Register x18 contains the size of the allocated physical memory between
* the loadable sections of the image and the non-loadable.
* Register x18 contains the size of the allocated physical memory between the loadable
* sections of the image and the non-loadable.
*/
ldr x18, =extra_allocated_phys_mem

@@ -172,8 +172,8 @@ map_cpu:
setup_cpu:

/**
* The operation is purposely commented out.
* We are assuming monitor code already enabled smp coherency.
* The operation is purposely commented out. We are assuming monitor code already enabled smp
* coherency.
*/

/* setup translation configurations */
@@ -195,14 +195,12 @@
msr TTBR0_EL2, x3

/**
* TODO: set implementation defined registers such as ACTLR or AMAIR.
* Maybe define a macro for this in a implementation oriented directory
* inside arch.
* TODO: set implementation defined registers such as ACTLR or AMAIR. Maybe define a macro for
* this in a implementation oriented directory inside arch.
*/

/**
* TODO: invalidate caches, TLBs and branch prediction.
* Need for barriers?
* TODO: invalidate caches, TLBs and branch prediction. Need for barriers?
*/

ldr x5, =_enter_vas
@@ -248,7 +246,7 @@ warm_boot:

/* save x0 which contains pointer to saved state psci context */
mov x19, x0
/* invalidate l1$ */
/* invalidate l1$ */
mov x0, #0
bl boot_cache_invalidate

3 changes: 1 addition & 2 deletions src/arch/armv8/armv8-a/aarch64/relocate.S
@@ -50,8 +50,7 @@ memcpy:
switch_space:

/**
* update flat maping page table entry to feature new physical address space
* entry page
* update flat maping page table entry to feature new physical address space entry page
*/
adr x3, _image_start
PTE_INDEX_ASM x4, x3, 1
12 changes: 5 additions & 7 deletions src/arch/armv8/armv8-a/aarch64/vmm.c
@@ -10,14 +10,12 @@
void vmm_arch_init_tcr()
{
/**
* Check available physical address range which will limit
* IPA size. Patch 2-stage page table descriptors if this forces
* the initial lookup to level 1.
* Check available physical address range which will limit IPA size. Patch 2-stage page table
* descriptors if this forces the initial lookup to level 1.
*
* In multi-cluster heterogenous we only support the minimum parange
* for a vm's physicall adress space.
* TODO: we could make this more dynamic and adapt it to each virtual
* machine.
* In multi-cluster heterogenous we only support the minimum parange for a vm's physicall
* adress space.
* TODO: we could make this more dynamic and adapt it to each virtual machine.
*/

static size_t min_parange = 0b111;
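The "minimum parange" policy in the comment above can be pictured as every core reporting ID_AA64MMFR0_EL1.PARange (bits [3:0]) and the hypervisor keeping the smallest value, since that is what all clusters can address. A sketch under the assumption that calls are serialized during init:

#include <stdint.h>

static uint64_t min_parange_sketch = 0x7; /* architectural maximum encoding */

static void update_min_parange(void)
{
    uint64_t mmfr0;
    asm volatile("mrs %0, ID_AA64MMFR0_EL1" : "=r"(mmfr0));
    uint64_t parange = mmfr0 & 0xf; /* PARange field, bits [3:0] */
    if (parange < min_parange_sketch) {
        min_parange_sketch = parange; /* no locking: assumed serialized at init */
    }
}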
11 changes: 5 additions & 6 deletions src/arch/armv8/armv8-a/cpu.c
@@ -11,8 +11,8 @@
void cpu_arch_profile_init(cpuid_t cpuid, paddr_t load_addr)
{
if (cpuid == CPU_MASTER) {
/* power on necessary, but still sleeping, secondary cpu cores
* Assumes CPU zero is doing this */
/* power on necessary, but still sleeping, secondary cpu cores Assumes CPU zero is doing
* this */
for (size_t cpu_core_id = 0; cpu_core_id < platform.cpu_num; cpu_core_id++) {
if (cpu_core_id == cpuid) {
continue;
@@ -34,8 +34,7 @@ void cpu_arch_profile_idle()
switch (err) {
case PSCI_E_NOT_SUPPORTED:
/**
* If power down is not supported let's just wait for an
* interrupt
* If power down is not supported let's just wait for an interrupt
*/
asm volatile("wfi");
break;
@@ -45,7 +44,7 @@
}

/**
* Power down was sucessful but did not jump to requested entry
* point. Just return to the architectural
* Power down was sucessful but did not jump to requested entry point. Just return to the
* architectural
*/
}
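The loop in cpu_arch_profile_init above brings secondary cores out of reset through PSCI. A raw CPU_ON call (SMC64 function id 0xC4000003) looks roughly like the sketch below; Bao wraps this behind its own psci interface, so the function here is purely illustrative:

#include <stdint.h>

static int64_t psci_cpu_on_sketch(uint64_t target_mpidr, uint64_t entrypoint, uint64_t ctx)
{
    register uint64_t x0 asm("x0") = UINT64_C(0xC4000003); /* PSCI CPU_ON (SMC64) */
    register uint64_t x1 asm("x1") = target_mpidr;
    register uint64_t x2 asm("x2") = entrypoint;
    register uint64_t x3 asm("x3") = ctx;
    asm volatile("smc #0" : "+r"(x0) : "r"(x1), "r"(x2), "r"(x3) : "memory");
    return (int64_t)x0; /* 0 on success, negative PSCI error otherwise */
}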
6 changes: 3 additions & 3 deletions src/arch/armv8/armv8-a/inc/arch/page_table.h
@@ -15,9 +15,9 @@
#define PTE_INDEX_SHIFT(LEVEL) ((9 * (3 - LEVEL)) + 12)
#define PTE_INDEX(LEVEL, ADDR) ((ADDR >> PTE_INDEX_SHIFT(LEVEL)) & (0x1FF))

// We turn clang-format off at this point since this is an assembly macro
// and thus is incorrectly formatted. Despite this being assembly we keep
// this macro here so that is next to its C macro counter-part defined above.
// We turn clang-format off at this point since this is an assembly macro and thus is incorrectly
// formatted. Despite this being assembly we keep this macro here so that is next to its C macro
// counter-part defined above.
// clang-format off
.macro PTE_INDEX_ASM index, addr, level
lsr \index, \addr, #PTE_INDEX_SHIFT(\level)
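A quick worked example of the C macros above (copied here so the asserts are self-contained): with a 4K granule and 9 bits per level, level 0 indexes VA bits [47:39] and level 3 indexes bits [20:12]:

#include <stdint.h>

#define PTE_INDEX_SHIFT(LEVEL) ((9 * (3 - LEVEL)) + 12)
#define PTE_INDEX(LEVEL, ADDR) ((ADDR >> PTE_INDEX_SHIFT(LEVEL)) & (0x1FF))

_Static_assert(PTE_INDEX_SHIFT(0) == 39, "level 0 consumes VA bits [47:39]");
_Static_assert(PTE_INDEX_SHIFT(3) == 12, "level 3 consumes VA bits [20:12]");
_Static_assert(PTE_INDEX(3, (UINT64_C(0x1AB) << 12)) == 0x1AB,
               "a VA whose bits [20:12] are 0x1AB maps to level-3 index 0x1AB");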
5 changes: 2 additions & 3 deletions src/arch/armv8/armv8-a/mem.c
@@ -13,9 +13,8 @@ void as_arch_init(struct addr_space* as)
size_t index;

/*
* If the address space is a copy of an existing hypervisor space it's not
* possible to use the PT_CPU_REC index to navigate it, so we have to use
* the PT_VM_REC_IND.
* If the address space is a copy of an existing hypervisor space it's not possible to use the
* PT_CPU_REC index to navigate it, so we have to use the PT_VM_REC_IND.
*/
if (as->type == AS_HYP_CPY || as->type == AS_VM) {
index = PT_VM_REC_IND;
