diff --git a/tests/arch/arc/arc_vpx_lock/CMakeLists.txt b/tests/arch/arc/arc_vpx_lock/CMakeLists.txt
new file mode 100644
index 000000000000000..8818f4f73f459e7
--- /dev/null
+++ b/tests/arch/arc/arc_vpx_lock/CMakeLists.txt
@@ -0,0 +1,16 @@
+# Copyright (c) 2022 Synopsys
+# SPDX-License-Identifier: Apache-2.0
+
+cmake_minimum_required(VERSION 3.20.0)
+find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
+project(vpx_lock)
+
+target_sources(app PRIVATE
+  src/main.c
+  )
+
+if(COMPILER STREQUAL arcmwdt)
+zephyr_include_directories(${ARCMWDT_TOOLCHAIN_PATH}/MetaWare/arc/lib/src/fx/include/)
+get_property(Z_ARC_DSP_OPTIONS GLOBAL PROPERTY z_arc_dsp_options)
+target_compile_options(app PRIVATE ${Z_ARC_DSP_OPTIONS})
+endif()
diff --git a/tests/arch/arc/arc_vpx_lock/README.txt b/tests/arch/arc/arc_vpx_lock/README.txt
new file mode 100644
index 000000000000000..429b73381c66e2a
--- /dev/null
+++ b/tests/arch/arc/arc_vpx_lock/README.txt
@@ -0,0 +1,12 @@
+Title: ARC VPX Lock
+
+Description:
+
+This test verifies that the ARC VPX lock/unlock mechanism used to bookend
+code that uses the ARC VPX vector registers works correctly. As this VPX
+lock/unlock mechanism does not technically require those registers to be
+used to control access to them (they bookend the relevant code sections),
+the test does not actually access those VPX registers.
+
+However, it does check that the system behaves as expected when the ARC VPX
+lock/unlock mechanism is used.
diff --git a/tests/arch/arc/arc_vpx_lock/prj.conf b/tests/arch/arc/arc_vpx_lock/prj.conf
new file mode 100644
index 000000000000000..8414394ec0dd085
--- /dev/null
+++ b/tests/arch/arc/arc_vpx_lock/prj.conf
@@ -0,0 +1,5 @@
+CONFIG_ZTEST=y
+CONFIG_ARC_DSP=y
+CONFIG_MAIN_STACK_SIZE=1024
+CONFIG_ARC_VPX_COOPERATIVE_SHARING=y
+CONFIG_IRQ_OFFLOAD=y
diff --git a/tests/arch/arc/arc_vpx_lock/src/main.c b/tests/arch/arc/arc_vpx_lock/src/main.c
new file mode 100644
index 000000000000000..b6722c546f31d96
--- /dev/null
+++ b/tests/arch/arc/arc_vpx_lock/src/main.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2024 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <zephyr/kernel.h>
+
+#include <zephyr/ztest.h>
+
+#ifndef CONFIG_ARC_DSP
+#error "Rebuild with the ARC_DSP config option enabled"
+#endif
+
+#ifndef CONFIG_ARC_VPX_COOPERATIVE_SHARING
+#error "Rebuild with the ARC_VPX_COOPERATIVE_SHARING config option enabled"
+#endif
+
+#define STACK_SIZE (1024 + CONFIG_TEST_EXTRA_STACK_SIZE)
+
+#define ISR_VPX_LOCK 0
+#define ISR_VPX_UNLOCK 1
+
+/* Shared between a test thread and the helper thread that pins it. */
+struct test_data {
+	k_tid_t thread;     /* Thread to be pinned to a CPU */
+	uint32_t cpu_mask;  /* Bitmask of CPUs it may run on */
+};
+
+static void timer_func(struct k_timer *timer);
+
+K_THREAD_STACK_DEFINE(helper_stack, STACK_SIZE);
+
+static K_TIMER_DEFINE(my_timer, timer_func, NULL);
+static K_SEM_DEFINE(test_sem1, 0, 1);
+static K_SEM_DEFINE(test_sem2, 0, 1);
+
+static struct test_data data;
+static struct k_thread helper_thread;
+
+static volatile int isr_result;
+static volatile unsigned int isr_vpx_lock_id;
+
+/**
+ * Obtain the current CPU id.
+ */
+static int current_cpu_id_get(void)
+{
+	int key;
+	int id;
+
+	key = arch_irq_lock();
+	id = _current_cpu->id;
+	arch_irq_unlock(key);
+
+	return id;
+}
+
+/*
+ * Helper thread: on request (test_sem1), restrict the CPU affinity of the
+ * thread recorded in <data> to the CPUs in <data.cpu_mask>, then signal
+ * completion via test_sem2.
+ */
+static void test_helper_entry(void *p1, void *p2, void *p3)
+{
+	uint32_t i;
+
+	while (1) {
+		k_sem_take(&test_sem1, K_FOREVER);
+
+#if CONFIG_SCHED_CPU_MASK
+		/*
+		 * Suspend the target thread to ensure that it does not run
+		 * while we modify the set of CPUs upon which it may execute.
+		 */
+
+		k_thread_suspend(data.thread);
+
+		for (i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
+			if ((data.cpu_mask & BIT(i)) == 0) {
+				k_thread_cpu_mask_disable(data.thread, i);
+			} else {
+				k_thread_cpu_mask_enable(data.thread, i);
+			}
+		}
+
+		k_thread_resume(data.thread);
+#endif
+
+		k_sem_give(&test_sem2);
+	}
+}
+
+/*
+ * Pin the calling thread to the CPU it is currently executing on and
+ * return that CPU's id. The actual mask update is delegated to the
+ * helper thread via the two semaphores.
+ */
+static unsigned int pin_thread_to_current_cpu(void)
+{
+	unsigned int cpu_id;
+
+	data.thread = k_current_get();
+	cpu_id = current_cpu_id_get();
+	data.cpu_mask = BIT(cpu_id);
+
+	k_sem_give(&test_sem1);
+	k_sem_take(&test_sem2, K_FOREVER);
+
+	return cpu_id;
+}
+
+/* Offloaded ISR: lock or unlock the VPX registers from interrupt context. */
+static void vpx_isr(const void *arg)
+{
+	uint32_t isr_action = (uint32_t)(uintptr_t)(arg);
+
+	switch (isr_action) {
+	case ISR_VPX_LOCK:
+		isr_result = arc_vpx_lock(&isr_vpx_lock_id, K_NO_WAIT);
+		break;
+	case ISR_VPX_UNLOCK:
+		arc_vpx_unlock(isr_vpx_lock_id);
+		break;
+	}
+}
+
+/* Timer expiry: release the VPX lock recorded in <isr_vpx_lock_id>. */
+static void timer_func(struct k_timer *timer)
+{
+	arc_vpx_unlock(isr_vpx_lock_id);
+}
+
+/*
+ * Verify that a thread blocked on arc_vpx_lock() is woken when the lock
+ * is released asynchronously (here, from a timer callback).
+ */
+ZTEST(vpx_lock, arc_vpx_lock_unlock_timed)
+{
+	int status;
+	unsigned int cpu_id;
+	unsigned int lock_id;
+	unsigned int new_lock_id;
+
+	cpu_id = pin_thread_to_current_cpu();
+
+	status = arc_vpx_lock(&lock_id, K_NO_WAIT);
+	zassert_equal(0, status,
+		      "Expected return value %d, not %d\n", 0, status);
+	zassert_equal(cpu_id, lock_id,
+		      "Expected lock ID %u, not %u\n", cpu_id, lock_id);
+
+	/*
+	 * In 1 second, release the VPX lock. However, wait up to
+	 * 5 seconds before considering this a failure.
+	 */
+
+	isr_vpx_lock_id = lock_id;
+	k_timer_start(&my_timer, K_MSEC(1000), K_FOREVER);
+
+	status = arc_vpx_lock(&new_lock_id, K_MSEC(5000));
+	zassert_equal(0, status,
+		      "Expected return value %d, not %d\n", 0, status);
+	zassert_equal(lock_id, new_lock_id,
+		      "Expected lock ID %u, not %u\n", lock_id, new_lock_id);
+
+	arc_vpx_unlock(new_lock_id);
+}
+
+/*
+ * Verify lock/unlock behavior from ISR context: a held lock reports
+ * -EBUSY to an ISR, and an ISR can both release and acquire the lock.
+ */
+ZTEST(vpx_lock, arc_vpx_lock_unlock_isr)
+{
+	int status;
+	unsigned int cpu_id;
+	unsigned int lock_id;
+
+	cpu_id = pin_thread_to_current_cpu();
+
+	status = arc_vpx_lock(&lock_id, K_NO_WAIT);
+	zassert_equal(0, status,
+		      "Expected return value %d, not %d\n", 0, status);
+	zassert_equal(cpu_id, lock_id,
+		      "Expected lock ID %u, not %u\n", cpu_id, lock_id);
+
+	/* VPX already locked. */
+	irq_offload(vpx_isr, (void *)(uintptr_t)ISR_VPX_LOCK);
+	zassert_equal(-EBUSY, isr_result, /* Expect failure */
+		      "Expected return value %d (-EBUSY), not %d\n",
+		      -EBUSY, isr_result);
+
+	/* Force the VPX unlock via ISR */
+	isr_vpx_lock_id = lock_id;
+	irq_offload(vpx_isr, (void *)(uintptr_t)ISR_VPX_UNLOCK);
+
+	/* Lock/unlock from an ISR */
+	irq_offload(vpx_isr, (void *)(uintptr_t)ISR_VPX_LOCK);
+	zassert_equal(0, isr_result,
+		      "Expected return value %d, not %d\n", 0, isr_result);
+	zassert_equal(cpu_id, isr_vpx_lock_id,
+		      "Expected lock ID %u, not %u\n", cpu_id, isr_vpx_lock_id);
+	irq_offload(vpx_isr, (void *)(uintptr_t)ISR_VPX_UNLOCK);
+}
+
+/*
+ * Verify the basic thread-context contract: a free lock can be taken,
+ * a held lock reports -EBUSY (no wait) or -EAGAIN (timed wait), and
+ * unlocking makes it available again.
+ */
+ZTEST(vpx_lock, arc_vpx_lock_unlock)
+{
+	int status;
+	unsigned int cpu_id;
+	unsigned int lock_id;
+	unsigned int tmp;
+
+	cpu_id = pin_thread_to_current_cpu();
+
+	/* The VPX lock is available; take it. */
+
+	status = arc_vpx_lock(&lock_id, K_NO_WAIT);
+	zassert_equal(0, status,
+		      "Expected return value %d, not %d\n", 0, status);
+	zassert_equal(cpu_id, lock_id,
+		      "Expected lock ID %u, not %u\n", cpu_id, lock_id);
+
+	/* The VPX lock has already been taken; expect errors */
+
+	status = arc_vpx_lock(&tmp, K_NO_WAIT);
+	zassert_equal(-EBUSY, status,
+		      "Expected return value %d (-EBUSY), not %d\n",
+		      -EBUSY, status);
+
+	status = arc_vpx_lock(&tmp, K_MSEC(10));
+	zassert_equal(-EAGAIN, status,
+		      "Expected return value %d (-EAGAIN), not %d\n",
+		      -EAGAIN, status);
+
+	/* Verify that unlocking makes it available */
+
+	arc_vpx_unlock(lock_id);
+
+	status = arc_vpx_lock(&tmp, K_NO_WAIT);
+	zassert_equal(0, status,
+		      "Expected return value %d, not %d\n", 0, status);
+	zassert_equal(lock_id, tmp,
+		      "Expected lock ID %u, not %u\n", lock_id, tmp);
+	arc_vpx_unlock(tmp);
+}
+
+/*
+ * Suite setup: spawn the CPU-pinning helper thread at a priority higher
+ * than the test thread so its mask updates take effect immediately.
+ */
+static void *arc_vpx_test_setup(void)
+{
+	int priority;
+
+	priority = k_thread_priority_get(k_current_get());
+
+	k_thread_create(&helper_thread, helper_stack, STACK_SIZE,
+			test_helper_entry, NULL, NULL, NULL,
+			priority - 2, 0, K_NO_WAIT);
+
+	return NULL;
+}
+
+ZTEST_SUITE(vpx_lock, NULL, arc_vpx_test_setup, NULL, NULL, NULL);
diff --git a/tests/arch/arc/arc_vpx_lock/testcase.yaml b/tests/arch/arc/arc_vpx_lock/testcase.yaml
new file mode 100644
index 000000000000000..8726d6a6de0e784
--- /dev/null
+++ b/tests/arch/arc/arc_vpx_lock/testcase.yaml
@@ -0,0 +1,11 @@
+tests:
+  arch.arc.vpx_lock:
+    filter: CONFIG_ISA_ARCV2 and CONFIG_CPU_HAS_DSP
+    toolchain_allow: arcmwdt
+    platform_allow: nsim/nsim_vpx5
+  arch.arc.vpx_lock.cpu_mask:
+    filter: CONFIG_ISA_ARCV2 and CONFIG_CPU_HAS_DSP and (CONFIG_MP_MAX_NUM_CPUS > 1)
+    toolchain_allow: arcmwdt
+    platform_allow: nsim/nsim_vpx5
+    extra_configs:
+      - CONFIG_SCHED_CPU_MASK=y