From 4b02a7e320981e079a7f421dbeaa78f379ef4ed2 Mon Sep 17 00:00:00 2001 From: Meet Patel Date: Tue, 6 Jan 2026 10:01:09 +0530 Subject: [PATCH] fix(spinlock): Added fence instruction in spinlock acquire and release The existing spinlock mechanism possibly has an overlap of memory operations during multi-core execution, as visible in CI testing. When running the example inter_cpu_critical_section, shared count increment stops at 299999 instead of reaching 300000, but this only happens randomly 1 out of 10 times. It is suspected that a memory operation happens simultaneously from both cores, even though spinlock protection is in place. To handle this problem, a memory barrier (fence instruction) is added at critical places during spinlock acquire and release, to ensure that all memory operations up to that point are completed and synchronised before proceeding further. --- .../lp_core/shared/ulp_lp_core_critical_section_shared.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/components/ulp/lp_core/shared/ulp_lp_core_critical_section_shared.c b/components/ulp/lp_core/shared/ulp_lp_core_critical_section_shared.c index fa0816996a..54787ffe11 100644 --- a/components/ulp/lp_core/shared/ulp_lp_core_critical_section_shared.c +++ b/components/ulp/lp_core/shared/ulp_lp_core_critical_section_shared.c @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD + * SPDX-FileCopyrightText: 2024-2026 Espressif Systems (Shanghai) CO LTD * * SPDX-License-Identifier: Apache-2.0 */ @@ -106,8 +106,12 @@ static void ulp_lp_core_spinlock_acquire(ulp_lp_core_spinlock_t *lock) for (lv = 0; lv < (int)LOCK_CANDIDATE_NUM_MAX - 1; lv++) { /* Each candidate has to go through all the levels in order to get the spinlock. 
Start by notifying other candidates, we have reached level `lv` */ lock->level[lock_candidate_id] = lv; + /* Fence instruction to ensure ordering of memory operations */ + __asm__ volatile("fence rw, rw" ::: "memory"); /* Notify other candidates we are the latest one who entered level `lv` */ lock->last_to_enter[lv] = lock_candidate_id; + /* Fence instruction to ensure ordering of memory operations */ + __asm__ volatile("fence rw, rw" ::: "memory"); /* If there is any candidate that reached the same or a higher level than this candidate, wait for it to finish. Advance to the next level if another candidate becomes the latest one to arrive at our current level */ for (candidate = 0; candidate < (int)LOCK_CANDIDATE_NUM_MAX; candidate++) { while ((candidate != lock_candidate_id) && (lock->level[candidate] >= lv && lock->last_to_enter[lv] == lock_candidate_id)) { @@ -128,6 +132,8 @@ static void ulp_lp_core_spinlock_release(ulp_lp_core_spinlock_t *lock) int lock_candidate_id = ulp_lp_core_spinlock_get_candidate_id(); lock->level[lock_candidate_id] = -1; + /* Fence instruction to ensure ordering of memory operations */ + __asm__ volatile("fence rw, rw" ::: "memory"); } #endif