fix(riscv): implement a workaround for Zcmp hardware bug

This commit is contained in:
Omar Chebib
2025-12-09 19:31:29 +08:00
parent add311ee4f
commit c27c33a832
8 changed files with 129 additions and 86 deletions
@@ -50,29 +50,25 @@ static DRAM_ATTR sleep_cpu_retention_t s_cpu_retention;
extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame; extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame;
FORCE_INLINE_ATTR uint32_t save_mstatus_and_disable_global_int(void) FORCE_INLINE_ATTR void save_csr_disable_global_int(uint32_t *mstatus_val, uint32_t *mintthresh_val)
{ {
return RV_READ_MSTATUS_AND_DISABLE_INTR(); #if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
*mintthresh_val = rv_utils_set_intlevel_regval(0xff);
#else
(void) mintthresh_val;
#endif
*mstatus_val = RV_READ_MSTATUS_AND_DISABLE_INTR();
} }
FORCE_INLINE_ATTR void restore_mstatus(uint32_t mstatus_val) FORCE_INLINE_ATTR void restore_csr_enable_global_int(uint32_t mstatus_val, uint32_t mintthresh_val)
{ {
RV_WRITE_CSR(mstatus, mstatus_val); RV_WRITE_CSR(mstatus, mstatus_val);
}
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND #if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
FORCE_INLINE_ATTR uint32_t save_mintthresh_and_disable_global_int(void) rv_utils_restore_intlevel_regval(mintthresh_val);
{ #else
/* Due to the reason described in IDF-14279, when mie is set to 0, mintthresh needs to be set to 0xff. */ (void) mintthresh_val;
// TODO: IDF-14279 DIG-661
return RV_READ_MINTTHRESH_AND_DISABLE_INTR();
}
FORCE_INLINE_ATTR void restore_mintthresh(uint32_t mintthresh_val)
{
RV_RESTORE_MINTTHRESH(mintthresh_val);
}
#endif #endif
}
static IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save(void) static IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save(void)
{ {
@@ -304,11 +300,10 @@ static IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
esp_err_t IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uint32_t, uint32_t, bool), esp_err_t IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uint32_t, uint32_t, bool),
uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp) uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{ {
uint32_t mstatus = 0;
uint32_t mintthresh = 0;
esp_sleep_execute_event_callbacks(SLEEP_EVENT_SW_CPU_TO_MEM_START, (void *)0); esp_sleep_execute_event_callbacks(SLEEP_EVENT_SW_CPU_TO_MEM_START, (void *)0);
uint32_t mstatus = save_mstatus_and_disable_global_int(); save_csr_disable_global_int(&mstatus, &mintthresh);
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
uint32_t mintthresh = save_mintthresh_and_disable_global_int();
#endif
cpu_domain_dev_regs_save(s_cpu_retention.retent.clic_frame); cpu_domain_dev_regs_save(s_cpu_retention.retent.clic_frame);
cpu_domain_dev_regs_save(s_cpu_retention.retent.clint_frame); cpu_domain_dev_regs_save(s_cpu_retention.retent.clint_frame);
@@ -331,10 +326,7 @@ esp_err_t IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uin
cpu_domain_dev_regs_restore(s_cpu_retention.retent.clint_frame); cpu_domain_dev_regs_restore(s_cpu_retention.retent.clint_frame);
cpu_domain_dev_regs_restore(s_cpu_retention.retent.clic_frame); cpu_domain_dev_regs_restore(s_cpu_retention.retent.clic_frame);
} }
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND restore_csr_enable_global_int(mstatus, mintthresh);
restore_mintthresh(mintthresh);
#endif
restore_mstatus(mstatus);
return err; return err;
} }
@@ -50,14 +50,24 @@ static DRAM_ATTR sleep_cpu_retention_t s_cpu_retention;
extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame; extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame;
FORCE_INLINE_ATTR uint32_t save_mstatus_and_disable_global_int(void) FORCE_INLINE_ATTR void save_csr_disable_global_int(uint32_t *mstatus_val, uint32_t *mintthresh_val)
{ {
return RV_READ_MSTATUS_AND_DISABLE_INTR(); #if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
*mintthresh_val = rv_utils_set_intlevel_regval(0xff);
#else
(void) mintthresh_val;
#endif
*mstatus_val = RV_READ_MSTATUS_AND_DISABLE_INTR();
} }
FORCE_INLINE_ATTR void restore_mstatus(uint32_t mstatus_val) FORCE_INLINE_ATTR void restore_csr_enable_global_int(uint32_t mstatus_val, uint32_t mintthresh_val)
{ {
RV_WRITE_CSR(mstatus, mstatus_val); RV_WRITE_CSR(mstatus, mstatus_val);
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
rv_utils_restore_intlevel_regval(mintthresh_val);
#else
(void) mintthresh_val;
#endif
} }
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND #if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
@@ -304,11 +314,10 @@ static IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
esp_err_t IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uint32_t, uint32_t, bool), esp_err_t IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uint32_t, uint32_t, bool),
uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp) uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{ {
uint32_t mstatus = 0;
uint32_t mintthresh = 0;
esp_sleep_execute_event_callbacks(SLEEP_EVENT_SW_CPU_TO_MEM_START, (void *)0); esp_sleep_execute_event_callbacks(SLEEP_EVENT_SW_CPU_TO_MEM_START, (void *)0);
uint32_t mstatus = save_mstatus_and_disable_global_int(); save_csr_disable_global_int(&mstatus, &mintthresh);
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
uint32_t mintthresh = save_mintthresh_and_disable_global_int();
#endif
cpu_domain_dev_regs_save(s_cpu_retention.retent.clic_frame); cpu_domain_dev_regs_save(s_cpu_retention.retent.clic_frame);
cpu_domain_dev_regs_save(s_cpu_retention.retent.clint_frame); cpu_domain_dev_regs_save(s_cpu_retention.retent.clint_frame);
@@ -331,10 +340,7 @@ esp_err_t IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uin
cpu_domain_dev_regs_restore(s_cpu_retention.retent.clint_frame); cpu_domain_dev_regs_restore(s_cpu_retention.retent.clint_frame);
cpu_domain_dev_regs_restore(s_cpu_retention.retent.clic_frame); cpu_domain_dev_regs_restore(s_cpu_retention.retent.clic_frame);
} }
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND restore_csr_enable_global_int(mstatus, mintthresh);
restore_mintthresh(mintthresh);
#endif
restore_mstatus(mstatus);
return err; return err;
} }
@@ -64,29 +64,25 @@ static DRAM_ATTR __attribute__((unused)) sleep_cpu_retention_t s_cpu_retention;
extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame[portNUM_PROCESSORS]; extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame[portNUM_PROCESSORS];
FORCE_INLINE_ATTR uint32_t save_mstatus_and_disable_global_int(void) FORCE_INLINE_ATTR void save_csr_disable_global_int(uint32_t *mstatus_val, uint32_t *mintthresh_val)
{ {
return RV_READ_MSTATUS_AND_DISABLE_INTR(); #if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
*mintthresh_val = rv_utils_set_intlevel_regval(0xff);
#else
(void) mintthresh_val;
#endif
*mstatus_val = RV_READ_MSTATUS_AND_DISABLE_INTR();
} }
FORCE_INLINE_ATTR void restore_mstatus(uint32_t mstatus_val) FORCE_INLINE_ATTR void restore_csr_enable_global_int(uint32_t mstatus_val, uint32_t mintthresh_val)
{ {
RV_WRITE_CSR(mstatus, mstatus_val); RV_WRITE_CSR(mstatus, mstatus_val);
}
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND #if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
FORCE_INLINE_ATTR uint32_t save_mintthresh_and_disable_global_int(void) rv_utils_restore_intlevel_regval(mintthresh_val);
{ #else
/* Due to the reason described in IDF-14279, when mie is set to 0, mintthresh needs to be set to 0xff. */ (void) mintthresh_val;
// TODO: IDF-14279 DIG-661
return RV_READ_MINTTHRESH_AND_DISABLE_INTR();
}
FORCE_INLINE_ATTR void restore_mintthresh(uint32_t mintthresh_val)
{
RV_RESTORE_MINTTHRESH(mintthresh_val);
}
#endif #endif
}
static IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save(void) static IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save(void)
{ {
@@ -347,15 +343,14 @@ typedef uint32_t (* sleep_cpu_entry_cb_t)(uint32_t, uint32_t, uint32_t, bool);
static IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep, static IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp) uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{ {
uint32_t mstatus = 0;
uint32_t mintthresh = 0;
__attribute__((unused)) uint8_t core_id = esp_cpu_get_core_id(); __attribute__((unused)) uint8_t core_id = esp_cpu_get_core_id();
bool reject = false; bool reject = false;
RvCoreCriticalSleepFrame * frame = s_cpu_retention.retent.critical_frame[core_id]; RvCoreCriticalSleepFrame * frame = s_cpu_retention.retent.critical_frame[core_id];
/* mstatus is core privated CSR, do it near the core critical regs restore */ /* mstatus is core privated CSR, do it near the core critical regs restore */
uint32_t mstatus = save_mstatus_and_disable_global_int(); save_csr_disable_global_int(&mstatus, &mintthresh);
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
uint32_t mintthresh = save_mintthresh_and_disable_global_int();
#endif
s_fpu_saved[core_id] = xPortFPUContextIsDirty(core_id); s_fpu_saved[core_id] = xPortFPUContextIsDirty(core_id);
if (s_fpu_saved[core_id]) { if (s_fpu_saved[core_id]) {
rv_core_fpu_save(frame); rv_core_fpu_save(frame);
@@ -387,10 +382,7 @@ static IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
if (s_fpu_saved[core_id]) { if (s_fpu_saved[core_id]) {
rv_core_fpu_restore(frame); rv_core_fpu_restore(frame);
} }
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND restore_csr_enable_global_int(mstatus, mintthresh);
restore_mintthresh(mintthresh);
#endif
restore_mstatus(mstatus);
return reject ? reject : pmu_sleep_finish(dslp); return reject ? reject : pmu_sleep_finish(dslp);
} }
@@ -512,11 +504,10 @@ static IRAM_ATTR void smp_core_do_retention(void)
ESP_COMPILER_DIAGNOSTIC_POP("-Wanalyzer-infinite-loop") ESP_COMPILER_DIAGNOSTIC_POP("-Wanalyzer-infinite-loop")
if (!smp_skip_retention) { if (!smp_skip_retention) {
uint32_t mstatus = 0;
uint32_t mintthresh = 0;
atomic_store(&s_smp_retention_state[core_id], SMP_BACKUP_START); atomic_store(&s_smp_retention_state[core_id], SMP_BACKUP_START);
uint32_t mstatus = save_mstatus_and_disable_global_int(); save_csr_disable_global_int(&mstatus, &mintthresh);
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
uint32_t mintthresh = save_mintthresh_and_disable_global_int();
#endif
RvCoreCriticalSleepFrame *frame_critical = s_cpu_retention.retent.critical_frame[core_id]; RvCoreCriticalSleepFrame *frame_critical = s_cpu_retention.retent.critical_frame[core_id];
s_fpu_saved[core_id] = xPortFPUContextIsDirty(core_id); s_fpu_saved[core_id] = xPortFPUContextIsDirty(core_id);
if (s_fpu_saved[core_id]) { if (s_fpu_saved[core_id]) {
@@ -550,10 +541,7 @@ static IRAM_ATTR void smp_core_do_retention(void)
if (s_fpu_saved[core_id]) { if (s_fpu_saved[core_id]) {
rv_core_fpu_restore(frame_critical); rv_core_fpu_restore(frame_critical);
} }
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND restore_csr_enable_global_int(mstatus, mintthresh);
restore_mintthresh(mintthresh);
#endif
restore_mstatus(mstatus);
atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_DONE); atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_DONE);
} }
} }
@@ -52,14 +52,24 @@ static SPM_DRAM_ATTR __attribute__((unused)) sleep_cpu_retention_t s_cpu_retenti
extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame[portNUM_PROCESSORS]; extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame[portNUM_PROCESSORS];
FORCE_INLINE_ATTR uint32_t save_mstatus_and_disable_global_int(void) FORCE_INLINE_ATTR void save_csr_disable_global_int(uint32_t *mstatus_val, uint32_t *mintthresh_val)
{ {
return RV_READ_MSTATUS_AND_DISABLE_INTR(); #if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
*mintthresh_val = rv_utils_set_intlevel_regval(0xff);
#else
(void) mintthresh_val;
#endif
*mstatus_val = RV_READ_MSTATUS_AND_DISABLE_INTR();
} }
FORCE_INLINE_ATTR void restore_mstatus(uint32_t mstatus_val) FORCE_INLINE_ATTR void restore_csr_enable_global_int(uint32_t mstatus_val, uint32_t mintthresh_val)
{ {
RV_WRITE_CSR(mstatus, mstatus_val); RV_WRITE_CSR(mstatus, mstatus_val);
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
rv_utils_restore_intlevel_regval(mintthresh_val);
#else
(void) mintthresh_val;
#endif
} }
static SPM_IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save(void) static SPM_IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save(void)
@@ -263,7 +273,9 @@ static SPM_IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
bool reject = false; bool reject = false;
RvCoreCriticalSleepFrame *frame = s_cpu_retention.retent.critical_frame[core_id]; RvCoreCriticalSleepFrame *frame = s_cpu_retention.retent.critical_frame[core_id];
/* mstatus is core privated CSR, do it near the core critical regs restore */ /* mstatus is core privated CSR, do it near the core critical regs restore */
uint32_t mstatus = save_mstatus_and_disable_global_int(); uint32_t mstatus = 0;
uint32_t mintthresh = 0;
save_csr_disable_global_int(&mstatus, &mintthresh);
s_fpu_saved[core_id] = xPortFPUContextIsDirty(core_id); s_fpu_saved[core_id] = xPortFPUContextIsDirty(core_id);
if (s_fpu_saved[core_id]) { if (s_fpu_saved[core_id]) {
rv_core_fpu_save(frame); rv_core_fpu_save(frame);
@@ -284,7 +296,7 @@ static SPM_IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
} }
#endif #endif
reject = (*goto_sleep)(wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp); reject = (*goto_sleep)(wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);
} }
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME #if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
else { else {
@@ -294,7 +306,7 @@ static SPM_IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
if (s_fpu_saved[core_id]) { if (s_fpu_saved[core_id]) {
rv_core_fpu_restore(frame); rv_core_fpu_restore(frame);
} }
restore_mstatus(mstatus); restore_csr_enable_global_int(mstatus, mintthresh);
return reject ? reject : pmu_sleep_finish(dslp); return reject ? reject : pmu_sleep_finish(dslp);
} }
@@ -410,11 +422,13 @@ static SPM_IRAM_ATTR void smp_core_do_retention(void)
ESP_COMPILER_DIAGNOSTIC_POP("-Wanalyzer-infinite-loop") ESP_COMPILER_DIAGNOSTIC_POP("-Wanalyzer-infinite-loop")
if (!smp_skip_retention) { if (!smp_skip_retention) {
uint32_t mstatus = 0;
uint32_t mintthresh = 0;
atomic_store(&s_smp_retention_state[core_id], SMP_BACKUP_START); atomic_store(&s_smp_retention_state[core_id], SMP_BACKUP_START);
rv_core_noncritical_regs_save(); rv_core_noncritical_regs_save();
cpu_domain_dev_regs_save(s_cpu_retention.retent.clic_frame[core_id]); cpu_domain_dev_regs_save(s_cpu_retention.retent.clic_frame[core_id]);
RvCoreCriticalSleepFrame *frame_critical = s_cpu_retention.retent.critical_frame[core_id]; RvCoreCriticalSleepFrame *frame_critical = s_cpu_retention.retent.critical_frame[core_id];
uint32_t mstatus = save_mstatus_and_disable_global_int(); save_csr_disable_global_int(&mstatus, &mintthresh);
s_fpu_saved[core_id] = xPortFPUContextIsDirty(core_id); s_fpu_saved[core_id] = xPortFPUContextIsDirty(core_id);
if (s_fpu_saved[core_id]) { if (s_fpu_saved[core_id]) {
rv_core_fpu_save(frame_critical); rv_core_fpu_save(frame_critical);
@@ -440,7 +454,7 @@ static SPM_IRAM_ATTR void smp_core_do_retention(void)
if (s_fpu_saved[core_id]) { if (s_fpu_saved[core_id]) {
rv_core_fpu_restore(frame_critical); rv_core_fpu_restore(frame_critical);
} }
restore_mstatus(mstatus); restore_csr_enable_global_int(mstatus, mintthresh);
cpu_domain_dev_regs_restore(s_cpu_retention.retent.clic_frame[core_id]); cpu_domain_dev_regs_restore(s_cpu_retention.retent.clic_frame[core_id]);
rv_core_noncritical_regs_restore(); rv_core_noncritical_regs_restore();
atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_DONE); atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_DONE);
-9
View File
@@ -239,15 +239,6 @@ extern "C" {
#define RV_READ_MSTATUS_AND_DISABLE_INTR() ({ unsigned long __tmp; \ #define RV_READ_MSTATUS_AND_DISABLE_INTR() ({ unsigned long __tmp; \
asm volatile ("csrrci %0, mstatus, 0x8" : "=r"(__tmp)); __tmp; }) asm volatile ("csrrci %0, mstatus, 0x8" : "=r"(__tmp)); __tmp; })
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
#define RV_READ_MINTTHRESH_AND_DISABLE_INTR() ({ unsigned long __tmp; \
asm volatile ( \
"li t0, 0xff\n\t" \
"csrrw %0, %1, t0" : "=r"(__tmp) : "i"(MINTTHRESH_CSR) : "t0", "memory"); __tmp; })
#define RV_RESTORE_MINTTHRESH(val) \
asm volatile ("csrw %0, %1" :: "i"(MINTTHRESH_CSR), "r"(val) : "memory")
#endif
#define _CSR_STRINGIFY(REG) #REG /* needed so the 'reg' argument can be a macro or a register name */ #define _CSR_STRINGIFY(REG) #REG /* needed so the 'reg' argument can be a macro or a register name */
+43
View File
@@ -17,6 +17,10 @@
#include "riscv/csr_dsp.h" #include "riscv/csr_dsp.h"
#include "sdkconfig.h" #include "sdkconfig.h"
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
#include "esp_private/interrupt_clic.h"
#endif
#if CONFIG_SECURE_ENABLE_TEE && !NON_OS_BUILD #if CONFIG_SECURE_ENABLE_TEE && !NON_OS_BUILD
#include "secure_service_num.h" #include "secure_service_num.h"
#endif #endif
@@ -167,15 +171,50 @@ FORCE_INLINE_ATTR void rv_utils_set_xtvec(uint32_t xtvec_val)
// ------------------ Interrupt Control -------------------- // ------------------ Interrupt Control --------------------
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
/* Per-core saved interrupt-threshold (mintthresh) values; defined in the CLIC
 * interrupt driver translation unit. */
extern uint32_t g_xintthresh[SOC_CPU_CORES_NUM];

/**
 * @brief Raise the CLIC interrupt threshold to the maximum level (0xff) and
 *        save the previous value in g_xintthresh for the current core.
 *
 * Workaround for a hardware bug where an interrupt can be triggered by a Zcmp
 * `cm.push` instruction even while mstatus.mie is cleared, so the threshold
 * must be raised in addition to disabling global interrupts.
 */
FORCE_INLINE_ATTR void rv_utils_xintthres_raise(void)
{
    /**
     * Make sure NOT to use `g_xintthresh[rv_utils_get_core_id()] = rv_utils_set_intlevel_regval(0xff);`
     * since that statement would let the compiler first calculate the offset in `g_xintthresh` array
     * before setting the interrupt threshold, which may lead to a race condition if an interrupt occurs in between.
     */
    uint32_t threshold = rv_utils_set_intlevel_regval(0xff);
    g_xintthresh[rv_utils_get_core_id()] = threshold;
}

/**
 * @brief Restore the interrupt threshold previously saved for this core by
 *        rv_utils_xintthres_raise().
 */
FORCE_INLINE_ATTR void rv_utils_xintthres_lower(void)
{
    rv_utils_restore_intlevel_regval(g_xintthresh[rv_utils_get_core_id()]);
}
#else
/* Targets without the Zcmp workaround need no threshold manipulation:
 * provide no-op stubs so call sites stay unconditional. */
FORCE_INLINE_ATTR void rv_utils_xintthres_raise(void)
{
}

FORCE_INLINE_ATTR void rv_utils_xintthres_lower(void)
{
}
#endif // __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
FORCE_INLINE_ATTR void rv_utils_intr_enable(uint32_t intr_mask) FORCE_INLINE_ATTR void rv_utils_intr_enable(uint32_t intr_mask)
{ {
#if CONFIG_SECURE_ENABLE_TEE && !NON_OS_BUILD #if CONFIG_SECURE_ENABLE_TEE && !NON_OS_BUILD
esp_tee_intr_sec_srv_cb(2, SS_RV_UTILS_INTR_ENABLE, intr_mask); esp_tee_intr_sec_srv_cb(2, SS_RV_UTILS_INTR_ENABLE, intr_mask);
#else #else
// Disable all interrupts to make updating of the interrupt mask atomic. // Disable all interrupts to make updating of the interrupt mask atomic.
rv_utils_xintthres_raise();
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE); unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
esprv_int_enable(intr_mask); esprv_int_enable(intr_mask);
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE); RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
rv_utils_xintthres_lower();
#endif #endif
} }
@@ -185,9 +224,11 @@ FORCE_INLINE_ATTR void rv_utils_intr_disable(uint32_t intr_mask)
esp_tee_intr_sec_srv_cb(2, SS_RV_UTILS_INTR_DISABLE, intr_mask); esp_tee_intr_sec_srv_cb(2, SS_RV_UTILS_INTR_DISABLE, intr_mask);
#else #else
// Disable all interrupts to make updating of the interrupt mask atomic. // Disable all interrupts to make updating of the interrupt mask atomic.
rv_utils_xintthres_raise();
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE); unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
esprv_int_disable(intr_mask); esprv_int_disable(intr_mask);
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE); RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
rv_utils_xintthres_lower();
#endif #endif
} }
@@ -198,10 +239,12 @@ FORCE_INLINE_ATTR void rv_utils_intr_global_enable(void)
#else #else
RV_SET_CSR(mstatus, MSTATUS_MIE); RV_SET_CSR(mstatus, MSTATUS_MIE);
#endif #endif
rv_utils_xintthres_lower();
} }
FORCE_INLINE_ATTR void rv_utils_intr_global_disable(void) FORCE_INLINE_ATTR void rv_utils_intr_global_disable(void)
{ {
rv_utils_xintthres_raise();
#if CONFIG_SECURE_ENABLE_TEE #if CONFIG_SECURE_ENABLE_TEE
if (IS_PRV_M_MODE()) { if (IS_PRV_M_MODE()) {
RV_CLEAR_CSR(mstatus, MSTATUS_MIE); RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
+9
View File
@@ -7,7 +7,16 @@
#include "riscv/rv_utils.h" #include "riscv/rv_utils.h"
#include "hal/interrupt_clic_ll.h" #include "hal/interrupt_clic_ll.h"
#include "esp_private/interrupt_clic.h" #include "esp_private/interrupt_clic.h"
#include "esp_attr.h"
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
/* Per-core storage for the saved interrupt threshold (mintthresh).
 * Due to a hardware bug, the interrupt threshold must be saved before disabling the interrupts.
 * Make sure the array is always accessible — on ESP32-P4 it is placed in SPM DRAM,
 * presumably so it stays reachable across CPU retention (TODO confirm). */
#if CONFIG_IDF_TARGET_ESP32P4
SPM_DRAM_ATTR
#endif
uint32_t g_xintthresh[SOC_CPU_CORES_NUM];
#endif
void intr_matrix_route(int intr_src, int intr_num) void intr_matrix_route(int intr_src, int intr_num)
{ {
+2 -2
View File
@@ -184,8 +184,8 @@
.macro mintthresh_csr_disable reg .macro mintthresh_csr_disable reg
#if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND #if __riscv_zcmp && SOC_CPU_ZCMP_WORKAROUND
/* Workaround for triggering an interrupt even when mstatus.mie is 0, when cm.push is called. */ /* Workaround for triggering an interrupt even when mstatus.mie is 0, when cm.push is called. */
li t0, 0xff li \reg, 0xff
csrrw \reg, MINTTHRESH_CSR, t0 csrrw \reg, MINTTHRESH_CSR, \reg
#endif #endif
.endm .endm