feat(cache): support cache preload hal api

This commit is contained in:
armando
2026-03-10 15:03:05 +08:00
parent 638a47b704
commit a699805d39
22 changed files with 1090 additions and 44 deletions
+3 -9
View File
@@ -952,15 +952,9 @@ static IRAM_ATTR bool lcd_rgb_panel_fill_bounce_buffer(esp_rgb_panel_t *panel, u
// Preload the next bit of buffer to the cache memory, this can improve the performance
if (panel->num_fbs > 0 && panel->flags.fb_behind_cache) {
#if CONFIG_IDF_TARGET_ESP32S3
Cache_Start_DCache_Preload((uint32_t)&panel->fbs[panel->bb_fb_index][panel->bounce_pos_px * bytes_per_pixel],
panel->bb_size, 0);
#elif CONFIG_IDF_TARGET_ESP32P4
Cache_Start_L2_Cache_Preload((uint32_t)&panel->fbs[panel->bb_fb_index][panel->bounce_pos_px * bytes_per_pixel],
panel->bb_size, 0);
#else
#error "Unsupported target"
#endif
cache_hal_preload(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_DATA,
(uint32_t)&panel->fbs[panel->bb_fb_index][panel->bounce_pos_px * bytes_per_pixel],
panel->bb_size, false);
}
return need_yield;
}
+13 -2
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -15,7 +15,6 @@
#include "hal/mmu_hal.h"
#include "hal/mmu_ll.h"
#include "soc/soc_caps.h"
#include "rom/cache.h"
/*------------------------------------------------------------------------------
* Unified Cache Control
@@ -103,6 +102,8 @@ void cache_hal_init(const cache_hal_config_t *config)
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
cache_hal_init_l2_cache(config);
#endif
// CACHE_LL_PRELOAD_ARBITRARY gives better preload performance than the default strategy
cache_ll_preload_set_strategy(CACHE_LL_LEVEL_ALL, CACHE_TYPE_ALL, CACHE_LL_ID_ALL, CACHE_LL_PRELOAD_ARBITRARY);
}
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
@@ -324,3 +325,13 @@ uint32_t cache_hal_get_cache_line_size(uint32_t cache_level, cache_type_t type)
return line_size;
}
/**
 * @brief Start a manual cache preload for the region [vaddr, vaddr + size)
 *
 * Non-blocking: the LL layer only starts the preload and returns. Pair with
 * cache_hal_preload_wait_done() to wait for completion. The operation is
 * applied to all caches of the given level/type (CACHE_LL_ID_ALL).
 *
 * @param cache_level level of the cache to preload
 * @param type        cache type, see cache_type_t
 * @param vaddr       start virtual address of the region to preload
 * @param size        size of the region, in bytes
 * @param ascending   true: preload in ascending address order; false: descending
 */
void cache_hal_preload(uint32_t cache_level, cache_type_t type, uint32_t vaddr, uint32_t size, bool ascending)
{
    cache_ll_preload(cache_level, type, CACHE_LL_ID_ALL, vaddr, size, ascending);
}
/**
 * @brief Busy-wait until a previously started manual preload has finished
 *
 * Waits on all caches of the given level/type (CACHE_LL_ID_ALL).
 *
 * @param cache_level level of the cache being preloaded
 * @param type        cache type, see cache_type_t
 */
void cache_hal_preload_wait_done(uint32_t cache_level, cache_type_t type)
{
    cache_ll_preload_wait_done(cache_level, type, CACHE_LL_ID_ALL);
}
+6 -1
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -76,3 +76,8 @@ bool cache_hal_invalidate_addr(uint32_t vaddr, uint32_t size)
//esp32 doesn't support invalidating a specific address range
abort();
}
/**
 * @brief Manual cache preload — not supported on this target
 *
 * Provided only so common code can call the preload HAL unconditionally.
 * All parameters are ignored.
 */
void cache_hal_preload(uint32_t cache_level, cache_type_t type, uint32_t vaddr, uint32_t size, bool ascending)
{
    //not supported, for compatibility
    (void)cache_level;
    (void)type;
    (void)vaddr;
    (void)size;
    (void)ascending;
}

/**
 * @brief Wait for manual preload completion — not supported on this target
 *
 * NOTE(review): added for parity with the generic HAL, which defines both
 * cache_hal_preload() and cache_hal_preload_wait_done(); without this stub a
 * caller of the wait API would fail to link on this target. Confirm it is not
 * already defined elsewhere in this file.
 */
void cache_hal_preload_wait_done(uint32_t cache_level, cache_type_t type)
{
    //not supported, for compatibility
    (void)cache_level;
    (void)type;
}
+47 -1
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -44,6 +44,15 @@ extern "C" {
//On ESP32C2, the auto preload flag is always 0
#define CACHE_LL_L1_ICACHE_AUTOLOAD 0
/**
 * @brief Manual-preload strategy selector
 *
 * Values match the hardware encoding of the cache controller's preload
 * strategy field on targets that expose one; the setter is a no-op on this
 * target. CACHE_LL_PRELOAD_ARBITRARY is selected by cache_hal_init() for
 * better preload performance.
 */
typedef enum {
    CACHE_LL_PRELOAD_UNTIL_FETCH_DONE = 0,  /*!< hardware encoding 0 — TODO(review): confirm exact semantics in the TRM */
    CACHE_LL_PRELOAD_AFTER_FETCH = 1,       /*!< hardware encoding 1 */
    CACHE_LL_PRELOAD_ARBITRARY = 2,         /*!< hardware encoding 2 */
} cache_ll_preload_strategy_t;
/**
* @brief Check if Cache auto preload is enabled or not.
*
@@ -188,6 +197,43 @@ static inline void cache_ll_unfreeze_cache(uint32_t cache_level, cache_type_t ty
Cache_Freeze_ICache_Disable();
}
/**
 * @brief Set the manual preload strategy — no-op on this target
 *
 * Kept so common HAL code can call the preload API unconditionally on every
 * target. All parameters are ignored.
 */
__attribute__((always_inline))
static inline void cache_ll_preload_set_strategy(uint32_t cache_level, cache_type_t type, uint32_t cache_id, cache_ll_preload_strategy_t strategy)
{
    (void)cache_level;
    (void)type;
    (void)cache_id;
    (void)strategy;
}

/**
 * @brief Start a manual cache preload — no-op
 *
 * The ROM on this target exposes no manual preload API, so nothing is done.
 * All parameters are ignored.
 */
__attribute__((always_inline))
static inline void cache_ll_preload(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    (void)cache_level;
    (void)type;
    (void)cache_id;
    (void)vaddr;
    (void)size;
    (void)ascending;
}

/**
 * @brief Wait for preload completion — no-op (no preload is ever started here)
 */
__attribute__((always_inline))
static inline void cache_ll_preload_wait_done(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
    (void)cache_level;
    (void)type;
    (void)cache_id;
}
/**
* @brief Get Cache line size, in bytes
*
+55 -1
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -43,6 +43,15 @@ extern "C" {
#define CACHE_LL_LEVEL_NUMS 1 //Number of cache levels
#define CACHE_LL_L1_ICACHE_AUTOLOAD (1<<2)
/**
 * @brief Manual-preload strategy selector
 *
 * Values match the hardware encoding of the cache controller's preload
 * strategy field on targets that expose one; the setter is a no-op on this
 * target. CACHE_LL_PRELOAD_ARBITRARY is selected by cache_hal_init() for
 * better preload performance.
 */
typedef enum {
    CACHE_LL_PRELOAD_UNTIL_FETCH_DONE = 0,  /*!< hardware encoding 0 — TODO(review): confirm exact semantics in the TRM */
    CACHE_LL_PRELOAD_AFTER_FETCH = 1,       /*!< hardware encoding 1 */
    CACHE_LL_PRELOAD_ARBITRARY = 2,         /*!< hardware encoding 2 */
} cache_ll_preload_strategy_t;
/**
* @brief Check if Cache auto preload is enabled or not.
*
@@ -191,6 +200,51 @@ static inline void cache_ll_unfreeze_cache(uint32_t cache_level, cache_type_t ty
Cache_Freeze_ICache_Disable();
}
/**
 * @brief Set the manual preload strategy — no-op on this target (no strategy register)
 */
__attribute__((always_inline))
static inline void cache_ll_preload_set_strategy(uint32_t cache_level, cache_type_t type, uint32_t cache_id, cache_ll_preload_strategy_t strategy)
{
    (void)cache_level;
    (void)type;
    (void)cache_id;
    (void)strategy;
}

/**
 * @brief Start a manual preload of the L1 ICache (non-blocking)
 *
 * Pair with cache_ll_preload_wait_done() to wait for completion.
 * CACHE_TYPE_DATA is silently ignored (this target has no DCache preload);
 * INSTRUCTION and ALL both preload the ICache.
 *
 * @param cache_level must be CACHE_LL_LEVEL_EXT_MEM
 * @param type        cache type; DATA is a no-op
 * @param cache_id    unused
 * @param vaddr       start virtual address of the region to preload
 * @param size        size of the region, in bytes
 * @param ascending   true: ascending address order (0), false: descending (1)
 */
__attribute__((always_inline))
static inline void cache_ll_preload(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    (void)cache_id;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    if (type == CACHE_TYPE_DATA) {
        return;
    }
    Cache_Start_ICache_Preload(vaddr, size, ascending ? 0 : 1);
}

/**
 * @brief Busy-wait until the L1 ICache preload has finished
 *
 * Returns immediately for CACHE_TYPE_DATA (such a preload is never started).
 */
__attribute__((always_inline))
static inline void cache_ll_preload_wait_done(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
    (void)cache_id;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    if (type == CACHE_TYPE_DATA) {
        return;
    }
    while (Cache_ICache_Preload_Done() == 0) {
    }
}
/**
* @brief Get Cache line size, in bytes
*
+49 -1
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -34,6 +34,14 @@ extern "C" {
#define CACHE_LL_L1_ACCESS_EVENT_MASK (0x1f)
/**
 * @brief Manual-preload strategy selector
 *
 * Values are written directly into the cache controller's undef-op field by
 * cache_ll_preload_set_strategy(). CACHE_LL_PRELOAD_ARBITRARY is selected by
 * cache_hal_init() for better preload performance.
 */
typedef enum {
    CACHE_LL_PRELOAD_UNTIL_FETCH_DONE = 0,  /*!< hardware encoding 0 — TODO(review): confirm exact semantics in the TRM */
    CACHE_LL_PRELOAD_AFTER_FETCH = 1,       /*!< hardware encoding 1 */
    CACHE_LL_PRELOAD_ARBITRARY = 2,         /*!< hardware encoding 2 */
} cache_ll_preload_strategy_t;
/**
* @brief Check if Cache auto preload is enabled or not.
@@ -185,6 +193,46 @@ static inline void cache_ll_unfreeze_cache(uint32_t cache_level, cache_type_t ty
Cache_Freeze_Disable();
}
/**
 * @brief Set the preload strategy of the (unified) L1 cache
 *
 * @param cache_level applied for CACHE_LL_LEVEL_EXT_MEM or CACHE_LL_LEVEL_ALL;
 *                    ignored for any other level
 * @param type        unused — the cache is unified
 * @param cache_id    unused
 * @param strategy    value written to the l1_icache_undef_op field
 */
__attribute__((always_inline))
static inline void cache_ll_preload_set_strategy(uint32_t cache_level, cache_type_t type, uint32_t cache_id, cache_ll_preload_strategy_t strategy)
{
    (void)cache_id;
    (void)type;
    if (cache_level == CACHE_LL_LEVEL_EXT_MEM || cache_level == CACHE_LL_LEVEL_ALL) {
        CACHE.l1_icache_ctrl.l1_icache_undef_op = strategy;
    }
}

/**
 * @brief Start a manual preload of the unified L1 cache (non-blocking)
 *
 * Pair with cache_ll_preload_wait_done() to wait for completion.
 *
 * @param cache_level must be CACHE_LL_LEVEL_EXT_MEM
 * @param type        unused — the cache is unified
 * @param cache_id    unused
 * @param vaddr       start virtual address of the region to preload
 * @param size        size of the region, in bytes
 * @param ascending   true: ascending address order (0), false: descending (1)
 */
__attribute__((always_inline))
static inline void cache_ll_preload(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    (void)cache_id;
    (void)type;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    Cache_Start_Preload(vaddr, size, ascending ? 0 : 1);
}

/**
 * @brief Busy-wait until the cache preload has finished
 */
__attribute__((always_inline))
static inline void cache_ll_preload_wait_done(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
    (void)cache_id;
    (void)type;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    while (Cache_Preload_Done() == 0) {
    }
}
/**
* @brief Get Cache line size, in bytes
*
+55 -1
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -33,6 +33,15 @@ extern "C" {
#define CACHE_LL_LEVEL_NUMS 1 //Number of cache levels
#define CACHE_LL_L1_ICACHE_AUTOLOAD (1<<0)
/**
 * @brief Manual-preload strategy selector
 *
 * Values are written directly into the EXTMEM_L1_CACHE_UNDEF_OP field by
 * cache_ll_preload_set_strategy(). CACHE_LL_PRELOAD_ARBITRARY is selected by
 * cache_hal_init() for better preload performance.
 */
typedef enum {
    CACHE_LL_PRELOAD_UNTIL_FETCH_DONE = 0,  /*!< hardware encoding 0 — TODO(review): confirm exact semantics in the TRM */
    CACHE_LL_PRELOAD_AFTER_FETCH = 1,       /*!< hardware encoding 1 */
    CACHE_LL_PRELOAD_ARBITRARY = 2,         /*!< hardware encoding 2 */
} cache_ll_preload_strategy_t;
/**
* @brief Check if Cache auto preload is enabled or not.
*
@@ -166,6 +175,51 @@ static inline void cache_ll_unfreeze_cache(uint32_t cache_level, cache_type_t ty
Cache_Freeze_ICache_Disable();
}
/**
 * @brief Set the preload strategy of the L1 cache
 *
 * @param cache_level applied for CACHE_LL_LEVEL_EXT_MEM or CACHE_LL_LEVEL_ALL;
 *                    ignored for any other level
 * @param type        unused
 * @param cache_id    unused
 * @param strategy    value written to the EXTMEM_L1_CACHE_UNDEF_OP field
 */
__attribute__((always_inline))
static inline void cache_ll_preload_set_strategy(uint32_t cache_level, cache_type_t type, uint32_t cache_id, cache_ll_preload_strategy_t strategy)
{
    (void)cache_id;
    (void)type;
    if (cache_level == CACHE_LL_LEVEL_EXT_MEM || cache_level == CACHE_LL_LEVEL_ALL) {
        REG_SET_FIELD(EXTMEM_L1_CACHE_CTRL_REG, EXTMEM_L1_CACHE_UNDEF_OP, strategy);
    }
}

/**
 * @brief Start a manual preload of the L1 ICache (non-blocking)
 *
 * Pair with cache_ll_preload_wait_done() to wait for completion.
 * CACHE_TYPE_DATA is silently ignored (no DCache preload on this target);
 * INSTRUCTION and ALL both preload the ICache.
 *
 * @param cache_level must be CACHE_LL_LEVEL_EXT_MEM
 * @param type        cache type; DATA is a no-op
 * @param cache_id    unused
 * @param vaddr       start virtual address of the region to preload
 * @param size        size of the region, in bytes
 * @param ascending   true: ascending address order (0), false: descending (1)
 */
__attribute__((always_inline))
static inline void cache_ll_preload(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    (void)cache_id;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    if (type == CACHE_TYPE_DATA) {
        return;
    }
    Cache_Start_ICache_Preload(vaddr, size, ascending ? 0 : 1);
}

/**
 * @brief Busy-wait until the L1 ICache preload has finished
 *
 * Returns immediately for CACHE_TYPE_DATA (such a preload is never started).
 */
__attribute__((always_inline))
static inline void cache_ll_preload_wait_done(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
    (void)cache_id;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    if (type == CACHE_TYPE_DATA) {
        return;
    }
    while (Cache_ICache_Preload_Done() == 0) {
    }
}
/**
* @brief Get Cache line size, in bytes
*
+49 -1
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2024-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2024-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -33,6 +33,14 @@ extern "C" {
#define CACHE_LL_L1_ACCESS_EVENT_MASK (0x1f)
/**
 * @brief Manual-preload strategy selector
 *
 * Values are written directly into the cache controller's undef-op field by
 * cache_ll_preload_set_strategy(). CACHE_LL_PRELOAD_ARBITRARY is selected by
 * cache_hal_init() for better preload performance.
 */
typedef enum {
    CACHE_LL_PRELOAD_UNTIL_FETCH_DONE = 0,  /*!< hardware encoding 0 — TODO(review): confirm exact semantics in the TRM */
    CACHE_LL_PRELOAD_AFTER_FETCH = 1,       /*!< hardware encoding 1 */
    CACHE_LL_PRELOAD_ARBITRARY = 2,         /*!< hardware encoding 2 */
} cache_ll_preload_strategy_t;
/**
* @brief Check if Cache auto preload is enabled or not.
@@ -184,6 +192,46 @@ static inline void cache_ll_unfreeze_cache(uint32_t cache_level, cache_type_t ty
Cache_Freeze_Disable();
}
/**
 * @brief Set the preload strategy of the (unified) L1 cache
 *
 * @param cache_level applied for CACHE_LL_LEVEL_EXT_MEM or CACHE_LL_LEVEL_ALL;
 *                    ignored for any other level
 * @param type        unused — the cache is unified
 * @param cache_id    unused
 * @param strategy    value written to the l1_icache_undef_op field
 */
__attribute__((always_inline))
static inline void cache_ll_preload_set_strategy(uint32_t cache_level, cache_type_t type, uint32_t cache_id, cache_ll_preload_strategy_t strategy)
{
    (void)cache_id;
    (void)type;
    if (cache_level == CACHE_LL_LEVEL_EXT_MEM || cache_level == CACHE_LL_LEVEL_ALL) {
        CACHE.l1_cache_ctrl.l1_icache_undef_op = strategy;
    }
}

/**
 * @brief Start a manual preload of the unified L1 cache (non-blocking)
 *
 * Pair with cache_ll_preload_wait_done() to wait for completion.
 *
 * @param cache_level must be CACHE_LL_LEVEL_EXT_MEM
 * @param type        unused — the cache is unified
 * @param cache_id    unused
 * @param vaddr       start virtual address of the region to preload
 * @param size        size of the region, in bytes
 * @param ascending   true: ascending address order (0), false: descending (1)
 */
__attribute__((always_inline))
static inline void cache_ll_preload(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    (void)cache_id;
    (void)type;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    Cache_Start_Preload(vaddr, size, ascending ? 0 : 1);
}

/**
 * @brief Busy-wait until the cache preload has finished
 */
__attribute__((always_inline))
static inline void cache_ll_preload_wait_done(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
    (void)cache_id;
    (void)type;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    while (Cache_Preload_Done() == 0) {
    }
}
/**
* @brief Get Cache line size, in bytes
*
+54 -1
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -33,6 +33,15 @@ extern "C" {
#define CACHE_LL_LEVEL_NUMS 1 //Number of cache levels
#define CACHE_LL_L1_ICACHE_AUTOLOAD (1<<0)
/**
 * @brief Manual-preload strategy selector
 *
 * Values match the hardware encoding of the cache controller's preload
 * strategy field on targets that expose one; the setter is a no-op on this
 * target. CACHE_LL_PRELOAD_ARBITRARY is selected by cache_hal_init() for
 * better preload performance.
 */
typedef enum {
    CACHE_LL_PRELOAD_UNTIL_FETCH_DONE = 0,  /*!< hardware encoding 0 — TODO(review): confirm exact semantics in the TRM */
    CACHE_LL_PRELOAD_AFTER_FETCH = 1,       /*!< hardware encoding 1 */
    CACHE_LL_PRELOAD_ARBITRARY = 2,         /*!< hardware encoding 2 */
} cache_ll_preload_strategy_t;
/**
* @brief Check if Cache auto preload is enabled or not.
*
@@ -166,6 +175,50 @@ static inline void cache_ll_unfreeze_cache(uint32_t cache_level, cache_type_t ty
Cache_Freeze_ICache_Disable();
}
/**
 * @brief Set the manual preload strategy — no-op on this target (no strategy register)
 */
__attribute__((always_inline))
static inline void cache_ll_preload_set_strategy(uint32_t cache_level, cache_type_t type, uint32_t cache_id, cache_ll_preload_strategy_t strategy)
{
    // No hardware support; parameters are intentionally ignored
    (void)cache_level;
    (void)type;
    (void)cache_id;
    (void)strategy;
}

/**
 * @brief Start a manual preload of the L1 ICache (non-blocking)
 *
 * DATA preload is not supported on this target and is silently ignored;
 * INSTRUCTION and ALL both preload the ICache. Pair with
 * cache_ll_preload_wait_done() to wait for completion.
 */
__attribute__((always_inline))
static inline void cache_ll_preload(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    (void)cache_id;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    if (type != CACHE_TYPE_DATA) {
        Cache_Start_ICache_Preload(vaddr, size, ascending ? 0 : 1);
    }
}

/**
 * @brief Busy-wait until the L1 ICache preload has finished
 *
 * Returns immediately for CACHE_TYPE_DATA (such a preload is never started).
 */
__attribute__((always_inline))
static inline void cache_ll_preload_wait_done(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
    (void)cache_id;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    if (type != CACHE_TYPE_DATA) {
        while (!Cache_ICache_Preload_Done()) {
        }
    }
}
/**
* @brief Get Cache line size, in bytes
*
+54 -1
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2024-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2024-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -34,6 +34,15 @@ extern "C" {
#define CACHE_LL_LEVEL_NUMS 1 //Number of cache levels
#define CACHE_LL_L1_ICACHE_AUTOLOAD (1<<0)
/**
 * @brief Manual-preload strategy selector
 *
 * Values match the hardware encoding of the cache controller's preload
 * strategy field on targets that expose one; the setter is a no-op on this
 * target. CACHE_LL_PRELOAD_ARBITRARY is selected by cache_hal_init() for
 * better preload performance.
 */
typedef enum {
    CACHE_LL_PRELOAD_UNTIL_FETCH_DONE = 0,  /*!< hardware encoding 0 — TODO(review): confirm exact semantics in the TRM */
    CACHE_LL_PRELOAD_AFTER_FETCH = 1,       /*!< hardware encoding 1 */
    CACHE_LL_PRELOAD_ARBITRARY = 2,         /*!< hardware encoding 2 */
} cache_ll_preload_strategy_t;
/**
* @brief Check if Cache auto preload is enabled or not.
*
@@ -167,6 +176,50 @@ static inline void cache_ll_unfreeze_cache(uint32_t cache_level, cache_type_t ty
Cache_Freeze_ICache_Disable();
}
/**
 * @brief Set the manual preload strategy — no-op on this target (no strategy register)
 */
__attribute__((always_inline))
static inline void cache_ll_preload_set_strategy(uint32_t cache_level, cache_type_t type, uint32_t cache_id, cache_ll_preload_strategy_t strategy)
{
    // No hardware support; parameters are intentionally ignored
    (void)cache_level;
    (void)type;
    (void)cache_id;
    (void)strategy;
}

/**
 * @brief Start a manual preload of the L1 ICache (non-blocking)
 *
 * DATA preload is not supported on this target and is silently ignored;
 * INSTRUCTION and ALL both preload the ICache. Pair with
 * cache_ll_preload_wait_done() to wait for completion.
 */
__attribute__((always_inline))
static inline void cache_ll_preload(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    (void)cache_id;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    if (type != CACHE_TYPE_DATA) {
        Cache_Start_ICache_Preload(vaddr, size, ascending ? 0 : 1);
    }
}

/**
 * @brief Busy-wait until the L1 ICache preload has finished
 *
 * Returns immediately for CACHE_TYPE_DATA (such a preload is never started).
 */
__attribute__((always_inline))
static inline void cache_ll_preload_wait_done(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
    (void)cache_id;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    if (type != CACHE_TYPE_DATA) {
        while (!Cache_ICache_Preload_Done()) {
        }
    }
}
/**
* @brief Get Cache line size, in bytes
*
+76 -1
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2025-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -34,6 +34,15 @@ extern "C" {
#define CACHE_LL_LEVEL_NUMS 1 //Number of cache levels
#define CACHE_LL_CACHE_AUTOLOAD (1<<0)
/**
 * @brief Manual-preload strategy selector
 *
 * Values match the hardware encoding of the cache controller's preload
 * strategy field on targets that expose one; the setter is a no-op on this
 * target. CACHE_LL_PRELOAD_ARBITRARY is selected by cache_hal_init() for
 * better preload performance.
 */
typedef enum {
    CACHE_LL_PRELOAD_UNTIL_FETCH_DONE = 0,  /*!< hardware encoding 0 — TODO(review): confirm exact semantics in the TRM */
    CACHE_LL_PRELOAD_AFTER_FETCH = 1,       /*!< hardware encoding 1 */
    CACHE_LL_PRELOAD_ARBITRARY = 2,         /*!< hardware encoding 2 */
} cache_ll_preload_strategy_t;
/**
* @brief Check if L1 ICache autoload is enabled or not
*
@@ -681,6 +690,72 @@ static inline void cache_ll_unfreeze_cache(uint32_t cache_level, cache_type_t ty
}
}
/*------------------------------------------------------------------------------
* Cache Preload
*----------------------------------------------------------------------------*/
/**
 * @brief Translate a cache_type_t into the ROM CACHE_MAP_* mask used by the
 *        preload APIs (internal helper)
 *
 * Extracted so the type->map translation exists in exactly one place instead
 * of being duplicated in cache_ll_preload() and cache_ll_preload_wait_done().
 */
__attribute__((always_inline))
static inline uint32_t cache_ll_preload_type_to_map(cache_type_t type)
{
    switch (type) {
    case CACHE_TYPE_INSTRUCTION:
        return CACHE_MAP_ICACHE0 | CACHE_MAP_ICACHE1;
    case CACHE_TYPE_DATA:
        return CACHE_MAP_DCACHE;
    case CACHE_TYPE_ALL:
    default:
        return CACHE_MAP_ALL;
    }
}

/**
 * @brief Set the manual preload strategy — no-op on this target (no strategy register)
 */
__attribute__((always_inline))
static inline void cache_ll_preload_set_strategy(uint32_t cache_level, cache_type_t type, uint32_t cache_id, cache_ll_preload_strategy_t strategy)
{
    (void)cache_level;
    (void)type;
    (void)cache_id;
    (void)strategy;
}

/**
 * @brief Start a manual cache preload (L1 only; non-blocking)
 *
 * Starts preload for the caches selected by @p type and does not wait. Use
 * cache_ll_preload_wait_done() to wait for completion.
 *
 * @param cache_level must be CACHE_LL_LEVEL_EXT_MEM
 * @param type        cache type, see cache_type_t
 * @param cache_id    unused
 * @param vaddr       start virtual address of the region to preload
 * @param size        size of the region, in bytes
 * @param ascending   true: ascending address order (0), false: descending (1)
 */
__attribute__((always_inline))
static inline void cache_ll_preload(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    (void)cache_id;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    Cache_Start_Preload(cache_ll_preload_type_to_map(type), vaddr, size, ascending ? 0 : 1);
}

/**
 * @brief Busy-wait until the preload of the selected caches has finished (L1 only)
 */
__attribute__((always_inline))
static inline void cache_ll_preload_wait_done(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
    (void)cache_id;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    uint32_t map = cache_ll_preload_type_to_map(type);
    while (Cache_Preload_Done(map) == 0) {
    }
}
/*------------------------------------------------------------------------------
* Cache Line Size
*----------------------------------------------------------------------------*/
+226 -1
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -13,6 +13,7 @@
#include "soc/cache_struct.h"
#include "soc/ext_mem_defs.h"
#include "hal/cache_types.h"
#include "hal/config.h"
#include "hal/assert.h"
#include "esp32p4/rom/cache.h"
@@ -53,6 +54,15 @@ extern "C" {
#define CACHE_LL_L1_CORE0_EVENT_MASK (1<<0)
#define CACHE_LL_L1_CORE1_EVENT_MASK (1<<1)
/**
 * @brief Manual-preload strategy selector
 *
 * Values are written directly into the L1/L2 cache controllers' undef-op
 * fields by cache_ll_preload_set_strategy() (chip revision permitting).
 * CACHE_LL_PRELOAD_ARBITRARY is selected by cache_hal_init() for better
 * preload performance.
 */
typedef enum {
    CACHE_LL_PRELOAD_UNTIL_FETCH_DONE = 0,  /*!< hardware encoding 0 — TODO(review): confirm exact semantics in the TRM */
    CACHE_LL_PRELOAD_AFTER_FETCH = 1,       /*!< hardware encoding 1 */
    CACHE_LL_PRELOAD_ARBITRARY = 2,         /*!< hardware encoding 2 */
} cache_ll_preload_strategy_t;
/*------------------------------------------------------------------------------
* Autoload
*----------------------------------------------------------------------------*/
@@ -898,6 +908,221 @@ static inline void cache_ll_unfreeze_cache(uint32_t cache_level, cache_type_t ty
}
}
/*------------------------------------------------------------------------------
 * Preload (L1 / L2)
 *----------------------------------------------------------------------------*/
/**
 * @brief Start L1 ICache manual preload (non-blocking)
 *
 * Starts preload for the given region and does not wait. Use
 * cache_ll_l1_icache_preload_wait_done() to wait for completion.
 * Any cache_id other than 0, 1, or CACHE_LL_ID_ALL is silently ignored.
 *
 * @param cache_id  id of the cache in this type and level (0: Core0, 1: Core1, CACHE_LL_ID_ALL: both)
 * @param vaddr     start virtual address of the preload region
 * @param size      size of the preload region in bytes
 * @param ascending true: ascending (positive) order; false: descending (negative) order
 */
__attribute__((always_inline))
static inline void cache_ll_l1_icache_preload(uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    // ROM order argument: 0 = ascending, 1 = descending
    uint32_t order = ascending ? 0 : 1;
    if (cache_id == 0) {
        Cache_Start_L1_CORE0_ICache_Preload(vaddr, size, order);
    } else if (cache_id == 1) {
        Cache_Start_L1_CORE1_ICache_Preload(vaddr, size, order);
    } else if (cache_id == CACHE_LL_ID_ALL) {
        Cache_Start_L1_CORE0_ICache_Preload(vaddr, size, order);
        Cache_Start_L1_CORE1_ICache_Preload(vaddr, size, order);
    }
}

/**
 * @brief Busy-wait until L1 ICache manual preload is done
 *
 * @param cache_id id of the cache in this type and level (0: Core0, 1: Core1, CACHE_LL_ID_ALL: both)
 */
__attribute__((always_inline))
static inline void cache_ll_l1_icache_preload_wait_done(uint32_t cache_id)
{
    if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) {
        while (Cache_L1_CORE0_ICache_Preload_Done() == 0) {
        }
    }
    if (cache_id == 1 || cache_id == CACHE_LL_ID_ALL) {
        while (Cache_L1_CORE1_ICache_Preload_Done() == 0) {
        }
    }
}

/**
 * @brief Start L1 DCache manual preload (non-blocking)
 *
 * Starts preload for the given region and does not wait. Use
 * cache_ll_l1_dcache_preload_wait_done() to wait for completion.
 *
 * @param cache_id  id of the cache in this type and level (0 or CACHE_LL_ID_ALL; there is a single DCache)
 * @param vaddr     start virtual address of the preload region
 * @param size      size of the preload region in bytes
 * @param ascending true: ascending (positive) order; false: descending (negative) order
 */
__attribute__((always_inline))
static inline void cache_ll_l1_dcache_preload(uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) {
        Cache_Start_L1_DCache_Preload(vaddr, size, ascending ? 0 : 1);
    }
}

/**
 * @brief Busy-wait until L1 DCache manual preload is done
 *
 * @param cache_id id of the cache in this type and level (0 or CACHE_LL_ID_ALL)
 */
__attribute__((always_inline))
static inline void cache_ll_l1_dcache_preload_wait_done(uint32_t cache_id)
{
    if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) {
        while (Cache_L1_DCache_Preload_Done() == 0) {
        }
    }
}

/**
 * @brief Start L2 Cache manual preload (non-blocking)
 *
 * Starts preload for the given region and does not wait. Use
 * cache_ll_l2_preload_wait_done() to wait for completion.
 *
 * @param cache_id  id of the cache in this type and level (0 or CACHE_LL_ID_ALL; there is a single L2)
 * @param vaddr     start virtual address of the preload region
 * @param size      size of the preload region in bytes
 * @param ascending true: ascending (positive) order; false: descending (negative) order
 */
__attribute__((always_inline))
static inline void cache_ll_l2_preload(uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) {
        Cache_Start_L2_Cache_Preload(vaddr, size, ascending ? 0 : 1);
    }
}

/**
 * @brief Busy-wait until L2 Cache manual preload is done
 *
 * @param cache_id id of the cache in this type and level (0 or CACHE_LL_ID_ALL)
 */
__attribute__((always_inline))
static inline void cache_ll_l2_preload_wait_done(uint32_t cache_id)
{
    if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) {
        while (Cache_L2_Cache_Preload_Done() == 0) {
        }
    }
}
/*------------------------------------------------------------------------------
 * Cache Preload
 *----------------------------------------------------------------------------*/
/**
 * @brief Set the preload strategy
 *
 * Only effective on chip revisions >= 3.0.0 (the strategy fields do not exist
 * on earlier silicon); compiled out otherwise.
 *
 * @param cache_level level of the cache (1: L1, 2: L2, CACHE_LL_LEVEL_ALL: both)
 * @param type        see `cache_type_t`; only applies to L1 (L2 is unified)
 * @param cache_id    unused — the strategy fields are shared, not per core
 * @param strategy    the preload strategy
 */
__attribute__((always_inline))
static inline void cache_ll_preload_set_strategy(uint32_t cache_level, cache_type_t type, uint32_t cache_id, cache_ll_preload_strategy_t strategy)
{
    (void)cache_id;
#if HAL_CONFIG(CHIP_SUPPORT_MIN_REV) >= 300
    if (cache_level == 2 || cache_level == CACHE_LL_LEVEL_ALL) {
        CACHE.l2_cache_ctrl.l2_cache_undef_op = strategy;
    }
    if (cache_level == 1 || cache_level == CACHE_LL_LEVEL_ALL) {
        switch (type) {
        case CACHE_TYPE_INSTRUCTION:
            CACHE.l1_icache_ctrl.l1_icache_undef_op = strategy;
            break;
        case CACHE_TYPE_DATA:
            CACHE.l1_dcache_ctrl.l1_dcache_undef_op = strategy;
            break;
        case CACHE_TYPE_ALL:
        default:
            CACHE.l1_icache_ctrl.l1_icache_undef_op = strategy;
            CACHE.l1_dcache_ctrl.l1_dcache_undef_op = strategy;
            break;
        }
    }
#else
    // Strategy registers unavailable on older revisions: silence unused-parameter warnings
    (void)cache_level;
    (void)type;
    (void)strategy;
#endif
}
/**
 * @brief Preload cache (non-blocking)
 *
 * Starts preload for the given level/type and does not wait. Use
 * cache_ll_preload_wait_done() to wait for completion. L2 is started before
 * L1 when CACHE_LL_LEVEL_ALL is given.
 *
 * @param cache_level level of the cache (1: L1, 2: L2, CACHE_LL_LEVEL_ALL: both)
 * @param type        see `cache_type_t` (INSTRUCTION, DATA, or ALL); only applies to L1
 * @param cache_id    id of the cache in this type and level (0, 1, or CACHE_LL_ID_ALL)
 * @param vaddr       start virtual address of the preload region
 * @param size        size of the preload region in bytes
 * @param ascending   true: ascending (positive) order; false: descending (negative) order
 */
__attribute__((always_inline))
static inline void cache_ll_preload(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    if (cache_level == 2 || cache_level == CACHE_LL_LEVEL_ALL) {
        cache_ll_l2_preload(cache_id, vaddr, size, ascending);
    }
    if (cache_level == 1 || cache_level == CACHE_LL_LEVEL_ALL) {
        switch (type) {
        case CACHE_TYPE_INSTRUCTION:
            cache_ll_l1_icache_preload(cache_id, vaddr, size, ascending);
            break;
        case CACHE_TYPE_DATA:
            cache_ll_l1_dcache_preload(cache_id, vaddr, size, ascending);
            break;
        case CACHE_TYPE_ALL:
        default:
            cache_ll_l1_icache_preload(cache_id, vaddr, size, ascending);
            cache_ll_l1_dcache_preload(cache_id, vaddr, size, ascending);
            break;
        }
    }
}

/**
 * @brief Busy-wait until cache preload is done
 *
 * Waits for L2 first, then L1, mirroring the start order in cache_ll_preload().
 *
 * @param cache_level level of the cache (1: L1, 2: L2, CACHE_LL_LEVEL_ALL: both)
 * @param type        see `cache_type_t` (INSTRUCTION, DATA, or ALL); only applies to L1
 * @param cache_id    id of the cache in this type and level (0, 1, or CACHE_LL_ID_ALL)
 */
__attribute__((always_inline))
static inline void cache_ll_preload_wait_done(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
    if (cache_level == 2 || cache_level == CACHE_LL_LEVEL_ALL) {
        cache_ll_l2_preload_wait_done(cache_id);
    }
    if (cache_level == 1 || cache_level == CACHE_LL_LEVEL_ALL) {
        switch (type) {
        case CACHE_TYPE_INSTRUCTION:
            cache_ll_l1_icache_preload_wait_done(cache_id);
            break;
        case CACHE_TYPE_DATA:
            cache_ll_l1_dcache_preload_wait_done(cache_id);
            break;
        case CACHE_TYPE_ALL:
        default:
            cache_ll_l1_icache_preload_wait_done(cache_id);
            cache_ll_l1_dcache_preload_wait_done(cache_id);
            break;
        }
    }
}
/*------------------------------------------------------------------------------
* Cache Line Size
*----------------------------------------------------------------------------*/
+75 -1
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -31,6 +31,15 @@ extern "C" {
#define CACHE_LL_L1_ICACHE_AUTOLOAD (1<<0)
#define CACHE_LL_L1_DCACHE_AUTOLOAD (1<<0)
/**
 * @brief Manual-preload strategy selector
 *
 * Values match the hardware encoding of the cache controller's preload
 * strategy field on targets that expose one; the setter is a no-op on this
 * target. CACHE_LL_PRELOAD_ARBITRARY is selected by cache_hal_init() for
 * better preload performance.
 */
typedef enum {
    CACHE_LL_PRELOAD_UNTIL_FETCH_DONE = 0,  /*!< hardware encoding 0 — TODO(review): confirm exact semantics in the TRM */
    CACHE_LL_PRELOAD_AFTER_FETCH = 1,       /*!< hardware encoding 1 */
    CACHE_LL_PRELOAD_ARBITRARY = 2,         /*!< hardware encoding 2 */
} cache_ll_preload_strategy_t;
/**
* @brief Check if ICache auto preload is enabled or not
*
@@ -90,6 +99,71 @@ static inline bool cache_ll_is_cache_autoload_enabled(uint32_t cache_level, cach
return enabled;
}
/**
 * @brief Set the manual preload strategy — no-op on this target (no strategy register)
 */
__attribute__((always_inline))
static inline void cache_ll_preload_set_strategy(uint32_t cache_level, cache_type_t type, uint32_t cache_id, cache_ll_preload_strategy_t strategy)
{
    (void)cache_level;
    (void)type;
    (void)cache_id;
    (void)strategy;
}

/**
 * @brief Start a manual preload of the L1 ICache and/or DCache (non-blocking)
 *
 * Starts preload for the given region and does not wait. Use
 * cache_ll_preload_wait_done() to wait for completion.
 *
 * @param cache_level must be CACHE_LL_LEVEL_EXT_MEM
 * @param type        INSTRUCTION: ICache only; DATA: DCache only; ALL: both
 * @param cache_id    unused
 * @param vaddr       start virtual address of the region to preload
 * @param size        size of the region, in bytes
 * @param ascending   true: ascending address order (0), false: descending (1)
 */
__attribute__((always_inline))
static inline void cache_ll_preload(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    (void)cache_id;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    switch (type) {
    case CACHE_TYPE_INSTRUCTION:
        Cache_Start_ICache_Preload(vaddr, size, ascending ? 0 : 1);
        break;
    case CACHE_TYPE_DATA:
        Cache_Start_DCache_Preload(vaddr, size, ascending ? 0 : 1);
        break;
    case CACHE_TYPE_ALL:
    default:
        Cache_Start_ICache_Preload(vaddr, size, ascending ? 0 : 1);
        Cache_Start_DCache_Preload(vaddr, size, ascending ? 0 : 1);
        break;
    }
}

/**
 * @brief Busy-wait until the selected cache preload(s) have finished
 *
 * For CACHE_TYPE_ALL, waits for the ICache first, then the DCache.
 */
__attribute__((always_inline))
static inline void cache_ll_preload_wait_done(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
    (void)cache_id;
    HAL_ASSERT(cache_level == CACHE_LL_LEVEL_EXT_MEM);
    switch (type) {
    case CACHE_TYPE_INSTRUCTION:
        while (Cache_ICache_Preload_Done() == 0) {
        }
        break;
    case CACHE_TYPE_DATA:
        while (Cache_DCache_Preload_Done() == 0) {
        }
        break;
    case CACHE_TYPE_ALL:
    default:
        while (Cache_ICache_Preload_Done() == 0) {
        }
        while (Cache_DCache_Preload_Done() == 0) {
        }
        break;
    }
}
/**
* @brief Disable ICache
*/
+165 -1
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -46,6 +46,15 @@ extern "C" {
#define CACHE_LL_L1_ICACHE_AUTOLOAD (1<<2)
#define CACHE_LL_L1_DCACHE_AUTOLOAD (1<<2)
/**
 * @brief Manual-preload strategy selector
 *
 * Values match the hardware encoding of the cache controller's preload
 * strategy field on targets that expose one; the setter is a no-op on this
 * target. CACHE_LL_PRELOAD_ARBITRARY is selected by cache_hal_init() for
 * better preload performance.
 */
typedef enum {
    CACHE_LL_PRELOAD_UNTIL_FETCH_DONE = 0,  /*!< hardware encoding 0 — TODO(review): confirm exact semantics in the TRM */
    CACHE_LL_PRELOAD_AFTER_FETCH = 1,       /*!< hardware encoding 1 */
    CACHE_LL_PRELOAD_ARBITRARY = 2,         /*!< hardware encoding 2 */
} cache_ll_preload_strategy_t;
/**
* @brief Check if ICache auto preload is enabled or not
*
@@ -105,6 +114,161 @@ static inline bool cache_ll_is_cache_autoload_enabled(uint32_t cache_level, cach
return enabled;
}
/*------------------------------------------------------------------------------
* Preload (manual preload only; no autoload, via ROM API)
*----------------------------------------------------------------------------*/
/**
* @brief Check if L1 ICache preload is in progress
*
* @param cache_id id of the cache (0 or CACHE_LL_ID_ALL on S3)
* @return true: preload in progress; false: idle
*/
__attribute__((always_inline))
static inline bool cache_ll_l1_is_icache_preload_busy(uint32_t cache_id)
{
(void)cache_id;
return Cache_ICache_Preload_Done() == 0;
}
/**
* @brief Check if L1 DCache preload is in progress
*
* @param cache_id id of the cache (0 or CACHE_LL_ID_ALL on S3)
* @return true: preload in progress; false: idle
*/
__attribute__((always_inline))
static inline bool cache_ll_l1_is_dcache_preload_busy(uint32_t cache_id)
{
(void)cache_id;
return Cache_DCache_Preload_Done() == 0;
}
/**
* @brief Set L1 ICache preload address and start preload (ROM: suspends autoload)
*
* @param cache_id id of the cache (0 or CACHE_LL_ID_ALL on S3)
* @param vaddr start virtual address for preload
* @param size_bytes size of region in bytes
* @param ascending true: ascending (order 0); false: descending (order 1)
*/
__attribute__((always_inline))
static inline void cache_ll_l1_icache_preload(uint32_t cache_id, uint32_t vaddr, uint32_t size_bytes, bool ascending)
{
(void)cache_id;
Cache_Start_ICache_Preload(vaddr, size_bytes, ascending ? 0 : 1);
}
/**
* @brief Set L1 DCache preload address and start preload (ROM: suspends autoload)
*
* @param cache_id id of the cache (0 or CACHE_LL_ID_ALL on S3)
* @param vaddr start virtual address for preload
* @param size_bytes size of region in bytes
* @param ascending true: ascending (order 0); false: descending (order 1)
*/
__attribute__((always_inline))
static inline void cache_ll_l1_dcache_preload(uint32_t cache_id, uint32_t vaddr, uint32_t size_bytes, bool ascending)
{
(void)cache_id;
Cache_Start_DCache_Preload(vaddr, size_bytes, ascending ? 0 : 1);
}
/**
 * @brief Spin until the pending L1 ICache preload has completed
 *
 * @param cache_id id of the cache (0 or CACHE_LL_ID_ALL on S3); unused
 */
__attribute__((always_inline))
static inline void cache_ll_l1_icache_preload_wait_done(uint32_t cache_id)
{
    (void)cache_id;
    // Busy-wait: the ROM call returns non-zero once the preload has finished
    while (!Cache_ICache_Preload_Done()) {
    }
}
/**
 * @brief Spin until the pending L1 DCache preload has completed
 *
 * @param cache_id id of the cache (0 or CACHE_LL_ID_ALL on S3); unused
 */
__attribute__((always_inline))
static inline void cache_ll_l1_dcache_preload_wait_done(uint32_t cache_id)
{
    (void)cache_id;
    // Busy-wait: the ROM call returns non-zero once the preload has finished
    while (!Cache_DCache_Preload_Done()) {
    }
}
/**
 * @brief Set the preload strategy
 *
 * This target has no configurable preload strategy, so the call is a no-op;
 * all arguments are accepted and discarded.
 */
__attribute__((always_inline))
static inline void cache_ll_preload_set_strategy(uint32_t cache_level, cache_type_t type, uint32_t cache_id, cache_ll_preload_strategy_t strategy)
{
    (void)cache_level, (void)type, (void)cache_id, (void)strategy;
}
/**
 * @brief Start a cache preload on the L1 cache(s), without waiting
 *
 * Kicks off the preload and returns immediately; use
 * cache_ll_preload_wait_done() to block until it completes.
 *
 * @param cache_level level of the cache; only level 1 is valid here
 * @param type see `cache_type_t`
 * @param cache_id id of the cache (0 or CACHE_LL_ID_ALL)
 * @param vaddr first virtual address of the region to preload
 * @param size length of the region, in bytes
 * @param ascending true: preload in ascending address order; false: descending
 */
__attribute__((always_inline))
static inline void cache_ll_preload(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    HAL_ASSERT(cache_level == 1);
    if (type == CACHE_TYPE_INSTRUCTION) {
        cache_ll_l1_icache_preload(cache_id, vaddr, size, ascending);
    } else if (type == CACHE_TYPE_DATA) {
        cache_ll_l1_dcache_preload(cache_id, vaddr, size, ascending);
    } else {
        // CACHE_TYPE_ALL (and any unexpected value): preload both caches
        cache_ll_l1_icache_preload(cache_id, vaddr, size, ascending);
        cache_ll_l1_dcache_preload(cache_id, vaddr, size, ascending);
    }
}
/**
 * @brief Block until the cache preload started earlier has finished (L1 only)
 *
 * @param cache_level level of the cache; only level 1 is valid here
 * @param type see `cache_type_t`
 * @param cache_id id of the cache (0 or CACHE_LL_ID_ALL)
 */
__attribute__((always_inline))
static inline void cache_ll_preload_wait_done(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
    HAL_ASSERT(cache_level == 1);
    if (type == CACHE_TYPE_INSTRUCTION) {
        cache_ll_l1_icache_preload_wait_done(cache_id);
    } else if (type == CACHE_TYPE_DATA) {
        cache_ll_l1_dcache_preload_wait_done(cache_id);
    } else {
        // CACHE_TYPE_ALL (and any unexpected value): wait on both caches
        cache_ll_l1_icache_preload_wait_done(cache_id);
        cache_ll_l1_dcache_preload_wait_done(cache_id);
    }
}
/**
* @brief Disable ICache
*/
@@ -42,6 +42,15 @@ extern "C" {
#define CACHE_LL_L1_CORE0_EVENT_MASK (1<<0)
#define CACHE_LL_L1_CORE1_EVENT_MASK (1<<1)
/**
* @brief Preload strategy
*/
typedef enum {
CACHE_LL_PRELOAD_UNTIL_FETCH_DONE = 0,
CACHE_LL_PRELOAD_AFTER_FETCH = 1,
CACHE_LL_PRELOAD_ARBITRARY = 2,
} cache_ll_preload_strategy_t;
/*------------------------------------------------------------------------------
* Autoload
*----------------------------------------------------------------------------*/
@@ -842,6 +851,34 @@ static inline uint32_t cache_ll_l2_cache_get_line_size(uint32_t cache_id)
return 0;
}
/**
 * @brief Set the preload strategy
 *
 * No preload-strategy hardware exists on this target; the function is a no-op
 * kept only so common HAL code links against a uniform LL interface.
 */
__attribute__((always_inline))
static inline void cache_ll_preload_set_strategy(uint32_t cache_level, cache_type_t type, uint32_t cache_id, cache_ll_preload_strategy_t strategy)
{
    (void)strategy;
    (void)cache_id;
    (void)type;
    (void)cache_level;
}
/**
 * @brief Preload cache (no-op)
 *
 * Cache preload is not supported on this target; the function exists only so
 * common HAL code can call a uniform LL interface.
 *
 * @param cache_level level of the cache
 * @param type see `cache_type_t`
 * @param cache_id id of the cache
 * @param vaddr start virtual address of the region
 * @param size size of the region, in bytes
 * @param ascending preload order flag
 */
__attribute__((always_inline))
static inline void cache_ll_preload(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size, bool ascending)
{
    // Silence -Wunused-parameter, consistent with cache_ll_preload_set_strategy()
    (void)cache_level;
    (void)type;
    (void)cache_id;
    (void)vaddr;
    (void)size;
    (void)ascending;
}
/**
 * @brief Wait until cache preload is done (no-op)
 *
 * Cache preload is not supported on this target, so there is never an
 * operation to wait for; returns immediately.
 *
 * @param cache_level level of the cache
 * @param type see `cache_type_t`
 * @param cache_id id of the cache
 */
__attribute__((always_inline))
static inline void cache_ll_preload_wait_done(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
    // Silence -Wunused-parameter, consistent with cache_ll_preload_set_strategy()
    (void)cache_level;
    (void)type;
    (void)cache_id;
}
/**
* @brief Get Cache line size, in bytes
*
+24 -1
View File
@@ -1,6 +1,6 @@
/*
* SPDX-FileCopyrightText: 2021-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -145,6 +145,29 @@ void cache_hal_unfreeze(uint32_t cache_level, cache_type_t type);
*/
uint32_t cache_hal_get_cache_line_size(uint32_t cache_level, cache_type_t type);
/**
* @brief Start cache preload for a region (manual preload)
*
 * Preloads the given address range into the cache, which can improve
 * performance when the region will be read soon.
*
* @param cache_level Level of the cache (e.g. CACHE_LL_LEVEL_EXT_MEM)
* @param type CACHE_TYPE_DATA, CACHE_TYPE_INSTRUCTION, or CACHE_TYPE_ALL
* @param vaddr Start virtual address of the region to preload
* @param size Size in bytes. Should be cache-line aligned; if not,
* the actual preloaded length is rounded down to cache-line boundary.
* @param ascending true for ascending order, false for descending
*/
void cache_hal_preload(uint32_t cache_level, cache_type_t type, uint32_t vaddr, uint32_t size, bool ascending);
/**
* @brief Wait until cache preload started by cache_hal_preload() is done
*
* @param cache_level Level of the cache (must match the level used in cache_hal_preload)
* @param type CACHE_TYPE_DATA, CACHE_TYPE_INSTRUCTION, or CACHE_TYPE_ALL
*/
void cache_hal_preload_wait_done(uint32_t cache_level, cache_type_t type);
/**
* @brief Get Cache level and the ID of the vaddr
*
@@ -1,5 +1,5 @@
/**
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2025-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*/
@@ -25,7 +25,17 @@ typedef union {
* Reserved
*/
uint32_t l1_icache_shut_ibus3:1;
uint32_t reserved_4:28;
uint32_t reserved_4:4;
/** l1_icache_undef_op : R/W; bitpos: [15:8]; default: 0;
* Internal debug control field.
* bits[1:0]: Arbitration mode.
* 0: Round-robin, hold bus until data fetch completes.
* 1: Round-robin, release bus after request is issued.
* 2/3: Random arbitration.
* bit[5]: 1: Disable auto clock gating.
*/
uint32_t l1_icache_undef_op:8;
uint32_t reserved_16:16;
};
uint32_t val;
} cache_l1_icache_ctrl_reg_t;
@@ -1,5 +1,5 @@
/**
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2025-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*/
@@ -24,6 +24,14 @@ extern "C" {
#define EXTMEM_L1_CACHE_SHUT_IBUS_M (BIT(0))
#define EXTMEM_L1_CACHE_SHUT_IBUS_V 0x1
#define EXTMEM_L1_CACHE_SHUT_IBUS_S 0
/* EXTMEM_L1_CACHE_UNDEF_OP : R/W ;bitpos:[11:8] ;default: 4'h0 ; */
/*description: Internal debug control field. bits[1:0]: Arbitration mode. 0: Round-robin, hold bus
until data fetch completes. 1: Round-robin, release bus after request is issued. 2/3: Random
arbitration..*/
#define EXTMEM_L1_CACHE_UNDEF_OP 0x0000000F
#define EXTMEM_L1_CACHE_UNDEF_OP_M ((EXTMEM_L1_CACHE_UNDEF_OP_V)<<(EXTMEM_L1_CACHE_UNDEF_OP_S))
#define EXTMEM_L1_CACHE_UNDEF_OP_V 0xF
#define EXTMEM_L1_CACHE_UNDEF_OP_S 8
#define EXTMEM_L1_CACHE_WRAP_AROUND_CTRL_REG (DR_REG_EXTMEM_BASE + 0x20)
/* EXTMEM_L1_CACHE_WRAP : R/W ;bitpos:[4] ;default: 1'h0 ; */
@@ -1,5 +1,5 @@
/**
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2025-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*/
@@ -33,7 +33,11 @@ typedef union {
*/
uint32_t l1_icache_shut_ibus3:1;
/** l1_icache_undef_op : HRO; bitpos: [7:4]; default: 0;
* Reserved
* Internal debug control field.
* bits[1:0]: Arbitration mode.
* 0: Round-robin, hold bus until data fetch completes.
* 1: Round-robin, release bus after request is issued.
* 2/3: Random arbitration.
*/
uint32_t l1_icache_undef_op:4;
uint32_t reserved_8:24;
@@ -68,7 +72,11 @@ typedef union {
uint32_t l1_cache_shut_dma:1;
uint32_t reserved_5:3;
/** l1_cache_undef_op : R/W; bitpos: [11:8]; default: 0;
* Reserved
* Internal debug control field.
* bits[1:0]: Arbitration mode.
* 0: Round-robin, hold bus until data fetch completes.
* 1: Round-robin, release bus after request is issued.
* 2/3: Random arbitration.
*/
uint32_t l1_cache_undef_op:4;
uint32_t reserved_12:20;
@@ -87,7 +95,11 @@ typedef union {
*/
uint32_t l2_cache_shut_dma:1;
/** l2_cache_undef_op : HRO; bitpos: [8:5]; default: 0;
* Reserved
* Internal debug control field.
* bits[1:0]: Arbitration mode.
* 0: Round-robin, hold bus until data fetch completes.
* 1: Round-robin, release bus after request is issued.
* 2/3: Random arbitration.
*/
uint32_t l2_cache_undef_op:4;
uint32_t reserved_9:23;
@@ -1,5 +1,5 @@
/**
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2025-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*/
@@ -28,7 +28,17 @@ typedef union {
* 1: Disable
*/
uint32_t l1_cache_shut_bus1:1;
uint32_t reserved_2:30;
uint32_t reserved_2:6;
/** l1_icache_undef_op : R/W; bitpos: [15:8]; default: 0;
* Internal debug control field.
* bits[1:0]: Arbitration mode.
* 0: Round-robin, hold bus until data fetch completes.
* 1: Round-robin, release bus after request is issued.
* 2/3: Random arbitration.
* bit[5]: 1: Disable auto clock gating.
*/
uint32_t l1_icache_undef_op:8;
uint32_t reserved_16:16;
};
uint32_t val;
} cache_l1_cache_ctrl_reg_t;
@@ -1,5 +1,5 @@
/**
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2025-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*/
@@ -32,7 +32,17 @@ typedef union {
* Reserved
*/
uint32_t l1_icache_shut_ibus3:1;
uint32_t reserved_4:28;
uint32_t reserved_4:4;
/** l1_icache_undef_op : R/W; bitpos: [15:8]; default: 0;
* Internal debug control field.
* bits[1:0]: Arbitration mode.
* 0: Round-robin, hold bus until data fetch completes.
* 1: Round-robin, release bus after request is issued.
* 2/3: Random arbitration.
* bit[5]: 1: Disable auto clock gating.
*/
uint32_t l1_icache_undef_op:8;
uint32_t reserved_16:16;
};
uint32_t val;
} cache_l1_icache_ctrl_reg_t;
@@ -62,7 +72,17 @@ typedef union {
* The bit is used to disable DMA access L1-DCache, 0: enable, 1: disable
*/
uint32_t l1_dcache_shut_dma:1;
uint32_t reserved_5:27;
uint32_t reserved_5:3;
/** l1_dcache_undef_op : R/W; bitpos: [15:8]; default: 0;
* Internal debug control field.
* bits[1:0]: Arbitration mode.
* 0: Round-robin, hold bus until data fetch completes.
* 1: Round-robin, release bus after request is issued.
* 2/3: Random arbitration.
* bit[5]: 1: Disable auto clock gating.
*/
uint32_t l1_dcache_undef_op:8;
uint32_t reserved_16:16;
};
uint32_t val;
} cache_l1_dcache_ctrl_reg_t;
@@ -77,7 +97,17 @@ typedef union {
* The bit is used to disable DMA access L2-Cache, 0: enable, 1: disable
*/
uint32_t l2_cache_shut_dma:1;
uint32_t reserved_5:27;
uint32_t reserved_5:3;
/** l2_cache_undef_op : HRO; bitpos: [15:8]; default: 0;
* Internal debug control field.
* bits[1:0]: Arbitration mode.
* 0: Round-robin, hold bus until data fetch completes.
* 1: Round-robin, release bus after request is issued.
* 2/3: Random arbitration.
* bit[5]: 1: Disable auto clock gating.
*/
uint32_t l2_cache_undef_op:8;
uint32_t reserved_16:16;
};
uint32_t val;
} cache_l2_cache_ctrl_reg_t;
@@ -1,5 +1,5 @@
/**
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2025-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*/
@@ -38,8 +38,12 @@ typedef union {
uint32_t l1_icache_shut_ibus3:1;
uint32_t reserved_4:4;
/** l1_icache_undef_op : R/W; bitpos: [15:8]; default: 0;
* Reserved
* This field is only for internal debugging purposes. Do not use it in applications.
* Internal debug control field.
* bits[1:0]: Arbitration mode.
* 0: Round-robin, hold bus until data fetch completes.
* 1: Round-robin, release bus after request is issued.
* 2/3: Random arbitration.
* bit[5]: 1: Disable auto clock gating.
*/
uint32_t l1_icache_undef_op:8;
uint32_t reserved_16:16;
@@ -79,8 +83,12 @@ typedef union {
uint32_t l1_dcache_shut_dma:1;
uint32_t reserved_5:3;
/** l1_dcache_undef_op : R/W; bitpos: [15:8]; default: 0;
* Reserved
* This field is only for internal debugging purposes. Do not use it in applications.
* Internal debug control field.
* bits[1:0]: Arbitration mode.
* 0: Round-robin, hold bus until data fetch completes.
* 1: Round-robin, release bus after request is issued.
* 2/3: Random arbitration.
* bit[5]: 1: Disable auto clock gating.
*/
uint32_t l1_dcache_undef_op:8;
uint32_t reserved_16:16;
@@ -101,8 +109,12 @@ typedef union {
uint32_t l2_cache_shut_dma:1;
uint32_t reserved_5:3;
/** l2_cache_undef_op : HRO; bitpos: [15:8]; default: 0;
* Reserved
* This field is only for internal debugging purposes. Do not use it in applications.
* Internal debug control field.
* bits[1:0]: Arbitration mode.
* 0: Round-robin, hold bus until data fetch completes.
* 1: Round-robin, release bus after request is issued.
* 2/3: Random arbitration.
* bit[5]: 1: Disable auto clock gating.
*/
uint32_t l2_cache_undef_op:8;
uint32_t reserved_16:16;