Merge branch 'bugfix/fix_gdma_acquire_and_release_v6.0' into 'release/v6.0'

fix(gdma): fix the concurrent contention issue of gdma channel acquire and release (v6.0)

See merge request espressif/esp-idf!43024
This commit is contained in:
morris
2025-11-13 10:17:15 +08:00
10 changed files with 173 additions and 167 deletions
@@ -8,8 +8,8 @@
#include "unity_test_runner.h"
#include "esp_heap_caps.h"
// Some resources are lazy allocated in I2S driver, the threadhold is left for that case
#define TEST_MEMORY_LEAK_THRESHOLD (-300)
// Some resources are lazy allocated in I2S driver, the threshold is left for that case
#define TEST_MEMORY_LEAK_THRESHOLD (-350)
static size_t before_free_8bit;
static size_t before_free_32bit;
+3 -3
View File
@@ -162,11 +162,11 @@ static sdm_group_t *sdm_acquire_group_handle(int group_id, sdm_clock_source_t cl
ESP_LOGD(TAG, "new group (%d) at %p", group_id, group);
#if CONFIG_PM_ENABLE
esp_pm_lock_type_t pm_type = ESP_PM_NO_LIGHT_SLEEP;
#if TIMER_LL_FUNC_CLOCK_SUPPORT_APB
#if SDM_LL_FUNC_CLOCK_SUPPORT_APB
if (clk_src == SDM_CLK_SRC_APB) {
pm_type = ESP_PM_APB_FREQ_MAX;
}
#endif // TIMER_LL_FUNC_CLOCK_SUPPORT_APB
#endif // SDM_LL_FUNC_CLOCK_SUPPORT_APB
if (esp_pm_lock_create(pm_type, 0, soc_sdm_signals[group_id].module_name, &group->pm_lock) != ESP_OK) {
ESP_LOGE(TAG, "fail to create PM lock for group %d", group_id);
}
@@ -327,7 +327,7 @@ esp_err_t sdm_new_channel(const sdm_config_t *config, sdm_channel_handle_t *ret_
// preset the duty cycle to zero
sdm_ll_set_pulse_density(group->hal.dev, chan_id, 0);
// initialize other members of timer
// initialize other members
chan->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
atomic_init(&chan->fsm, SDM_FSM_INIT); // set the initial state to INIT
@@ -9,7 +9,7 @@
#include "esp_heap_caps.h"
#include "esp_newlib.h"
#define TEST_MEMORY_LEAK_THRESHOLD (200)
#define TEST_MEMORY_LEAK_THRESHOLD (300)
void setUp(void)
{
@@ -9,7 +9,7 @@
#include "esp_heap_caps.h"
#include "esp_newlib.h"
#define TEST_MEMORY_LEAK_THRESHOLD (200)
#define TEST_MEMORY_LEAK_THRESHOLD (300)
void setUp(void)
{
+82 -59
View File
@@ -11,7 +11,7 @@
* +-----------------------------------+--+ +--+-----------------------------------+
* | GDMA-Group-X | | | | GDMA-Group-Y |
* | +-------------+ +------------+ | | | | +-------------+ +------------+ |
* | | GDMA-Pair-0 |... |GDMA-Pair-N | | | | | | GDMA-Pair-0 |... |GDMA-Pair-N | |
* | | GDMA-Pair-0 |... |GDMA-Pair-N | | | | | | GDMA-Pair-0 |... |GDMA-Pair-M | |
* | | | | | | | | | | | | | |
* | | TX-Chan |... | TX-Chan | | | | | | TX-Chan |... | TX-Chan | |
* | | RX-Chan | | RX-Chan | | | | | | RX-Chan | | RX-Chan | |
@@ -29,25 +29,13 @@
#include "gdma_priv.h"
#include "esp_memory_utils.h"
#include "esp_flash_encrypt.h"
#include "esp_private/critical_section.h"
#if CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP
#include "esp_private/gdma_sleep_retention.h"
#endif
#if !SOC_RCC_IS_INDEPENDENT
// Reset and Clock Control registers are mixing with other peripherals, so we need to use a critical section
#define GDMA_RCC_ATOMIC() PERIPH_RCC_ATOMIC()
#else
#define GDMA_RCC_ATOMIC()
#endif
#define GDMA_INVALID_PERIPH_TRIG (0x3F)
#define SEARCH_REQUEST_RX_CHANNEL (1 << 0)
#define SEARCH_REQUEST_TX_CHANNEL (1 << 1)
typedef struct gdma_platform_t {
portMUX_TYPE spinlock; // platform level spinlock, protect the group handle slots and reference count of each group.
portMUX_TYPE spinlock; // platform level spinlock, protect the group handle slots and reference count of each group.
gdma_group_t *groups[GDMA_LL_GET(INST_NUM)]; // array of GDMA group instances
int group_ref_counts[GDMA_LL_GET(INST_NUM)]; // reference count used to protect group install/uninstall
} gdma_platform_t;
@@ -84,7 +72,23 @@ static esp_err_t do_allocate_gdma_channel(const gdma_channel_search_info_t *sear
int search_code = 0;
gdma_pair_t *pair = NULL;
gdma_group_t *group = NULL;
ESP_RETURN_ON_FALSE(config && ret_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
// Validate input parameters
ESP_RETURN_ON_FALSE(search_info && config && ret_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_RETURN_ON_FALSE(search_info->start_group_id < search_info->end_group_id, ESP_ERR_INVALID_ARG, TAG, "invalid group range");
ESP_RETURN_ON_FALSE(search_info->pairs_per_group > 0, ESP_ERR_INVALID_ARG, TAG, "invalid pairs_per_group");
if (config->sibling_chan) {
ESP_RETURN_ON_FALSE(config->sibling_chan->direction != config->direction, ESP_ERR_INVALID_ARG, TAG,
"sibling channel should have a different direction");
ESP_RETURN_ON_FALSE(config->sibling_chan->pair, ESP_ERR_INVALID_ARG, TAG, "invalid sibling channel");
}
#if CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP && SOC_GDMA_SUPPORT_SLEEP_RETENTION
// retention module is per GDMA pair, before we allocate the pair object, some common registers are already configured in "hal_init"
// if a light sleep happens and powers off the gdma module, those registers will get lost
// to work around it, we can acquire a power lock first, before any register configuration
sleep_retention_power_lock_acquire();
#endif
if (config->flags.reserve_sibling) {
search_code = SEARCH_REQUEST_RX_CHANNEL | SEARCH_REQUEST_TX_CHANNEL; // search for a pair of channels
@@ -101,13 +105,13 @@ static esp_err_t do_allocate_gdma_channel(const gdma_channel_search_info_t *sear
if (config->sibling_chan) {
pair = config->sibling_chan->pair;
ESP_GOTO_ON_FALSE(pair, ESP_ERR_INVALID_ARG, err, TAG, "invalid sibling channel");
ESP_GOTO_ON_FALSE(config->sibling_chan->direction != config->direction, ESP_ERR_INVALID_ARG, err, TAG, "sibling channel should have a different direction");
group = pair->group;
// Acquire reference early, before any additional error checks
esp_os_enter_critical(&group->spinlock);
group->pair_ref_counts[pair->pair_id]++; // channel obtains a reference to pair
esp_os_exit_critical(&group->spinlock);
goto search_done; // skip the search path below if user has specify a sibling channel
// skip the search path below if user has specify a sibling channel
goto search_done;
}
int start_group_id = search_info->start_group_id;
@@ -129,17 +133,22 @@ static esp_err_t do_allocate_gdma_channel(const gdma_channel_search_info_t *sear
esp_os_exit_critical(&pair->spinlock);
// found a pair that satisfies the search condition
if (search_code == 0) {
esp_os_enter_critical(&group->spinlock);
group->pair_ref_counts[pair->pair_id]++; // channel obtains a reference to pair
esp_os_exit_critical(&group->spinlock);
// gdma_acquire_pair_handle already increased pair ref count, no need to do it again
// pair is ready to use, don't release it
// however, we need to release the group reference acquired by gdma_acquire_group_handle
// because gdma_acquire_pair_handle already added its own group reference
gdma_release_group_handle(group);
break; // exit inner loop, will also exit outer loop due to search_code == 0
} else {
// not suitable pair, release it
gdma_release_pair_handle(pair);
pair = NULL;
}
gdma_release_pair_handle(pair);
} // loop used to search pair
gdma_release_group_handle(group);
// restore to initial state if no suitable channel slot is found
if (search_code) {
// Only release group if we didn't find a suitable pair in this group
if (search_code != 0) {
gdma_release_group_handle(group);
group = NULL;
pair = NULL;
}
} // loop used to search group
ESP_GOTO_ON_FALSE(search_code == 0, ESP_ERR_NOT_FOUND, err, TAG, "no free gdma channel, search code=%d", search_code);
@@ -170,6 +179,13 @@ search_done:
// set 1 as default weight, can be overwritten by user
gdma_set_weight(*ret_chan, 1);
#endif
#if CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP && SOC_GDMA_SUPPORT_SLEEP_RETENTION
// set up the sleep retention link for the pair
gdma_acquire_sleep_retention(pair);
// release the helper power lock because we have finished setting up the sleep retention link
sleep_retention_power_lock_release();
#endif
(*ret_chan)->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
ESP_LOGD(TAG, "new %s channel (%d,%d) at %p", (config->direction == GDMA_CHANNEL_DIRECTION_TX) ? "tx" : "rx",
group->group_id, pair->pair_id, *ret_chan);
@@ -184,10 +200,15 @@ err:
}
if (pair) {
gdma_release_pair_handle(pair);
}
if (group) {
// pair release will also release group if it's the last reference
// so we don't need to release group separately to avoid double-release
} else if (group) {
// only release group if pair wasn't allocated/released
gdma_release_group_handle(group);
}
#if CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP && SOC_GDMA_SUPPORT_SLEEP_RETENTION
sleep_retention_power_lock_release();
#endif
return ret;
}
@@ -219,14 +240,6 @@ esp_err_t gdma_new_axi_channel(const gdma_channel_alloc_config_t *config, gdma_c
}
#endif // SOC_HAS(AXI_GDMA)
#if SOC_HAS(AHB_GDMA)
esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
__attribute__((alias("gdma_new_ahb_channel")));
#elif SOC_HAS(AXI_GDMA)
esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
__attribute__((alias("gdma_new_axi_channel")));
#endif
esp_err_t gdma_del_channel(gdma_channel_handle_t dma_chan)
{
ESP_RETURN_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
@@ -613,14 +626,14 @@ static void gdma_release_group_handle(gdma_group_t *group)
do_deinitialize = true;
// deregister from the platform
s_platform.groups[group_id] = NULL;
gdma_hal_deinit(&group->hal);
PERIPH_RCC_ATOMIC() {
gdma_ll_enable_bus_clock(group_id, false);
}
}
esp_os_exit_critical(&s_platform.spinlock);
if (do_deinitialize) {
gdma_hal_deinit(&group->hal);
GDMA_RCC_ATOMIC() {
gdma_ll_enable_bus_clock(group_id, false);
}
free(group);
ESP_LOGD(TAG, "del group %d", group_id);
}
@@ -640,18 +653,11 @@ static gdma_group_t *gdma_acquire_group_handle(int group_id, void (*hal_init)(gd
new_group = true;
group = pre_alloc_group;
s_platform.groups[group_id] = group; // register to platform
} else {
group = s_platform.groups[group_id];
}
// someone acquired the group handle means we have a new object that refer to this group
s_platform.group_ref_counts[group_id]++;
esp_os_exit_critical(&s_platform.spinlock);
if (new_group) {
group->group_id = group_id;
group->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
// enable APB to access GDMA registers
GDMA_RCC_ATOMIC() {
PERIPH_RCC_ATOMIC() {
gdma_ll_enable_bus_clock(group_id, true);
gdma_ll_reset_register(group_id);
}
@@ -662,6 +668,14 @@ static gdma_group_t *gdma_acquire_group_handle(int group_id, void (*hal_init)(gd
#endif
};
hal_init(&group->hal, &config);
} else {
group = s_platform.groups[group_id];
}
// someone acquired the group handle means we have a new object that refer to this group
s_platform.group_ref_counts[group_id]++;
esp_os_exit_critical(&s_platform.spinlock);
if (new_group) {
ESP_LOGD(TAG, "new group (%d) at %p", group_id, group);
} else {
free(pre_alloc_group);
@@ -687,9 +701,6 @@ static void gdma_release_pair_handle(gdma_pair_t *pair)
if (do_deinitialize) {
free(pair);
#if CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP && SOC_GDMA_SUPPORT_SLEEP_RETENTION
gdma_sleep_retention_deinit(group->group_id, pair_id);
#endif
ESP_LOGD(TAG, "del pair (%d,%d)", group->group_id, pair_id);
gdma_release_group_handle(group);
}
@@ -708,6 +719,10 @@ static gdma_pair_t *gdma_acquire_pair_handle(gdma_group_t *group, int pair_id)
if (!group->pairs[pair_id]) {
new_pair = true;
pair = pre_alloc_pair;
// initialize pair before registering to avoid accessing uninitialized pair
pair->group = group;
pair->pair_id = pair_id;
pair->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
// register the pair to the group
group->pairs[pair_id] = pair;
} else {
@@ -717,23 +732,18 @@ static gdma_pair_t *gdma_acquire_pair_handle(gdma_group_t *group, int pair_id)
group->pair_ref_counts[pair_id]++;
esp_os_exit_critical(&group->spinlock);
// Update group reference count outside of group->spinlock to avoid deadlock
// This must be done after pair registration to ensure group remains valid
if (new_pair) {
pair->group = group;
pair->pair_id = pair_id;
pair->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
esp_os_enter_critical(&s_platform.spinlock);
// pair obtains a reference to group, so increase it
s_platform.group_ref_counts[group->group_id]++;
esp_os_exit_critical(&s_platform.spinlock);
#if CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP && SOC_GDMA_SUPPORT_SLEEP_RETENTION
gdma_sleep_retention_init(group->group_id, pair_id);
#endif
ESP_LOGD(TAG, "new pair (%d,%d) at %p", group->group_id, pair_id, pair);
} else {
free(pre_alloc_pair);
}
out:
return pair;
}
@@ -743,6 +753,7 @@ static esp_err_t gdma_del_tx_channel(gdma_channel_t *dma_channel)
gdma_pair_t *pair = dma_channel->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
// Store IDs early to avoid use-after-free
int pair_id = pair->pair_id;
int group_id = group->group_id;
gdma_tx_channel_t *tx_chan = __containerof(dma_channel, gdma_tx_channel_t, base);
@@ -762,6 +773,11 @@ static esp_err_t gdma_del_tx_channel(gdma_channel_t *dma_channel)
free(tx_chan);
ESP_LOGD(TAG, "del tx channel (%d,%d)", group_id, pair_id);
#if CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP && SOC_GDMA_SUPPORT_SLEEP_RETENTION
// release sleep retention lock
gdma_release_sleep_retention(pair);
#endif
// channel has a reference on pair, release it now
gdma_release_pair_handle(pair);
return ESP_OK;
@@ -772,6 +788,7 @@ static esp_err_t gdma_del_rx_channel(gdma_channel_t *dma_channel)
gdma_pair_t *pair = dma_channel->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
// Store IDs early to avoid use-after-free
int pair_id = pair->pair_id;
int group_id = group->group_id;
gdma_rx_channel_t *rx_chan = __containerof(dma_channel, gdma_rx_channel_t, base);
@@ -791,6 +808,12 @@ static esp_err_t gdma_del_rx_channel(gdma_channel_t *dma_channel)
free(rx_chan);
ESP_LOGD(TAG, "del rx channel (%d,%d)", group_id, pair_id);
#if CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP && SOC_GDMA_SUPPORT_SLEEP_RETENTION
// release sleep retention lock
gdma_release_sleep_retention(pair);
#endif
// channel has a reference on pair, release it now
gdma_release_pair_handle(pair);
return ESP_OK;
}
@@ -33,6 +33,8 @@
#include "soc/periph_defs.h"
#include "esp_private/gdma.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/critical_section.h"
#include "esp_private/sleep_retention.h"
#if CONFIG_GDMA_OBJ_DRAM_SAFE
#define GDMA_MEM_ALLOC_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT)
@@ -98,6 +100,9 @@ struct gdma_rx_channel_t {
gdma_rx_event_callbacks_t cbs; // RX event callbacks
};
void gdma_acquire_sleep_retention(gdma_pair_t* pair);
void gdma_release_sleep_retention(gdma_pair_t* pair);
#ifdef __cplusplus
}
#endif
@@ -4,61 +4,79 @@
* SPDX-License-Identifier: Apache-2.0
*/
#include <sys/lock.h>
#include "gdma_priv.h"
#include "esp_private/sleep_retention.h"
#include "esp_private/esp_regdma.h"
typedef struct {
int group_id;
int pair_id;
} gdma_channel_retention_arg_t;
// Note!: the sleep retention APIs are using OS locks, so here we have to use a lock rather than a light-weight critical section
static _lock_t gdma_sleep_retention_lock;
static uint8_t pair_ref_counts[GDMA_LL_GET(INST_NUM)][GDMA_LL_GET(PAIRS_PER_INST)];
ESP_LOG_ATTR_TAG(TAG, "gdma");
static esp_err_t sleep_gdma_channel_retention_init(void *arg)
{
gdma_channel_retention_arg_t *parg = (gdma_channel_retention_arg_t *)arg;
int group_id = parg->group_id;
int pair_id = parg->pair_id;
gdma_pair_t *pair = (gdma_pair_t *)arg;
int group_id = pair->group->group_id;
int pair_id = pair->pair_id;
sleep_retention_module_t module = gdma_chx_regs_retention[group_id][pair_id].module_id;
esp_err_t err = sleep_retention_entries_create(gdma_chx_regs_retention[group_id][pair_id].link_list, gdma_chx_regs_retention[group_id][pair_id].link_num, REGDMA_LINK_PRI_GDMA, module);
esp_err_t err = sleep_retention_entries_create(gdma_chx_regs_retention[group_id][pair_id].link_list,
gdma_chx_regs_retention[group_id][pair_id].link_num,
REGDMA_LINK_PRI_GDMA, module);
if (err == ESP_OK) {
ESP_LOGD(TAG, "GDMA pair (%d, %d) retention initialization", group_id, pair_id);
ESP_LOGD(TAG, "retention link created for pair (%d, %d)", group_id, pair_id);
} else {
ESP_LOGE(TAG, "failed to create retention link for pair (%d, %d)", group_id, pair_id);
}
ESP_RETURN_ON_ERROR(err, TAG, "Failed to create sleep retention linked list for GDMA pair (%d, %d) retention", group_id, pair_id);
return err;
}
esp_err_t gdma_sleep_retention_init(int group_id, int pair_id)
void gdma_acquire_sleep_retention(gdma_pair_t* pair)
{
gdma_channel_retention_arg_t arg = { .group_id = group_id, .pair_id = pair_id };
int group_id = pair->group->group_id;
int pair_id = pair->pair_id;
sleep_retention_module_init_param_t init_param = {
.cbs = { .create = { .handle = sleep_gdma_channel_retention_init, .arg = &arg } },
.cbs = { .create = { .handle = sleep_gdma_channel_retention_init, .arg = pair } },
.depends = RETENTION_MODULE_BITMAP_INIT(CLOCK_SYSTEM)
};
sleep_retention_module_t module = gdma_chx_regs_retention[group_id][pair_id].module_id;
esp_err_t err = sleep_retention_module_init(module, &init_param);
if (err == ESP_OK) {
err = sleep_retention_module_allocate(module);
_lock_acquire(&gdma_sleep_retention_lock);
// First time acquiring this pair, initialize the module
if (pair_ref_counts[group_id][pair_id] == 0) {
esp_err_t err = sleep_retention_module_init(module, &init_param);
if (err != ESP_OK) {
ESP_LOGW(TAG, "Failed to allocate sleep retention linked list for GDMA retention");
ESP_LOGW(TAG, "init retention module failed for pair (%d, %d), power domain may be turned off during sleep", group_id, pair_id);
} else {
err = sleep_retention_module_allocate(module);
if (err != ESP_OK) {
ESP_LOGW(TAG, "fail to allocate retention link list for pair (%d, %d)", group_id, pair_id);
// don't call sleep_retention_module_deinit here, otherwise GDMA peripheral may be powered off during sleep
}
}
}
return err;
pair_ref_counts[group_id][pair_id]++;
_lock_release(&gdma_sleep_retention_lock);
}
esp_err_t gdma_sleep_retention_deinit(int group_id, int pair_id)
void gdma_release_sleep_retention(gdma_pair_t* pair)
{
int group_id = pair->group->group_id;
int pair_id = pair->pair_id;
sleep_retention_module_t module = gdma_chx_regs_retention[group_id][pair_id].module_id;
esp_err_t err = sleep_retention_module_free(module);
if (err != ESP_OK) {
ESP_LOGW(TAG, "GDMA pair (%d, %d) retention destroy failed", group_id, pair_id);
_lock_acquire(&gdma_sleep_retention_lock);
pair_ref_counts[group_id][pair_id]--;
// Last reference, free the module
if (pair_ref_counts[group_id][pair_id] == 0) {
esp_err_t err = sleep_retention_module_free(module);
if (err != ESP_OK) {
ESP_LOGW(TAG, "fail to free the retention link list for pair (%d, %d)", group_id, pair_id);
}
err = sleep_retention_module_deinit(module);
if (err != ESP_OK) {
ESP_LOGW(TAG, "fail to deinit the retention module for pair (%d, %d)", group_id, pair_id);
}
}
err = sleep_retention_module_deinit(module);
if (err != ESP_OK) {
ESP_LOGW(TAG, "GDMA pair (%d, %d) retention deinit failed", group_id, pair_id);
}
return err;
_lock_release(&gdma_sleep_retention_lock);
}
@@ -52,7 +52,7 @@ typedef struct {
/**
* @brief Type of GDMA event callback
* @param dma_chan GDMA channel handle, created from `gdma_new_channel`
* @param dma_chan GDMA channel handle, created from `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param event_data GDMA event data. Different event share the same data structure, but the caller may only use a few or none of the data members.
* @param user_data User registered data from `gdma_register_tx_event_callbacks` or `gdma_register_rx_event_callbacks`
*
@@ -146,7 +146,7 @@ esp_err_t gdma_new_axi_channel(const gdma_channel_alloc_config_t *config, gdma_c
* @note Suggest to use helper macro `GDMA_MAKE_TRIGGER` to construct parameter `trig_periph`. e.g. GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SHA,0)
* @note Connecting to a peripheral will also reset the DMA FIFO and FSM automatically
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[in] trig_periph GDMA trigger peripheral
* @return
* - ESP_OK: Connect GDMA channel successfully
@@ -159,7 +159,7 @@ esp_err_t gdma_connect(gdma_channel_handle_t dma_chan, gdma_trigger_t trig_perip
/**
* @brief Disconnect GMA channel from peripheral
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @return
* - ESP_OK: Disconnect GDMA channel successfully
* - ESP_ERR_INVALID_ARG: Disconnect GDMA channel failed because of invalid argument
@@ -184,7 +184,7 @@ typedef struct {
* @note It's highly recommended to enable the burst mode and set proper burst size for the DMA channel,
* which can improve the performance in accessing external memory by a lot.
*
* @param[in] chan DMA channel handle, allocated by `gdma_new_channel`
* @param[in] chan DMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[in] config Transfer configurations
* @return
* - ESP_OK: Configure DMA transfer parameters successfully
@@ -202,7 +202,7 @@ esp_err_t gdma_config_transfer(gdma_channel_handle_t dma_chan, const gdma_transf
* @note The returned alignment doesn't take the cache line size into account, if you want to do aligned memory allocation,
* you should align the buffer size to the cache line size by yourself if the DMA buffer is behind a cache.
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[out] int_mem_alignment Internal memory alignment
* @param[out] ext_mem_alignment External memory alignment
* @return
@@ -215,7 +215,7 @@ esp_err_t gdma_get_alignment_constraints(gdma_channel_handle_t dma_chan, size_t
/**
* @brief Apply channel strategy for GDMA channel
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[in] config Configuration of GDMA channel strategy
* - ESP_OK: Apply channel strategy successfully
* - ESP_ERR_INVALID_ARG: Apply channel strategy failed because of invalid argument
@@ -228,7 +228,7 @@ esp_err_t gdma_apply_strategy(gdma_channel_handle_t dma_chan, const gdma_strateg
*
* @note By default, all GDMA channels are with the same priority: 0. Channels with the same priority are served in round-robin manner.
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[in] priority Priority of GDMA channel, higher value means higher priority
* @return
* - ESP_OK: Set GDMA channel priority successfully
@@ -239,9 +239,9 @@ esp_err_t gdma_set_priority(gdma_channel_handle_t dma_chan, uint32_t priority);
/**
* @brief Delete GDMA channel
* @note If you call `gdma_new_channel` several times for a same peripheral, make sure you call this API the same times.
* @note If you call `gdma_new_ahb_channel/gdma_new_axi_channel` several times for a same peripheral, make sure you call this API the same times.
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @return
* - ESP_OK: Delete GDMA channel successfully
* - ESP_ERR_INVALID_ARG: Delete GDMA channel failed because of invalid argument
@@ -255,7 +255,7 @@ esp_err_t gdma_del_channel(gdma_channel_handle_t dma_chan);
* @note This API breaks the encapsulation of GDMA Channel Object.
* With the returned group/channel ID, you can even bypass all other GDMA driver API and access Low Level API directly.
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[out] group_id Returned group ID
* @param[out] channel_id Returned channel ID
* @return
@@ -271,7 +271,7 @@ esp_err_t gdma_get_group_channel_id(gdma_channel_handle_t dma_chan, int *group_i
* @brief Set GDMA event callbacks for TX channel
* @note This API will install GDMA interrupt service for the channel internally
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[in] cbs Group of callback functions
* @param[in] user_data User data, which will be passed to callback functions directly
* @return
@@ -285,7 +285,7 @@ esp_err_t gdma_register_tx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_
* @brief Set GDMA event callbacks for RX channel
* @note This API will install GDMA interrupt service for the channel internally
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[in] cbs Group of callback functions
* @param[in] user_data User data, which will be passed to callback functions directly
* @return
@@ -301,7 +301,7 @@ esp_err_t gdma_register_rx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_
* @note This function is allowed to run within ISR context
* @note This function is also allowed to run when Cache is disabled, if `CONFIG_GDMA_CTRL_FUNC_IN_IRAM` is enabled
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[in] desc_base_addr Base address of descriptors (usually the descriptors are chained into a link or ring)
* @return
* - ESP_OK: Start DMA engine successfully
@@ -317,7 +317,7 @@ esp_err_t gdma_start(gdma_channel_handle_t dma_chan, intptr_t desc_base_addr);
* @note This function is allowed to run within ISR context
* @note This function is also allowed to run when Cache is disabled, if `CONFIG_GDMA_CTRL_FUNC_IN_IRAM` is enabled
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @return
* - ESP_OK: Stop DMA engine successfully
* - ESP_ERR_INVALID_ARG: Stop DMA engine failed because of invalid argument
@@ -333,7 +333,7 @@ esp_err_t gdma_stop(gdma_channel_handle_t dma_chan);
* @note This function is also allowed to run when Cache is disabled, if `CONFIG_GDMA_CTRL_FUNC_IN_IRAM` is enabled
* @note This API could also resume a paused DMA engine, make sure new descriptors have been appended to the descriptor chain before calling it.
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @return
* - ESP_OK: Send append command to DMA engine successfully
* - ESP_ERR_INVALID_ARG: Send append command to DMA engine failed because of invalid argument
@@ -348,7 +348,7 @@ esp_err_t gdma_append(gdma_channel_handle_t dma_chan);
* @note This function is also allowed to run when Cache is disabled, if `CONFIG_GDMA_CTRL_FUNC_IN_IRAM` is enabled
* @note Resetting a DMA channel won't break the connection with the target peripheral
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @return
* - ESP_OK: DMA channel reset successfully
* - ESP_ERR_INVALID_ARG: DMA channel reset failed due to invalid arguments
@@ -369,7 +369,7 @@ typedef struct {
*
* @note The created ETM event object can be deleted later by calling `esp_etm_del_event`
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[in] config GDMA ETM event configuration
* @param[out] out_event Returned ETM event handle
* @return
@@ -393,7 +393,7 @@ typedef struct {
* @note The created ETM task object can be deleted later by calling `esp_etm_del_task`
* @note If the GDMA task (e.g. start/stop) is controlled by ETM, then you can't use `gdma_start`/`gdma_stop` to control it.
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[in] config GDMA ETM task configuration
* @param[out] out_task Returned ETM task handle
* @return
@@ -412,7 +412,7 @@ esp_err_t gdma_new_etm_task(gdma_channel_handle_t dma_chan, const gdma_etm_task_
* which can bring conflict if the peripheral is also using the same trigger ID. This function can return the free IDs
* for memory copy, at the runtime.
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[out] mask Returned mask of free M2M trigger IDs
* @return
* - ESP_OK: Get free M2M trigger IDs successfully
@@ -438,7 +438,7 @@ typedef struct {
* @note This function must be called before `gdma_start`.
* @note The CRC Calculator will reset itself automatically if the DMA stops and starts again.
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[in] config CRC Calculator configuration
* @return
* - ESP_OK: Configure CRC Calculator successfully
@@ -452,7 +452,7 @@ esp_err_t gdma_config_crc_calculator(gdma_channel_handle_t dma_chan, const gdma_
*
* @note You need to call this function before a new DMA transaction starts, otherwise the CRC results may be overridden.
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[out] result Returned CRC result
* @return
* - ESP_OK: Get CRC result successfully
@@ -474,7 +474,7 @@ esp_err_t gdma_crc_get_result(gdma_channel_handle_t dma_chan, uint32_t *result);
* @note Weighted arbitration is different from priority arbitration. "Weight" is used after comparing "priority"
* After the priority comparison, then arbitrator checks whether there are still unused tokens in the channel.
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_ahb_channel/gdma_new_axi_channel`
* @param[in] weight Weight of GDMA channel, higher value means higher priority in weighted arbitration.
* @return
* - ESP_OK: Set GDMA channel weight successfully
@@ -1,40 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
// DO NOT USE THESE APIS IN ANY APPLICATIONS
// GDMA driver is not public for end users, but for ESP-IDF developers.
#pragma once
#include "esp_err.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Initialize GDMA channel retention link for powerdown the TOP powerdomain during lightsleep
* @param group_id Group id
* @param pair_id Pair id
* @return
* - ESP_OK: Create DMA retention link successfully
* - ESP_ERR_NO_MEM: Create DMA retention link failed because out of memory
*/
esp_err_t gdma_sleep_retention_init(int group_id, int pair_id);
/**
* Destroy GDMA channel retention link
* @param group_id Group id
* @param pair_id Pair id
* @return
* - ESP_OK: GDMA channel retention link destrory successfully
* - ESP_ERR_INVALID_STATE: GDMA channel retention link not create yet
*/
esp_err_t gdma_sleep_retention_deinit(int group_id, int pair_id);
#ifdef __cplusplus
}
#endif
@@ -25,9 +25,9 @@ extern "C" {
* @note This macro will increase the reference lock of that peripheral.
* You can get the value before the increment from the `rc_name` local variable
*/
#define PERIPH_RCC_ACQUIRE_ATOMIC(rc_periph, rc_name) \
for (uint8_t rc_name, _rc_cnt = 1, __DECLARE_RCC_RC_ATOMIC_ENV; \
_rc_cnt ? (rc_name = periph_rcc_acquire_enter(rc_periph), 1) : 0; \
#define PERIPH_RCC_ACQUIRE_ATOMIC(rc_periph, rc_name) \
for (uint8_t rc_name, _rc_cnt = 1, __DECLARE_RCC_RC_ATOMIC_ENV __attribute__((unused)); \
_rc_cnt ? (rc_name = periph_rcc_acquire_enter(rc_periph), 1) : 0; \
periph_rcc_acquire_exit(rc_periph, rc_name), _rc_cnt--)
/**
@@ -37,9 +37,9 @@ extern "C" {
* @note This macro will decrease the reference lock of that peripheral.
* You can get the value after the decrease from the `rc_name` local variable
*/
#define PERIPH_RCC_RELEASE_ATOMIC(rc_periph, rc_name) \
for (uint8_t rc_name, _rc_cnt = 1, __DECLARE_RCC_RC_ATOMIC_ENV; \
_rc_cnt ? (rc_name = periph_rcc_release_enter(rc_periph), 1) : 0; \
#define PERIPH_RCC_RELEASE_ATOMIC(rc_periph, rc_name) \
for (uint8_t rc_name, _rc_cnt = 1, __DECLARE_RCC_RC_ATOMIC_ENV __attribute__((unused)); \
_rc_cnt ? (rc_name = periph_rcc_release_enter(rc_periph), 1) : 0; \
periph_rcc_release_exit(rc_periph, rc_name), _rc_cnt--)
/**
@@ -47,9 +47,9 @@ extern "C" {
*
* @note User code protected by this macro should be as short as possible, because it's a critical section
*/
#define PERIPH_RCC_ATOMIC() \
for (int _rc_cnt = 1, __DECLARE_RCC_ATOMIC_ENV; \
_rc_cnt ? (periph_rcc_enter(), 1) : 0; \
#define PERIPH_RCC_ATOMIC() \
for (int _rc_cnt = 1, __DECLARE_RCC_ATOMIC_ENV __attribute__((unused)); \
_rc_cnt ? (periph_rcc_enter(), 1) : 0; \
periph_rcc_exit(), _rc_cnt--)
/** @cond */