refactor(rmt): use gdma link list driver to mount buffer

Chen Jichang authored on 2025-05-22 14:05:20 +08:00; committed by Chen Ji Chang
parent 844dc17b43
commit 4ee0feeb6f
10 changed files with 226 additions and 168 deletions
+18
@@ -30,3 +30,21 @@ entries:
if RMT_RECV_FUNC_IN_IRAM = y:
rmt_rx: rmt_receive (noflash)
[mapping:rmt_driver_gdma_link]
archive: libesp_hw_support.a
entries:
if RMT_TX_ISR_HANDLER_IN_IRAM = y && SOC_RMT_SUPPORT_DMA = y:
gdma_link: gdma_link_mount_buffers (noflash)
gdma_link: gdma_link_set_owner (noflash)
gdma_link: gdma_link_get_head_addr (noflash)
gdma_link: gdma_link_set_length (noflash)
gdma_link: gdma_link_concat (noflash)
gdma_link: gdma_link_check_end (noflash)
if RMT_RX_ISR_HANDLER_IN_IRAM = y && SOC_RMT_SUPPORT_DMA = y:
gdma_link: gdma_link_mount_buffers (noflash)
gdma_link: gdma_link_count_buffer_size_till_eof (noflash)
gdma_link: gdma_link_get_head_addr (noflash)
gdma_link: gdma_link_get_length (noflash)
gdma_link: gdma_link_get_buffer (noflash)
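These placements only take effect when the matching options are enabled in the project configuration. A hedged illustration of the relevant sdkconfig lines (SOC_RMT_SUPPORT_DMA is a read-only SoC capability, set automatically on chips whose RMT supports DMA):
CONFIG_RMT_TX_ISR_HANDLER_IN_IRAM=y
CONFIG_RMT_RX_ISR_HANDLER_IN_IRAM=y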
+9 -11
@@ -32,8 +32,8 @@ static size_t rmt_encode_bs(rmt_encoder_t *encoder, rmt_channel_handle_t channel
rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
uint8_t *input_bytes = (uint8_t *)input_raw;
rmt_encode_state_t state = RMT_ENCODING_RESET;
rmt_dma_descriptor_t *desc0 = NULL;
rmt_dma_descriptor_t *desc1 = NULL;
uint8_t dma_lli0_index = 0;
uint8_t dma_lli1_index = 0;
// bitscrambler encoder must be used with a TX channel with DMA enabled
assert(tx_chan->base.dma_chan != NULL);
@@ -52,9 +52,9 @@ static size_t rmt_encode_bs(rmt_encoder_t *encoder, rmt_channel_handle_t channel
// mark the start descriptor
if (tx_chan->mem_off_bytes < tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t)) {
desc0 = &tx_chan->dma_nodes_nc[0];
dma_lli0_index = 0;
} else {
desc0 = &tx_chan->dma_nodes_nc[1];
dma_lli0_index = 1;
}
size_t len = copy_len;
@@ -65,15 +65,14 @@ static size_t rmt_encode_bs(rmt_encoder_t *encoder, rmt_channel_handle_t channel
// mark the end descriptor
if (tx_chan->mem_off_bytes < tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t)) {
desc1 = &tx_chan->dma_nodes_nc[0];
dma_lli1_index = 0;
} else {
desc1 = &tx_chan->dma_nodes_nc[1];
dma_lli1_index = 1;
}
// crossed the ping-pong boundary, which means the start descriptor has been filled with sufficient data
if (desc0 != desc1) {
desc0->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
desc0->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
if (dma_lli0_index != dma_lli1_index) {
gdma_link_set_owner(tx_chan->dma_link, dma_lli0_index, GDMA_LLI_OWNER_DMA);
}
if (encoding_truncated) {
@@ -94,8 +93,7 @@ static size_t rmt_encode_bs(rmt_encoder_t *encoder, rmt_channel_handle_t channel
// reset the offset pointer when it exceeds the maximum range
if (tx_chan->mem_off_bytes >= tx_chan->ping_pong_symbols * 2 * sizeof(rmt_symbol_word_t)) {
desc1->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
desc1->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
gdma_link_set_owner(tx_chan->dma_link, dma_lli1_index, GDMA_LLI_OWNER_DMA);
tx_chan->mem_off_bytes = 0;
}
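The same bookkeeping recurs in every encoder touched by this commit: instead of comparing raw rmt_dma_descriptor_t pointers, the encoder tracks which of the two ping-pong link list items it is writing into and flips that item's ownership to the DMA once the write offset crosses the boundary. A minimal sketch of the index computation (hypothetical helper, not part of the driver; names taken from the hunks above):
// Map the current write offset to one of the two ping-pong link list items:
// the first half of the DMA buffer belongs to item 0, the second half to item 1.
static inline uint8_t rmt_tx_ping_pong_lli_index(size_t mem_off_bytes, size_t ping_pong_symbols)
{
    return (mem_off_bytes < ping_pong_symbols * sizeof(rmt_symbol_word_t)) ? 0 : 1;
}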
@@ -36,8 +36,6 @@ static size_t rmt_encode_bytes(rmt_encoder_t *encoder, rmt_channel_handle_t chan
rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
const uint8_t *raw_data = (const uint8_t *)primary_data;
rmt_encode_state_t state = RMT_ENCODING_RESET;
rmt_dma_descriptor_t *desc0 = NULL;
rmt_dma_descriptor_t *desc1 = NULL;
size_t byte_index = bytes_encoder->last_byte_index;
size_t bit_index = bytes_encoder->last_bit_index;
@@ -58,14 +56,18 @@ static size_t rmt_encode_bytes(rmt_encoder_t *encoder, rmt_channel_handle_t chan
bool encoding_truncated = mem_have < mem_want;
bool encoding_space_free = mem_have > mem_want;
#if SOC_RMT_SUPPORT_DMA
uint8_t dma_lli0_index = 0;
uint8_t dma_lli1_index = 0;
if (channel->dma_chan) {
// mark the start descriptor
if (symbol_off < tx_chan->ping_pong_symbols) {
desc0 = &tx_chan->dma_nodes_nc[0];
dma_lli0_index = 0;
} else {
desc0 = &tx_chan->dma_nodes_nc[1];
dma_lli0_index = 1;
}
}
#endif // SOC_RMT_SUPPORT_DMA
size_t len = encode_len;
while (len > 0) {
@@ -90,20 +92,21 @@ static size_t rmt_encode_bytes(rmt_encoder_t *encoder, rmt_channel_handle_t chan
}
}
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
// mark the end descriptor
if (symbol_off < tx_chan->ping_pong_symbols) {
desc1 = &tx_chan->dma_nodes_nc[0];
dma_lli1_index = 0;
} else {
desc1 = &tx_chan->dma_nodes_nc[1];
dma_lli1_index = 1;
}
// crossed the ping-pong boundary, which means the start descriptor has been filled with sufficient data
if (desc0 != desc1) {
desc0->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
desc0->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
if (dma_lli0_index != dma_lli1_index) {
gdma_link_set_owner(tx_chan->dma_link, dma_lli0_index, GDMA_LLI_OWNER_DMA);
}
}
#endif // SOC_RMT_SUPPORT_DMA
if (encoding_truncated) {
// this encoding has not finished yet, save the truncated position
@@ -123,10 +126,11 @@ static size_t rmt_encode_bytes(rmt_encoder_t *encoder, rmt_channel_handle_t chan
// reset the offset pointer when it exceeds the maximum range
if (symbol_off >= tx_chan->ping_pong_symbols * 2) {
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
desc1->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
desc1->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
gdma_link_set_owner(tx_chan->dma_link, dma_lli1_index, GDMA_LLI_OWNER_DMA);
}
#endif // SOC_RMT_SUPPORT_DMA
tx_chan->mem_off_bytes = 0;
} else {
tx_chan->mem_off_bytes = symbol_off * sizeof(rmt_symbol_word_t);
@@ -28,8 +28,6 @@ static size_t rmt_encode_copy(rmt_encoder_t *encoder, rmt_channel_handle_t chann
rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
rmt_symbol_word_t *symbols = (rmt_symbol_word_t *)input_symbols;
rmt_encode_state_t state = RMT_ENCODING_RESET;
rmt_dma_descriptor_t *desc0 = NULL;
rmt_dma_descriptor_t *desc1 = NULL;
size_t symbol_index = copy_encoder->last_symbol_index;
// how many symbols will be copied by the encoder
@@ -49,14 +47,18 @@ static size_t rmt_encode_copy(rmt_encoder_t *encoder, rmt_channel_handle_t chann
bool encoding_truncated = mem_have < mem_want;
bool encoding_space_free = mem_have > mem_want;
#if SOC_RMT_SUPPORT_DMA
uint8_t dma_lli0_index = 0;
uint8_t dma_lli1_index = 0;
if (channel->dma_chan) {
// mark the start descriptor
if (symbol_off < tx_chan->ping_pong_symbols) {
desc0 = &tx_chan->dma_nodes_nc[0];
dma_lli0_index = 0;
} else {
desc0 = &tx_chan->dma_nodes_nc[1];
dma_lli0_index = 1;
}
}
#endif // SOC_RMT_SUPPORT_DMA
size_t len = encode_len;
while (len > 0) {
@@ -64,20 +66,21 @@ static size_t rmt_encode_copy(rmt_encoder_t *encoder, rmt_channel_handle_t chann
len--;
}
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
// mark the end descriptor
if (symbol_off < tx_chan->ping_pong_symbols) {
desc1 = &tx_chan->dma_nodes_nc[0];
dma_lli1_index = 0;
} else {
desc1 = &tx_chan->dma_nodes_nc[1];
dma_lli1_index = 1;
}
// crossed the ping-pong boundary, which means the start descriptor has been filled with sufficient data
if (desc0 != desc1) {
desc0->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
desc0->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
if (dma_lli0_index != dma_lli1_index) {
gdma_link_set_owner(tx_chan->dma_link, dma_lli0_index, GDMA_LLI_OWNER_DMA);
}
}
#endif // SOC_RMT_SUPPORT_DMA
if (encoding_truncated) {
// this encoding has not finished yet, save the truncated position
@@ -95,10 +98,11 @@ static size_t rmt_encode_copy(rmt_encoder_t *encoder, rmt_channel_handle_t chann
// reset the offset pointer when it exceeds the maximum range
if (symbol_off >= tx_chan->ping_pong_symbols * 2) {
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
desc1->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
desc1->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
gdma_link_set_owner(tx_chan->dma_link, dma_lli1_index, GDMA_LLI_OWNER_DMA);
}
#endif // SOC_RMT_SUPPORT_DMA
tx_chan->mem_off_bytes = 0;
} else {
tx_chan->mem_off_bytes = symbol_off * sizeof(rmt_symbol_word_t);
@@ -37,8 +37,6 @@ static size_t rmt_encode_simple(rmt_encoder_t *encoder, rmt_channel_handle_t cha
rmt_simple_encoder_t *simple_encoder = __containerof(encoder, rmt_simple_encoder_t, base);
rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
rmt_encode_state_t state = RMT_ENCODING_RESET;
rmt_dma_descriptor_t *desc0 = NULL;
rmt_dma_descriptor_t *desc1 = NULL;
size_t symbol_off = tx_chan->mem_off_bytes / sizeof(rmt_symbol_word_t);
// where to put the encoded symbols? DMA buffer or RMT HW memory
@@ -49,14 +47,18 @@ static size_t rmt_encode_simple(rmt_encoder_t *encoder, rmt_channel_handle_t cha
mem_to_nc = channel->hw_mem_base;
}
#if SOC_RMT_SUPPORT_DMA
uint8_t dma_lli0_index = 0;
uint8_t dma_lli1_index = 0;
if (channel->dma_chan) {
// mark the start descriptor
if (symbol_off < tx_chan->ping_pong_symbols) {
desc0 = &tx_chan->dma_nodes_nc[0];
dma_lli0_index = 0;
} else {
desc0 = &tx_chan->dma_nodes_nc[1];
dma_lli0_index = 1;
}
}
#endif // SOC_RMT_SUPPORT_DMA
// While we're not done, we need to use the callback to fill the RMT memory until it is
// exactly entirely full. We cannot do that if the RMT memory still has N free spaces
@@ -131,20 +133,21 @@ static size_t rmt_encode_simple(rmt_encoder_t *encoder, rmt_channel_handle_t cha
}
}
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
// mark the end descriptor
if (symbol_off < tx_chan->ping_pong_symbols) {
desc1 = &tx_chan->dma_nodes_nc[0];
dma_lli1_index = 0;
} else {
desc1 = &tx_chan->dma_nodes_nc[1];
dma_lli1_index = 1;
}
// crossed the ping-pong boundary, which means the start descriptor has been filled with sufficient data
if (desc0 != desc1) {
desc0->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
desc0->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
if (dma_lli0_index != dma_lli1_index) {
gdma_link_set_owner(tx_chan->dma_link, dma_lli0_index, GDMA_LLI_OWNER_DMA);
}
}
#endif // SOC_RMT_SUPPORT_DMA
if (is_done) {
// reset internal index if encoding session has finished
@@ -158,10 +161,11 @@ static size_t rmt_encode_simple(rmt_encoder_t *encoder, rmt_channel_handle_t cha
// reset the offset pointer when it exceeds the maximum range
if (symbol_off >= tx_chan->ping_pong_symbols * 2) {
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
desc1->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
desc1->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
gdma_link_set_owner(tx_chan->dma_link, dma_lli1_index, GDMA_LLI_OWNER_DMA);
}
#endif // SOC_RMT_SUPPORT_DMA
tx_chan->mem_off_bytes = 0;
} else {
tx_chan->mem_off_bytes = symbol_off * sizeof(rmt_symbol_word_t);
+5 -6
@@ -41,11 +41,13 @@
#include "esp_pm.h"
#include "esp_attr.h"
#include "esp_private/gdma.h"
#include "esp_private/gdma_link.h"
#include "esp_private/esp_gpio_reserve.h"
#include "esp_private/gpio.h"
#include "esp_private/sleep_retention.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/esp_clk_tree_common.h"
#include "esp_private/esp_dma_utils.h"
#include "driver/rmt_types.h"
#ifdef __cplusplus
@@ -83,7 +85,6 @@ extern "C" {
// RMT is a slow peripheral; it only supports AHB-GDMA
#define RMT_DMA_DESC_ALIGN 4
typedef dma_descriptor_align4_t rmt_dma_descriptor_t;
#ifdef CACHE_LL_L2MEM_NON_CACHE_ADDR
#define RMT_GET_NON_CACHE_ADDR(addr) (CACHE_LL_L2MEM_NON_CACHE_ADDR(addr))
@@ -199,8 +200,7 @@ struct rmt_tx_channel_t {
rmt_tx_trans_desc_t *cur_trans; // points to current transaction
void *user_data; // user context
rmt_tx_done_callback_t on_trans_done; // callback, invoked on trans done
rmt_dma_descriptor_t *dma_nodes; // DMA descriptor nodes
rmt_dma_descriptor_t *dma_nodes_nc; // DMA descriptor nodes accessed in non-cached way
gdma_link_list_handle_t dma_link; // DMA link list handle
rmt_tx_trans_desc_t trans_desc_pool[]; // transfer descriptor pool
};
@@ -224,9 +224,8 @@ struct rmt_rx_channel_t {
void *user_data; // user context
rmt_rx_trans_desc_t trans_desc; // transaction description
size_t num_dma_nodes; // number of DMA nodes, determined by the size of the memory block that the user configures
size_t dma_int_mem_alignment; // DMA buffer alignment (both in size and address) for internal RX memory
rmt_dma_descriptor_t *dma_nodes; // DMA link nodes
rmt_dma_descriptor_t *dma_nodes_nc; // DMA descriptor nodes accessed in non-cached way
size_t dma_int_mem_alignment; // DMA buffer alignment (both in size and address) for internal RX memory
gdma_link_list_handle_t dma_link; // DMA link list handle
};
/**
+35 -54
@@ -25,15 +25,19 @@ __attribute__((always_inline))
static inline void rmt_rx_mount_dma_buffer(rmt_rx_channel_t *rx_chan, const void *buffer, size_t buffer_size, size_t per_block_size, size_t last_block_size)
{
uint8_t *data = (uint8_t *)buffer;
gdma_buffer_mount_config_t mount_configs[rx_chan->num_dma_nodes];
memset(mount_configs, 0, sizeof(mount_configs));
for (int i = 0; i < rx_chan->num_dma_nodes; i++) {
rmt_dma_descriptor_t *desc_nc = &rx_chan->dma_nodes_nc[i];
desc_nc->buffer = data + i * per_block_size;
desc_nc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
desc_nc->dw0.suc_eof = 0;
desc_nc->dw0.length = 0;
desc_nc->dw0.size = per_block_size;
mount_configs[i] = (gdma_buffer_mount_config_t) {
.buffer = data + i * per_block_size,
.length = per_block_size,
.flags = {
.mark_final = false,
}
};
}
rx_chan->dma_nodes_nc[rx_chan->num_dma_nodes - 1].dw0.size = last_block_size;
mount_configs[rx_chan->num_dma_nodes - 1].length = last_block_size;
gdma_link_mount_buffers(rx_chan->dma_link, 0, mount_configs, rx_chan->num_dma_nodes, NULL);
}
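For reference, a hedged sketch of how the caller can derive the two block sizes passed in above (the helper name is hypothetical; the driver's actual computation lives in rmt_receive() and additionally enforces the DMA alignment and capacity checks):
// Illustrative only: spread `buffer_size` bytes over `num_nodes` descriptors.
// Every node but the last gets the aligned per-block size; the last node takes
// the remainder, which rmt_rx_mount_dma_buffer() writes into the final entry.
static void rmt_rx_split_buffer(size_t buffer_size, size_t num_nodes, size_t align,
                                size_t *per_block_size, size_t *last_block_size)
{
    *per_block_size = ALIGN_UP(buffer_size / num_nodes, align);
    *last_block_size = buffer_size - *per_block_size * (num_nodes - 1);
}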
static esp_err_t rmt_rx_init_dma_link(rmt_rx_channel_t *rx_channel, const rmt_rx_channel_config_t *config)
@@ -60,11 +64,21 @@ static esp_err_t rmt_rx_init_dma_link(rmt_rx_channel_t *rx_channel, const rmt_rx
// registering the DMA callbacks may fail if the interrupt service cannot be installed
ESP_RETURN_ON_ERROR(gdma_register_rx_event_callbacks(rx_channel->base.dma_chan, &cbs, rx_channel), TAG, "register DMA callbacks failed");
// circular DMA descriptor
for (int i = 0; i < rx_channel->num_dma_nodes - 1; i++) {
rx_channel->dma_nodes_nc[i].next = &rx_channel->dma_nodes[i + 1];
}
rx_channel->dma_nodes_nc[rx_channel->num_dma_nodes - 1].next = &rx_channel->dma_nodes[0];
rx_channel->num_dma_nodes = esp_dma_calculate_node_count(config->mem_block_symbols * sizeof(rmt_symbol_word_t),
rx_channel->dma_int_mem_alignment, DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
rx_channel->num_dma_nodes = MAX(2, rx_channel->num_dma_nodes); // at least 2 DMA nodes for ping-pong
// create DMA link list
gdma_link_list_config_t dma_link_config = {
.buffer_alignment = rx_channel->dma_int_mem_alignment,
.item_alignment = RMT_DMA_DESC_ALIGN,
.num_items = rx_channel->num_dma_nodes,
.flags = {
// reception may be interrupted by rmt_rx_disable() before the DMA has ever accessed the descriptors, so skip the owner check
.check_owner = false,
},
};
ESP_RETURN_ON_ERROR(gdma_new_link_list(&dma_link_config, &rx_channel->dma_link), TAG, "create DMA link list failed");
return ESP_OK;
}
#endif // SOC_RMT_SUPPORT_DMA
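As a worked example of the node-count math above (illustrative numbers, assuming DMA_DESCRIPTOR_BUFFER_MAX_SIZE is 4095 and a 4-byte rmt_symbol_word_t): mem_block_symbols = 1024 amounts to 4096 bytes of RX buffer, which esp_dma_calculate_node_count() splits into 2 descriptors; a 64-symbol configuration needs only 256 bytes and a single descriptor, so the MAX(2, ...) clamp raises it to the 2-node minimum required for ping-pong reception.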
@@ -156,14 +170,14 @@ static esp_err_t rmt_rx_destroy(rmt_rx_channel_t *rx_channel)
if (rx_channel->base.dma_chan) {
ESP_RETURN_ON_ERROR(gdma_del_channel(rx_channel->base.dma_chan), TAG, "delete dma channel failed");
}
if (rx_channel->dma_link) {
ESP_RETURN_ON_ERROR(gdma_del_link_list(rx_channel->dma_link), TAG, "delete dma link list failed");
}
#endif // SOC_RMT_SUPPORT_DMA
if (rx_channel->base.group) {
// de-register channel from RMT group
rmt_rx_unregister_from_group(&rx_channel->base, rx_channel->base.group);
}
if (rx_channel->dma_nodes) {
free(rx_channel->dma_nodes);
}
free(rx_channel);
return ESP_OK;
}
@@ -202,31 +216,6 @@ esp_err_t rmt_new_rx_channel(const rmt_rx_channel_config_t *config, rmt_channel_
// gpio is not configured yet
rx_channel->base.gpio_num = -1;
#if SOC_RMT_SUPPORT_DMA
// create DMA descriptor
size_t num_dma_nodes = 0;
if (config->flags.with_dma) {
// DMA descriptors must be placed in internal SRAM
mem_caps |= MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA;
num_dma_nodes = config->mem_block_symbols * sizeof(rmt_symbol_word_t) / DMA_DESCRIPTOR_BUFFER_MAX_SIZE + 1;
num_dma_nodes = MAX(2, num_dma_nodes); // at least 2 DMA nodes for ping-pong
rmt_dma_descriptor_t *dma_nodes = heap_caps_aligned_calloc(RMT_DMA_DESC_ALIGN, num_dma_nodes, sizeof(rmt_dma_descriptor_t), mem_caps);
ESP_GOTO_ON_FALSE(dma_nodes, ESP_ERR_NO_MEM, err, TAG, "no mem for rx channel DMA nodes");
rx_channel->dma_nodes = dma_nodes;
// do memory sync only when the data cache exists
uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
if (data_cache_line_size) {
// write back and then invalidate the cached dma_nodes, because later the DMA nodes are accessed by non-cacheable address
ESP_GOTO_ON_ERROR(esp_cache_msync(dma_nodes, num_dma_nodes * sizeof(rmt_dma_descriptor_t),
ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE | ESP_CACHE_MSYNC_FLAG_UNALIGNED),
err, TAG, "cache sync failed");
}
// we will use the non-cached address to manipulate the DMA descriptor, for simplicity
rx_channel->dma_nodes_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(dma_nodes);
}
rx_channel->num_dma_nodes = num_dma_nodes;
#endif // SOC_RMT_SUPPORT_DMA
// register the channel to group
ESP_GOTO_ON_ERROR(rmt_rx_register_to_group(rx_channel, config), err, TAG, "register channel failed");
rmt_group_t *group = rx_channel->base.group;
@@ -373,7 +362,7 @@ esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_
ESP_RETURN_ON_FALSE_ISR(esp_ptr_internal(buffer), ESP_ERR_INVALID_ARG, TAG, "user buffer not in the internal RAM");
size_t max_buf_sz_per_dma_node = ALIGN_DOWN(DMA_DESCRIPTOR_BUFFER_MAX_SIZE, mem_alignment);
ESP_RETURN_ON_FALSE_ISR(buffer_size <= rx_chan->num_dma_nodes * max_buf_sz_per_dma_node,
ESP_ERR_INVALID_ARG, TAG, "buffer size exceeds DMA capacity: %zu", rx_chan->num_dma_nodes * max_buf_sz_per_dma_node);
ESP_ERR_INVALID_ARG, TAG, "buffer size exceeds DMA capacity: %"PRIu32"", rx_chan->num_dma_nodes * max_buf_sz_per_dma_node);
}
#endif // SOC_RMT_SUPPORT_DMA
@@ -420,7 +409,7 @@ esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_
size_t last_dma_block_size = buffer_size - per_dma_block_size * (rx_chan->num_dma_nodes - 1);
rmt_rx_mount_dma_buffer(rx_chan, buffer, buffer_size, per_dma_block_size, last_dma_block_size);
gdma_reset(channel->dma_chan);
gdma_start(channel->dma_chan, (intptr_t)rx_chan->dma_nodes); // note, we must use the cached descriptor address to start the DMA
gdma_start(channel->dma_chan, gdma_link_get_head_addr(rx_chan->dma_link)); // note, we must use the cached descriptor address to start the DMA
}
#endif
@@ -771,15 +760,7 @@ static void rmt_rx_default_isr(void *args)
__attribute__((always_inline))
static inline size_t rmt_rx_count_symbols_until_eof(rmt_rx_channel_t *rx_chan, int start_index)
{
size_t received_bytes = 0;
for (int i = 0; i < rx_chan->num_dma_nodes; i++) {
received_bytes += rx_chan->dma_nodes_nc[start_index].dw0.length;
if (rx_chan->dma_nodes_nc[start_index].dw0.suc_eof) {
break;
}
start_index++;
start_index %= rx_chan->num_dma_nodes;
}
size_t received_bytes = gdma_link_count_buffer_size_till_eof(rx_chan->dma_link, start_index);
received_bytes = ALIGN_UP(received_bytes, sizeof(rmt_symbol_word_t));
return received_bytes / sizeof(rmt_symbol_word_t);
}
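A quick sanity check of the rounding above: if the EOF tally comes back as 13 bytes, ALIGN_UP(13, sizeof(rmt_symbol_word_t)) yields 16 bytes, i.e. 4 complete symbols.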
@@ -787,7 +768,7 @@ static inline size_t rmt_rx_count_symbols_until_eof(rmt_rx_channel_t *rx_chan, i
__attribute__((always_inline))
static inline size_t rmt_rx_count_symbols_for_single_block(rmt_rx_channel_t *rx_chan, int desc_index)
{
size_t received_bytes = rx_chan->dma_nodes_nc[desc_index].dw0.length;
size_t received_bytes = gdma_link_get_length(rx_chan->dma_link, desc_index);
received_bytes = ALIGN_UP(received_bytes, sizeof(rmt_symbol_word_t));
return received_bytes / sizeof(rmt_symbol_word_t);
}
@@ -821,7 +802,7 @@ static bool rmt_dma_rx_one_block_cb(gdma_channel_handle_t dma_chan, gdma_event_d
if (rx_chan->on_recv_done) {
int recycle_start_index = trans_desc->dma_desc_index;
rmt_rx_done_event_data_t edata = {
.received_symbols = rx_chan->dma_nodes_nc[recycle_start_index].buffer,
.received_symbols = gdma_link_get_buffer(rx_chan->dma_link, recycle_start_index),
.num_symbols = rmt_rx_count_symbols_until_eof(rx_chan, recycle_start_index),
.flags.is_last = true,
};
@@ -835,7 +816,7 @@ static bool rmt_dma_rx_one_block_cb(gdma_channel_handle_t dma_chan, gdma_event_d
if (rx_chan->on_recv_done) {
size_t dma_desc_index = trans_desc->dma_desc_index;
rmt_rx_done_event_data_t edata = {
.received_symbols = rx_chan->dma_nodes_nc[dma_desc_index].buffer,
.received_symbols = gdma_link_get_buffer(rx_chan->dma_link, dma_desc_index),
.num_symbols = rmt_rx_count_symbols_for_single_block(rx_chan, dma_desc_index),
.flags.is_last = false,
};
+56 -57
@@ -83,15 +83,33 @@ static esp_err_t rmt_tx_init_dma_link(rmt_tx_channel_t *tx_channel, const rmt_tx
TAG, "mem_block_symbols can't exceed %zu", DMA_DESCRIPTOR_BUFFER_MAX_SIZE * RMT_DMA_NODES_PING_PONG / sizeof(rmt_symbol_word_t));
tx_channel->ping_pong_symbols = mount_size_per_node / sizeof(rmt_symbol_word_t);
// create DMA link list
gdma_link_list_config_t dma_link_config = {
.buffer_alignment = int_alignment,
.item_alignment = RMT_DMA_DESC_ALIGN,
.num_items = RMT_DMA_NODES_PING_PONG,
.flags = {
.check_owner = true,
},
};
ESP_RETURN_ON_ERROR(gdma_new_link_list(&dma_link_config, &tx_channel->dma_link), TAG, "create DMA link list failed");
gdma_buffer_mount_config_t mount_configs[RMT_DMA_NODES_PING_PONG];
for (int i = 0; i < RMT_DMA_NODES_PING_PONG; i++) {
// each descriptor shares half of the DMA buffer
tx_channel->dma_nodes_nc[i].buffer = dma_mem_base + tx_channel->ping_pong_symbols * i;
tx_channel->dma_nodes_nc[i].dw0.size = tx_channel->ping_pong_symbols * sizeof(rmt_symbol_word_t);
// the ownership will be switched to DMA in `rmt_tx_do_transaction()`
tx_channel->dma_nodes_nc[i].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
// each node can generate the DMA eof interrupt, and the driver will do a ping-pong trick in the eof callback
tx_channel->dma_nodes_nc[i].dw0.suc_eof = 1;
mount_configs[i] = (gdma_buffer_mount_config_t) {
.buffer = tx_channel->dma_mem_base + tx_channel->ping_pong_symbols * i,
.length = tx_channel->ping_pong_symbols * sizeof(rmt_symbol_word_t),
.flags = {
// each node can generate the DMA eof interrupt, and the driver will do a ping-pong trick in the eof callback
.mark_eof = true,
// chain the descriptors into a ring; it will be broken in `rmt_encode_eof()`
.mark_final = false,
}
};
}
ESP_RETURN_ON_ERROR(gdma_link_mount_buffers(tx_channel->dma_link, 0, mount_configs, RMT_DMA_NODES_PING_PONG, NULL), TAG, "mount DMA buffers failed");
return ESP_OK;
}
@@ -211,6 +229,9 @@ static esp_err_t rmt_tx_destroy(rmt_tx_channel_t *tx_channel)
if (tx_channel->base.dma_chan) {
ESP_RETURN_ON_ERROR(gdma_del_channel(tx_channel->base.dma_chan), TAG, "delete dma channel failed");
}
if (tx_channel->dma_link) {
ESP_RETURN_ON_ERROR(gdma_del_link_list(tx_channel->dma_link), TAG, "delete dma link list failed");
}
#endif // SOC_RMT_SUPPORT_DMA
for (int i = 0; i < RMT_TX_QUEUE_MAX; i++) {
if (tx_channel->trans_queues[i]) {
@@ -224,9 +245,6 @@ static esp_err_t rmt_tx_destroy(rmt_tx_channel_t *tx_channel)
// de-register channel from RMT group
rmt_tx_unregister_from_group(&tx_channel->base, tx_channel->base.group);
}
if (tx_channel->dma_nodes) {
free(tx_channel->dma_nodes);
}
free(tx_channel);
return ESP_OK;
}
@@ -264,23 +282,6 @@ esp_err_t rmt_new_tx_channel(const rmt_tx_channel_config_t *config, rmt_channel_
ESP_GOTO_ON_FALSE(tx_channel, ESP_ERR_NO_MEM, err, TAG, "no mem for tx channel");
// GPIO configuration is not done yet
tx_channel->base.gpio_num = -1;
// create DMA descriptors
if (config->flags.with_dma) {
// DMA descriptors must be placed in internal SRAM
mem_caps |= MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA;
rmt_dma_descriptor_t *dma_nodes = heap_caps_aligned_calloc(RMT_DMA_DESC_ALIGN, RMT_DMA_NODES_PING_PONG, sizeof(rmt_dma_descriptor_t), mem_caps);
ESP_GOTO_ON_FALSE(dma_nodes, ESP_ERR_NO_MEM, err, TAG, "no mem for tx DMA nodes");
tx_channel->dma_nodes = dma_nodes;
// write back and then invalidate the cached dma_nodes, because later the DMA nodes are accessed by non-cacheable address
uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
if (data_cache_line_size) {
ESP_GOTO_ON_ERROR(esp_cache_msync(dma_nodes, RMT_DMA_NODES_PING_PONG * sizeof(rmt_dma_descriptor_t),
ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE | ESP_CACHE_MSYNC_FLAG_UNALIGNED),
err, TAG, "cache sync failed");
}
// we will use the non-cached address to manipulate the DMA descriptor, for simplicity
tx_channel->dma_nodes_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(dma_nodes);
}
// create transaction queues
ESP_GOTO_ON_ERROR(rmt_tx_create_trans_queue(tx_channel, config), err, TAG, "install trans queues failed");
// register the channel to group
@@ -358,9 +359,9 @@ esp_err_t rmt_new_tx_channel(const rmt_tx_channel_config_t *config, rmt_channel_
tx_channel->base.disable = rmt_tx_disable;
// return general channel handle
*ret_chan = &tx_channel->base;
ESP_LOGD(TAG, "new tx channel(%d,%d) at %p, gpio=%d, res=%"PRIu32"Hz, hw_mem_base=%p, dma_mem_base=%p, dma_nodes=%p, ping_pong_size=%zu, queue_depth=%zu",
ESP_LOGD(TAG, "new tx channel(%d,%d) at %p, gpio=%d, res=%"PRIu32"Hz, hw_mem_base=%p, dma_mem_base=%p, ping_pong_size=%zu, queue_depth=%zu",
group_id, channel_id, tx_channel, config->gpio_num, tx_channel->base.resolution_hz,
tx_channel->base.hw_mem_base, tx_channel->dma_mem_base, tx_channel->dma_nodes, tx_channel->ping_pong_symbols, tx_channel->queue_size);
tx_channel->base.hw_mem_base, tx_channel->dma_mem_base, tx_channel->ping_pong_symbols, tx_channel->queue_size);
return ESP_OK;
err:
@@ -591,13 +592,12 @@ esp_err_t rmt_tx_wait_all_done(rmt_channel_handle_t channel, int timeout_ms)
return ESP_OK;
}
static size_t rmt_tx_mark_eof(rmt_tx_channel_t *tx_chan, bool need_eof_marker)
size_t rmt_tx_mark_eof(rmt_tx_channel_t *tx_chan, bool need_eof_marker)
{
rmt_channel_t *channel = &tx_chan->base;
rmt_group_t *group = channel->group;
int channel_id = channel->channel_id;
rmt_tx_trans_desc_t *cur_trans = tx_chan->cur_trans;
rmt_dma_descriptor_t *desc_nc = NULL;
if (need_eof_marker) {
rmt_symbol_word_t *mem_to_nc = NULL;
@@ -617,25 +617,28 @@ static size_t rmt_tx_mark_eof(rmt_tx_channel_t *tx_chan, bool need_eof_marker)
tx_chan->mem_off_bytes += sizeof(rmt_symbol_word_t);
}
size_t off = 0;
if (channel->dma_chan) {
if (tx_chan->mem_off_bytes <= tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t)) {
desc_nc = &tx_chan->dma_nodes_nc[0];
off = tx_chan->mem_off_bytes;
} else {
desc_nc = &tx_chan->dma_nodes_nc[1];
off = tx_chan->mem_off_bytes - tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
}
desc_nc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
desc_nc->dw0.length = off;
// break down the DMA descriptor link
desc_nc->next = NULL;
} else {
if (!channel->dma_chan) {
portENTER_CRITICAL_ISR(&group->spinlock);
// This is the end of a sequence of encoding sessions; disable the threshold interrupt, as no more data will be put into the RMT memory block
rmt_ll_enable_interrupt(group->hal.regs, RMT_LL_EVENT_TX_THRES(channel_id), false);
portEXIT_CRITICAL_ISR(&group->spinlock);
}
#if SOC_RMT_SUPPORT_DMA
else {
int dma_lli_index = 0;
size_t off = 0;
if (tx_chan->mem_off_bytes <= tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t)) {
dma_lli_index = 0;
off = tx_chan->mem_off_bytes;
} else {
dma_lli_index = 1;
off = tx_chan->mem_off_bytes - tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
}
gdma_link_set_length(tx_chan->dma_link, dma_lli_index, off);
gdma_link_set_owner(tx_chan->dma_link, dma_lli_index, GDMA_LLI_OWNER_DMA);
gdma_link_concat(tx_chan->dma_link, dma_lli_index, NULL, 0);
}
#endif // SOC_RMT_SUPPORT_DMA
return need_eof_marker ? 1 : 0;
}
@@ -684,10 +687,10 @@ static void rmt_tx_do_transaction(rmt_tx_channel_t *tx_chan, rmt_tx_trans_desc_t
gdma_reset(channel->dma_chan);
// chain the descriptors into a ring; it will be broken in `rmt_encode_eof()`
for (int i = 0; i < RMT_DMA_NODES_PING_PONG; i++) {
tx_chan->dma_nodes_nc[i].next = &tx_chan->dma_nodes[i + 1]; // note, we must use the cache address for the next pointer
tx_chan->dma_nodes_nc[i].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
// we will set the owner to DMA in the encoding session
gdma_link_set_owner(tx_chan->dma_link, i, GDMA_LLI_OWNER_CPU);
gdma_link_concat(tx_chan->dma_link, i, tx_chan->dma_link, i + 1);
}
tx_chan->dma_nodes_nc[RMT_DMA_NODES_PING_PONG - 1].next = &tx_chan->dma_nodes[0];
}
#endif // SOC_RMT_SUPPORT_DMA
@@ -739,7 +742,7 @@ static void rmt_tx_do_transaction(rmt_tx_channel_t *tx_chan, rmt_tx_trans_desc_t
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
gdma_start(channel->dma_chan, (intptr_t)tx_chan->dma_nodes); // note, we must use the cached descriptor address to start the DMA
gdma_start(channel->dma_chan, gdma_link_get_head_addr(tx_chan->dma_link)); // note, we must use the cached descriptor address to start the DMA
// delay a while to let the DMA data reach the RMT memory block
esp_rom_delay_us(1);
}
@@ -1099,15 +1102,11 @@ static void rmt_tx_default_isr(void *args)
static bool rmt_dma_tx_eof_cb(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
{
rmt_tx_channel_t *tx_chan = (rmt_tx_channel_t *)user_data;
// tx_eof_desc_addr must be non-zero, guaranteed by the hardware
rmt_dma_descriptor_t *eof_desc_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(event_data->tx_eof_desc_addr);
if (!eof_desc_nc->next) {
return false;
}
// next points to a cache address, convert it to a non-cached one
rmt_dma_descriptor_t *n = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(eof_desc_nc->next);
if (!n->next) {
return false;
// Because software and the DMA run concurrently, check every node to confirm that the descriptor ring has been broken
for (int i = 0; i < RMT_DMA_NODES_PING_PONG; i++) {
if (gdma_link_check_end(tx_chan->dma_link, i)) {
return false;
}
}
// if the DMA descriptor link is still a ring (i.e. hasn't been broken by `rmt_tx_mark_eof()`), then we treat it as a valid ping-pong event
// continue ping-pong transmission
+34 -7
@@ -227,17 +227,21 @@ uintptr_t gdma_link_get_head_addr(gdma_link_list_handle_t list)
esp_err_t gdma_link_concat(gdma_link_list_handle_t first_link, int first_link_item_index, gdma_link_list_handle_t second_link, int second_link_item_index)
{
ESP_RETURN_ON_FALSE(first_link && second_link, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_RETURN_ON_FALSE(first_link, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
gdma_link_list_item_t *lli_nc = NULL;
// ensure the first_link_item_index is between 0 and `num_items - 1`
int num_items = first_link->num_items;
first_link_item_index = (first_link_item_index % num_items + num_items) % num_items;
lli_nc = (gdma_link_list_item_t *)(first_link->items_nc + first_link_item_index * first_link->item_size);
// ensure the second_link_item_index is between 0 and `num_items - 1`
num_items = second_link->num_items;
second_link_item_index = (second_link_item_index % num_items + num_items) % num_items;
// concatenate the two link lists
lli_nc->next = (gdma_link_list_item_t *)(second_link->items + second_link_item_index * second_link->item_size);
if (second_link == NULL) {
lli_nc->next = NULL;
} else {
// ensure the second_link_item_index is between 0 and `num_items - 1`
num_items = second_link->num_items;
second_link_item_index = (second_link_item_index % num_items + num_items) % num_items;
// concatenate the two link lists
lli_nc->next = (gdma_link_list_item_t *)(second_link->items + second_link_item_index * second_link->item_size);
}
return ESP_OK;
}
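A usage sketch of the relaxed contract, mirroring how the RMT TX path drives it (`link` stands for any handle returned by gdma_new_link_list(), `N` for its item count):
// Re-chain all items into a ring: the destination index is normalized modulo
// the item count, so item N - 1 ends up pointing back at item 0.
for (int i = 0; i < N; i++) {
    gdma_link_concat(link, i, link, i + 1);
}
// Break the ring at item `i`: with a NULL second link the item's next pointer
// is cleared, and the second link's index argument is ignored.
gdma_link_concat(link, i, NULL, 0);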
@@ -284,7 +288,7 @@ size_t gdma_link_count_buffer_size_till_eof(gdma_link_list_handle_t list, int st
return buf_size;
}
void *gdma_link_get_buffer(gdma_link_list_handle_t list, int item_index)
void* gdma_link_get_buffer(gdma_link_list_handle_t list, int item_index)
{
if (!list) {
return NULL;
@@ -307,3 +311,26 @@ size_t gdma_link_get_length(gdma_link_list_handle_t list, int item_index)
gdma_link_list_item_t *lli = (gdma_link_list_item_t *)(list->items_nc + item_index * list->item_size);
return lli->dw0.length;
}
esp_err_t gdma_link_set_length(gdma_link_list_handle_t list, int item_index, size_t length)
{
ESP_RETURN_ON_FALSE_ISR(list, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
int num_items = list->num_items;
// ensure the item_index is between 0 and `num_items - 1`
item_index = (item_index % num_items + num_items) % num_items;
gdma_link_list_item_t *lli = (gdma_link_list_item_t *)(list->items_nc + item_index * list->item_size);
lli->dw0.length = length;
return ESP_OK;
}
bool gdma_link_check_end(gdma_link_list_handle_t list, int item_index)
{
if (!list) {
return false;
}
int num_items = list->num_items;
// ensure the item_index is between 0 and `num_items - 1`
item_index = (item_index % num_items + num_items) % num_items;
gdma_link_list_item_t *lli = (gdma_link_list_item_t *)(list->items_nc + item_index * list->item_size);
return lli->next == NULL;
}
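Taken together, the new helpers let the RMT TX driver finalize a transfer without touching raw descriptor fields; a condensed sketch of the sequence used by rmt_tx_mark_eof() above:
gdma_link_set_length(link, lli_index, valid_bytes);       // trim the final node to the bytes actually encoded
gdma_link_set_owner(link, lli_index, GDMA_LLI_OWNER_DMA); // hand the node over to the DMA
gdma_link_concat(link, lli_index, NULL, 0);               // break the descriptor ring here
assert(gdma_link_check_end(link, lli_index));             // the node's next pointer is now NULL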
@@ -121,6 +121,8 @@ uintptr_t gdma_link_get_head_addr(gdma_link_list_handle_t list);
* Link A: A1 --> B3 --> B4
* Link B: B1 --> B2 --> B3 --> B4
*
* @note If the second link is NULL, the next pointer of the first link's item is set to NULL, and second_link_item_index is ignored.
*
* @param[in] first_link First link list handle, allocated by `gdma_new_link_list`
* @param[in] first_link_item_index Index of the item in the first link list (-1 means the last item)
* @param[in] second_link Second link list handle, allocated by `gdma_new_link_list`
@@ -194,6 +196,28 @@ void* gdma_link_get_buffer(gdma_link_list_handle_t list, int item_index);
*/
size_t gdma_link_get_length(gdma_link_list_handle_t list, int item_index);
/**
* @brief Set the length of the buffer of a DMA link list item
*
* @param[in] list Link list handle, allocated by `gdma_new_link_list`
* @param[in] item_index Index of the link list item
* @param[in] length Length of the buffer of the link list item
* @return
* - ESP_OK: Set the length successfully
* - ESP_ERR_INVALID_ARG: Set the length failed because of invalid argument
* - ESP_FAIL: Set the length failed because of other error
*/
esp_err_t gdma_link_set_length(gdma_link_list_handle_t list, int item_index, size_t length);
/**
* @brief Check if a DMA link list item is the last item (has no next item)
*
* @param[in] list Link list handle, allocated by `gdma_new_link_list`
* @param[in] item_index Index of the link list item
* @return True if the link list item is the last item, false otherwise
*/
bool gdma_link_check_end(gdma_link_list_handle_t list, int item_index);
#ifdef __cplusplus
}
#endif