Merge branch 'feature/spi_master_support_edma_on_s3' into 'master'

feat(spi_master): support using edma to transfer with psram directly

Closes IDF-7373 and IDF-10111

See merge request espressif/esp-idf!23785
This commit is contained in:
Wan Lei
2025-11-28 13:59:58 +08:00
33 changed files with 551 additions and 309 deletions
+1 -1
View File
@@ -27,6 +27,6 @@ idf_component_register(
SRCS ${srcs}
INCLUDE_DIRS ${public_include}
REQUIRES esp_pm
PRIV_REQUIRES esp_timer esp_mm esp_driver_gpio
PRIV_REQUIRES esp_timer esp_mm esp_driver_gpio spi_flash esp_psram #For CONFIG_SPIRAM_SPEED
LDFRAGMENTS "linker.lf"
)
+1 -1
View File
@@ -44,7 +44,7 @@ menu "ESP-Driver:SPI Configurations"
help
Normally only the ISR of SPI slave is placed in the IRAM, so that it
can work without the flash when interrupt is triggered.
For other functions, there's some possibility that the flash cache
For other functions, there is some possibility that the flash cache
miss when running inside and out of SPI functions, which may increase
the interval of SPI transactions.
Enable this to put ``queue_trans``, ``get_trans_result`` and
@@ -177,14 +177,15 @@ esp_err_t spi_bus_free(spi_host_device_t host_id);
/**
* @brief Helper function for malloc DMA capable memory for SPI driver
*
* @note This API will take care of the cache and hardware alignment internally.
* To free/release memory allocated by this helper function, simply calling `free()`
 * @note This API must be used AFTER spi_bus_initialize() is called; it will then take care of the cache and hardware
 *       alignment internally. To free/release memory allocated by this helper function, simply call `free()`.
*
 * @param[in] host_id SPI peripheral which will use the memory
* @param[in] size Size in bytes, the amount of memory to allocate
* @param[in] extra_heap_caps Extra heap caps based on MALLOC_CAP_DMA
*
* @return Pointer to the memory if allocated successfully
* - NULL If allocation failed or bus not initialized
*/
void *spi_bus_dma_memory_alloc(spi_host_device_t host_id, size_t size, uint32_t extra_heap_caps);
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2010-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2010-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -104,6 +104,7 @@ typedef struct {
*/
} spi_device_interface_config_t;
// Input flags
#define SPI_TRANS_MODE_DIO (1<<0) ///< Transmit/receive data in 2-bit mode
#define SPI_TRANS_MODE_QIO (1<<1) ///< Transmit/receive data in 4-bit mode
#define SPI_TRANS_USE_RXDATA (1<<2) ///< Receive into rx_data member of spi_transaction_t instead into memory at rx_buffer.
@@ -117,6 +118,11 @@ typedef struct {
#define SPI_TRANS_MODE_OCT (1<<10) ///< Transmit/receive data in 8-bit mode
#define SPI_TRANS_MULTILINE_ADDR SPI_TRANS_MODE_DIOQIO_ADDR ///< The data lines used at address phase is the same as data phase (otherwise, only one data line is used at address phase)
#define SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL (1<<11) ///< By default driver will automatically re-alloc dma buffer if it doesn't meet hardware alignment or dma_capable requirements, this flag is for you to disable this feature, you will need to take care of the alignment otherwise driver will return you error ESP_ERR_INVALID_ARG
#define SPI_TRANS_DMA_USE_PSRAM (1<<12) ///< Use PSRAM for DMA buffer directly, has speed limit, but no temp buffer and save memory
// Output flags
#define SPI_TRANS_DMA_RX_FAIL (1<<30) ///< RX transaction data lose flag, indicate DMA RX overflow
#define SPI_TRANS_DMA_TX_FAIL (1<<31) ///< TX transaction data lose flag, indicate DMA TX underflow
/**
* This structure describes one SPI transaction. The descriptor should not be modified until the transaction finishes.
@@ -54,10 +54,11 @@ typedef enum {
typedef struct {
spi_bus_config_t bus_cfg; ///< Config used to initialize the bus
uint64_t gpio_reserve; ///< reserved output gpio bit mask
uint32_t flags; ///< Flags (attributes) of the bus
uint32_t flags; ///< Flags (SPICOMMON_BUSFLAG_* flag combination of bus abilities) of the bus
int max_transfer_sz; ///< Maximum length of bytes available to send
bool dma_enabled; ///< To enable DMA or not
size_t internal_mem_align_size; ///< Buffer align byte requirement for internal memory
size_t cache_align_int; ///< Internal memory align byte requirement
size_t cache_align_ext; ///< External memory align byte requirement
spi_bus_lock_handle_t lock;
#ifdef CONFIG_PM_ENABLE
esp_pm_lock_handle_t pm_lock; ///< Power management lock
@@ -72,9 +73,13 @@ typedef struct {
spi_dma_chan_handle_t tx_dma_chan; ///< TX DMA channel, on ESP32 and ESP32S2, tx_dma_chan and rx_dma_chan are same
spi_dma_chan_handle_t rx_dma_chan; ///< RX DMA channel, on ESP32 and ESP32S2, tx_dma_chan and rx_dma_chan are same
#endif
int dma_desc_num; ///< DMA descriptor number of dmadesc_tx or dmadesc_rx.
spi_dma_desc_t *dmadesc_tx; ///< DMA descriptor array for TX
spi_dma_desc_t *dmadesc_rx; ///< DMA descriptor array for RX
size_t dma_align_tx_int; ///< Internal memory align byte requirement for TX
size_t dma_align_tx_ext; ///< External memory align byte requirement for TX
size_t dma_align_rx_int; ///< Internal memory align byte requirement for RX
size_t dma_align_rx_ext; ///< External memory align byte requirement for RX
int dma_desc_num; ///< DMA descriptor number of dmadesc_tx or dmadesc_rx.
spi_dma_desc_t *dmadesc_tx; ///< DMA descriptor array for TX
spi_dma_desc_t *dmadesc_rx; ///< DMA descriptor array for RX
} spi_dma_ctx_t;
/// Destructor called when a bus is deinitialized.
@@ -174,12 +179,11 @@ esp_err_t spicommon_dma_chan_free(spi_dma_ctx_t *dma_ctx);
* - ``SPICOMMON_BUSFLAG_QUAD``: Combination of ``SPICOMMON_BUSFLAG_DUAL`` and ``SPICOMMON_BUSFLAG_WPHD``.
* - ``SPICOMMON_BUSFLAG_IO4_IO7``: The bus has spi data4 ~ spi data7 connected.
* - ``SPICOMMON_BUSFLAG_OCTAL``: Combination of ``SPICOMMON_BUSFLAG_QUAL`` and ``SPICOMMON_BUSFLAG_IO4_IO7``.
* @param[out] io_reserved Output the reserved gpio map
* @return
* - ESP_ERR_INVALID_ARG if parameter is invalid
* - ESP_OK on success
*/
esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_config_t *bus_config, uint32_t flags, uint32_t *flags_o, uint64_t *io_reserved);
esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_config_t *bus_config, uint32_t flags, uint32_t *flags_o, uint64_t *io_reserved);
/**
* @brief Free the IO used by a SPI peripheral
@@ -201,7 +205,6 @@ esp_err_t spicommon_bus_free_io_cfg(const spi_bus_config_t *bus_cfg, uint64_t *i
* @param cs_id Hardware CS id to route
* @param force_gpio_matrix If true, CS will always be routed through the GPIO matrix. If false,
* if the GPIO number allows it, the routing will happen through the IO_mux.
* @param[out] io_reserved Output the reserved gpio map
*/
void spicommon_cs_initialize(spi_host_device_t host, int cs_io_num, int cs_id, int force_gpio_matrix, uint64_t *io_reserved);
@@ -5,6 +5,7 @@
*/
#pragma once
#include <stddef.h>
#include "stdbool.h"
#include "hal/spi_types.h"
@@ -36,6 +37,15 @@ typedef struct {
*/
void spi_dma_enable_burst(spi_dma_chan_handle_t chan_handle, bool data_burst, bool desc_burst);
/**
* Get the alignment constraints for DMA
*
* @param chan_handle Context of the spi_dma channel.
* @param internal_size The alignment size for internal memory.
* @param external_size The alignment size for external memory.
*/
void spi_dma_get_alignment_constraints(spi_dma_chan_handle_t chan_handle, size_t *internal_size, size_t *external_size);
/**
* Re-trigger a HW pre-load to pick up appended linked descriptor
*
@@ -6,6 +6,7 @@
#include <string.h>
#include <stdatomic.h>
#include <sys/param.h>
#include "sdkconfig.h"
#include "esp_types.h"
#include "esp_attr.h"
@@ -247,6 +248,10 @@ static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_ch
spi_dma_enable_burst(dma_ctx->tx_dma_chan, true, true);
spi_dma_enable_burst(dma_ctx->rx_dma_chan, true, true);
// Get DMA alignment constraints
spi_dma_get_alignment_constraints(dma_ctx->tx_dma_chan, &dma_ctx->dma_align_tx_int, &dma_ctx->dma_align_tx_ext);
spi_dma_get_alignment_constraints(dma_ctx->rx_dma_chan, &dma_ctx->dma_align_rx_int, &dma_ctx->dma_align_rx_ext);
return ret;
}
@@ -287,13 +292,16 @@ static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_ch
gdma_connect(dma_ctx->rx_dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 3));
}
#endif
// TODO: add support to allow SPI transfer PSRAM buffer
gdma_transfer_config_t trans_cfg = {
.max_data_burst_size = 16,
.access_ext_mem = false,
.access_ext_mem = true, // allow to transfer data from/to external memory directly by DMA
};
ESP_RETURN_ON_ERROR(gdma_config_transfer(dma_ctx->tx_dma_chan, &trans_cfg), SPI_TAG, "config gdma tx transfer failed");
ESP_RETURN_ON_ERROR(gdma_config_transfer(dma_ctx->rx_dma_chan, &trans_cfg), SPI_TAG, "config gdma rx transfer failed");
// Get DMA alignment constraints
gdma_get_alignment_constraints(dma_ctx->tx_dma_chan, &dma_ctx->dma_align_tx_int, &dma_ctx->dma_align_tx_ext);
gdma_get_alignment_constraints(dma_ctx->rx_dma_chan, &dma_ctx->dma_align_rx_int, &dma_ctx->dma_align_rx_ext);
}
return ret;
}
@@ -720,12 +728,14 @@ esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_conf
#endif //SOC_SPI_SUPPORT_OCT
}
if (bus_ctx[host]) {
bus_ctx[host]->bus_attr.bus_cfg = *bus_config;
bus_ctx[host]->bus_attr.flags = temp_flag;
bus_ctx[host]->bus_attr.gpio_reserve = gpio_reserv;
}
if (flags_o) {
*flags_o = temp_flag;
}
if (io_reserved) {
*io_reserved |= gpio_reserv;
}
return ESP_OK;
}
@@ -769,9 +779,7 @@ void spicommon_cs_initialize(spi_host_device_t host, int cs_io_num, int cs_id, i
}
gpio_func_sel(cs_io_num, PIN_FUNC_GPIO);
}
if (io_reserved) {
*io_reserved |= out_mask;
}
bus_ctx[host]->bus_attr.gpio_reserve |= out_mask;
}
void spicommon_cs_free_io(int cs_gpio_num, uint64_t *io_reserved)
@@ -820,14 +828,12 @@ esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *
#endif
ESP_RETURN_ON_ERROR(spicommon_bus_alloc(host_id, "spi master"), SPI_TAG, "alloc host failed");
spi_bus_attr_t *bus_attr = (spi_bus_attr_t *)spi_bus_get_attr(host_id);
spicommon_bus_context_t *ctx = __containerof(bus_attr, spicommon_bus_context_t, bus_attr);
assert(bus_attr && ctx); //coverity check
bus_attr->bus_cfg = *bus_config;
if (dma_chan != SPI_DMA_DISABLED) {
bus_attr->dma_enabled = 1;
spicommon_bus_context_t *ctx = bus_ctx[host_id];
spi_bus_attr_t *bus_attr = &ctx->bus_attr;
bus_attr->dma_enabled = (dma_chan != SPI_DMA_DISABLED);
bus_attr->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE;
if (bus_attr->dma_enabled) {
err = spicommon_dma_chan_alloc(host_id, dma_chan, &ctx->dma_ctx);
if (err != ESP_OK) {
goto cleanup;
@@ -836,14 +842,10 @@ esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *
if (err != ESP_OK) {
goto cleanup;
}
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
esp_cache_get_alignment(MALLOC_CAP_DMA, (size_t *)&bus_attr->internal_mem_align_size);
#else
bus_attr->internal_mem_align_size = 4;
#endif
} else {
bus_attr->dma_enabled = 0;
bus_attr->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE;
// Get cache alignment constraints
esp_cache_get_alignment(MALLOC_CAP_DMA, &bus_attr->cache_align_int);
esp_cache_get_alignment(MALLOC_CAP_SPIRAM, &bus_attr->cache_align_ext);
}
spi_bus_lock_config_t lock_config = {
@@ -868,7 +870,7 @@ esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *
_lock_acquire(&ctx->mutex);
if (sleep_retention_module_init(spi_reg_retention_info[host_id - 1].module_id, &init_param) == ESP_OK) {
if ((bus_attr->bus_cfg.flags & SPICOMMON_BUSFLAG_SLP_ALLOW_PD) && (sleep_retention_module_allocate(spi_reg_retention_info[host_id - 1].module_id) != ESP_OK)) {
if ((bus_config->flags & SPICOMMON_BUSFLAG_SLP_ALLOW_PD) && (sleep_retention_module_allocate(spi_reg_retention_info[host_id - 1].module_id) != ESP_OK)) {
// even though the sleep retention create failed, SPI driver should still work, so just warning here
ESP_LOGW(SPI_TAG, "alloc sleep recover failed, peripherals may hold power on");
}
@@ -878,7 +880,7 @@ esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *
}
_lock_release(&ctx->mutex);
#else
if (bus_attr->bus_cfg.flags & SPICOMMON_BUSFLAG_SLP_ALLOW_PD) {
if (bus_config->flags & SPICOMMON_BUSFLAG_SLP_ALLOW_PD) {
ESP_LOGE(SPI_TAG, "power down peripheral in sleep is not enabled or not supported on your target");
}
#endif // SOC_SPI_SUPPORT_SLEEP_RETENTION
@@ -897,7 +899,7 @@ esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *
}
#endif //CONFIG_PM_ENABLE
err = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_MASTER | bus_config->flags, &bus_attr->flags, &bus_attr->gpio_reserve);
err = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_MASTER | bus_config->flags, NULL, NULL);
if (err != ESP_OK) {
goto cleanup;
}
@@ -925,11 +927,17 @@ cleanup:
/**
 * @brief Allocate DMA-capable memory whose alignment satisfies both the DMA and
 *        cache requirements of the given (initialized) SPI bus.
 *
 * @param host_id         SPI peripheral which will use the memory
 * @param size            Size in bytes to allocate
 * @param extra_heap_caps Extra heap caps ORed onto MALLOC_CAP_DMA (e.g. MALLOC_CAP_SPIRAM)
 *
 * @return Pointer to zero-initialized memory on success, NULL on failure or if the bus
 *         is not initialized. Release with `free()`.
 */
void *spi_bus_dma_memory_alloc(spi_host_device_t host_id, size_t size, uint32_t extra_heap_caps)
{
    SPI_CHECK(bus_ctx[host_id], "SPI %d not initialized", NULL, host_id + 1);

    // detailed alignment requirement is not available for slave bus, so use 16 bytes as default
    size_t alignment = 16;
    // Alignment info lives in the DMA context, which only exists on a DMA-enabled
    // master bus — guard against dereferencing a NULL dma_ctx.
    if ((bus_ctx[host_id]->bus_attr.flags & SPICOMMON_BUSFLAG_MASTER) && bus_ctx[host_id]->dma_ctx) {
        // The buffer may later be used for either TX or RX, so take the stricter of the two requirements
        alignment = (extra_heap_caps & MALLOC_CAP_SPIRAM) ? \
                    MAX(bus_ctx[host_id]->dma_ctx->dma_align_tx_ext, bus_ctx[host_id]->dma_ctx->dma_align_rx_ext) : \
                    MAX(bus_ctx[host_id]->dma_ctx->dma_align_tx_int, bus_ctx[host_id]->dma_ctx->dma_align_rx_int);
    }
    return heap_caps_aligned_calloc(alignment, 1, size, extra_heap_caps | MALLOC_CAP_DMA);
}
const spi_bus_attr_t* spi_bus_get_attr(spi_host_device_t host_id)
@@ -27,6 +27,18 @@ void spi_dma_enable_burst(spi_dma_chan_handle_t chan_handle, bool data_burst, bo
}
}
/**
 * Get the DMA buffer alignment constraints for a SPI DMA channel.
 *
 * @param chan_handle    Context of the spi_dma channel
 * @param internal_size  Output: alignment requirement (bytes) for internal memory
 * @param external_size  Output: alignment requirement (bytes) for external memory
 */
void spi_dma_get_alignment_constraints(spi_dma_chan_handle_t chan_handle, size_t *internal_size, size_t *external_size)
{
    if (chan_handle.dir == DMA_CHANNEL_DIRECTION_TX) {
        *internal_size = 1; // TX doesn't need to follow dma alignment in driver design
        *external_size = 1;
    } else {
        spi_dma_dev_t *spi_dma = SPI_LL_GET_HW(chan_handle.host_id);
        // Use uint32_t temporaries instead of casting size_t* to uint32_t*:
        // that cast is only valid when sizeof(size_t) == sizeof(uint32_t) and
        // otherwise writes a partial value / violates strict aliasing.
        uint32_t align_int = 0;
        uint32_t align_ext = 0;
        spi_dma_ll_get_rx_alignment_require(spi_dma, &align_int, &align_ext);
        *internal_size = align_int;
        *external_size = align_ext;
    }
}
#if SOC_SPI_SUPPORT_SLAVE_HD_VER2
void spi_dma_append(spi_dma_chan_handle_t chan_handle)
{
+133 -136
View File
@@ -116,11 +116,14 @@ We have two bits to control the interrupt:
#include "esp_private/spi_common_internal.h"
#include "esp_private/spi_master_internal.h"
#include "esp_private/esp_clk_tree_common.h"
#include "esp_private/cache_utils.h"
#include "driver/spi_master.h"
#include "clk_ctrl_os.h"
#include "esp_log.h"
#include "esp_check.h"
#include "esp_ipc.h"
#include "esp_cache.h"
#include "esp_heap_caps.h"
#include "freertos/task.h"
#include "freertos/queue.h"
#include "soc/soc_memory_layout.h"
@@ -128,10 +131,6 @@ We have two bits to control the interrupt:
#include "hal/spi_hal.h"
#include "hal/spi_ll.h"
#include "hal/hal_utils.h"
#include "esp_heap_caps.h"
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
#include "esp_cache.h"
#endif
#ifdef CONFIG_SPI_MASTER_ISR_IN_IRAM
#define SPI_MASTER_ISR_ATTR IRAM_ATTR
@@ -159,7 +158,25 @@ We have two bits to control the interrupt:
#define SPI_PERIPH_SRC_FREQ_MAX (80*1000*1000) //peripheral hardware limitation for clock source into peripheral
static const char *SPI_TAG = "spi_master";
/**
* The approx time for dma setup and pop data into peripheral
 * This time is theoretically inversely proportional to the PSRAM speed (bandwidth) and directly proportional to the SPI speed, but is hard to calculate accurately
 * Below is an engineering value based on experimental test results, e.g. a delay of 5us for 20MHz PSRAM speed, 1us for 80MHz
* Then the formula is: Delay_time = K * (SPI_SPEED / PSRAM_SPEED) + B
* delay
* ▲
* │ x
* │ x
* │ x
* │ x x
* ─┼─────────────────►
* │ psram speed
*/
#define K_EDMA_SETUP_RATIO 1 / 50000
#define B_EDMA_SETUP_TIME_US 1
#define SPI_EDMA_SETUP_TIME_US(spi_speed) ((spi_speed) * K_EDMA_SETUP_RATIO / CONFIG_SPIRAM_SPEED + B_EDMA_SETUP_TIME_US)
ESP_LOG_ATTR_TAG_DRAM(SPI_TAG, "spi_master");
#define SPI_CHECK(a, str, ret_val, ...) ESP_RETURN_ON_FALSE_ISR(a, ret_val, SPI_TAG, str, ##__VA_ARGS__)
typedef struct spi_device_t spi_device_t;
@@ -533,7 +550,7 @@ esp_err_t spi_bus_add_device(spi_host_device_t host_id, const spi_device_interfa
//Set CS pin, CS options
if (dev_config->spics_io_num >= 0) {
spicommon_cs_initialize(host_id, dev_config->spics_io_num, freecs, use_gpio, (uint64_t *)&bus_attr->gpio_reserve);
spicommon_cs_initialize(host_id, dev_config->spics_io_num, freecs, use_gpio, NULL);
}
//save a pointer to device in spi_host_t
@@ -753,31 +770,13 @@ static void SPI_MASTER_ISR_ATTR s_spi_dma_prepare_data(spi_host_t *host, spi_hal
}
}
static void SPI_MASTER_ISR_ATTR s_spi_prepare_data(spi_device_t *dev, const spi_hal_trans_config_t *hal_trans)
{
spi_host_t *host = dev->host;
spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
spi_hal_context_t *hal = &(host->hal);
if (host->bus_attr->dma_enabled) {
s_spi_dma_prepare_data(host, hal, hal_dev, hal_trans);
} else {
//Need to copy data to registers manually
spi_hal_push_tx_buffer(hal, hal_trans);
}
//in ESP32 these registers should be configured after the DMA is set
spi_hal_enable_data_line(hal->hw, (!hal_dev->half_duplex && hal_trans->rcv_buffer) || hal_trans->send_buffer, !!hal_trans->rcv_buffer);
}
static void SPI_MASTER_ISR_ATTR spi_format_hal_trans_struct(spi_device_t *dev, spi_trans_priv_t *trans_buf, spi_hal_trans_config_t *hal_trans)
{
spi_host_t *host = dev->host;
spi_transaction_t *trans = trans_buf->trans;
hal_trans->tx_bitlen = trans->length;
hal_trans->rx_bitlen = trans->rxlength;
hal_trans->rcv_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_rcv;
hal_trans->send_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_send;
hal_trans->rcv_buffer = (uint8_t *)trans_buf->buffer_to_rcv;
hal_trans->send_buffer = (uint8_t *)trans_buf->buffer_to_send;
hal_trans->cmd = trans->cmd;
hal_trans->addr = trans->addr;
@@ -813,6 +812,7 @@ static void SPI_MASTER_ISR_ATTR spi_format_hal_trans_struct(spi_device_t *dev, s
// Setup the transaction-specified registers and linked-list used by the DMA (or FIFO if DMA is not used)
static void SPI_MASTER_ISR_ATTR spi_new_trans(spi_device_t *dev, spi_trans_priv_t *trans_buf)
{
spi_host_t *host = dev->host;
spi_transaction_t *trans = trans_buf->trans;
spi_hal_context_t *hal = &(dev->host->hal);
spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
@@ -826,12 +826,27 @@ static void SPI_MASTER_ISR_ATTR spi_new_trans(spi_device_t *dev, spi_trans_priv_
spi_hal_trans_config_t hal_trans = {};
spi_format_hal_trans_struct(dev, trans_buf, &hal_trans);
spi_hal_setup_trans(hal, hal_dev, &hal_trans);
s_spi_prepare_data(dev, &hal_trans);
if (host->bus_attr->dma_enabled) {
s_spi_dma_prepare_data(host, hal, hal_dev, &hal_trans);
} else {
//Need to copy data to registers manually
spi_hal_push_tx_buffer(hal, &hal_trans);
}
//these registers should be configured after the DMA is set
spi_hal_enable_data_line(hal->hw, (!hal_dev->half_duplex && hal_trans.rcv_buffer) || hal_trans.send_buffer, !!hal_trans.rcv_buffer);
//Call pre-transmission callback, if any
if (dev->cfg.pre_cb) {
dev->cfg.pre_cb(trans);
}
#if CONFIG_SPIRAM && SOC_PSRAM_DMA_CAPABLE
spi_hal_clear_intr_mask(hal, SPI_LL_INTR_IN_FULL | SPI_LL_INTR_OUT_EMPTY);
if (esp_ptr_dma_ext_capable(hal_trans.send_buffer)) {
// ! Delay here is required for EDMA to pass data from PSRAM to GPSPI
esp_rom_delay_us(SPI_EDMA_SETUP_TIME_US(hal_dev->timing_conf.real_freq));
}
#endif
//Kick off transfer
spi_hal_user_start(hal);
}
@@ -929,6 +944,26 @@ static void SPI_MASTER_ISR_ATTR spi_post_sct_trans(spi_host_t *host)
}
#endif //#if SOC_SPI_SCT_SUPPORTED
// Check the just-finished transaction for EDMA data-loss on PSRAM buffers and
// record the result in the transaction's output flags (SPI_TRANS_DMA_RX_FAIL /
// SPI_TRANS_DMA_TX_FAIL), so callers can translate it into an error return.
static void SPI_MASTER_ISR_ATTR spi_trans_dma_error_check(spi_host_t *host)
{
#if SOC_PSRAM_DMA_CAPABLE && CONFIG_SPIRAM //error checks only for psram dma
    if (host->sct_mode_enabled) {
        return;
    }
    spi_transaction_t *trans = host->cur_trans_buf.trans;

    // RX side: FIFO-full interrupt while receiving into external RAM means DMA
    // could not drain the FIFO in time -> data lost.
    if (esp_ptr_external_ram(host->cur_trans_buf.buffer_to_rcv) && spi_hal_get_intr_mask(&host->hal, SPI_LL_INTR_IN_FULL)) {
        trans->flags |= SPI_TRANS_DMA_RX_FAIL;
        ESP_DRAM_LOGE(SPI_TAG, "DMA RX overflow detected");
    } else {
        trans->flags &= ~SPI_TRANS_DMA_RX_FAIL;
    }

    // TX side: FIFO-empty interrupt while sending from external RAM means DMA
    // could not refill the FIFO in time -> data lost.
    if (esp_ptr_external_ram(host->cur_trans_buf.buffer_to_send) && spi_hal_get_intr_mask(&host->hal, SPI_LL_INTR_OUT_EMPTY)) {
        trans->flags |= SPI_TRANS_DMA_TX_FAIL;
        ESP_DRAM_LOGE(SPI_TAG, "DMA TX underflow detected");
    } else {
        trans->flags &= ~SPI_TRANS_DMA_TX_FAIL;
    }
#endif
}
// This is run in interrupt context.
static void SPI_MASTER_ISR_ATTR spi_intr(void *arg)
{
@@ -966,18 +1001,7 @@ static void SPI_MASTER_ISR_ATTR spi_intr(void *arg)
//This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
spicommon_dmaworkaround_idle(dma_ctx->tx_dma_chan.chan_id);
#endif //#if CONFIG_IDF_TARGET_ESP32
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE //invalidate here to let user access rx data in post_cb if possible
if (host->cur_trans_buf.buffer_to_rcv) {
uint16_t alignment = bus_attr->internal_mem_align_size;
uint32_t buffer_byte_len = (host->cur_trans_buf.trans->rxlength + 7) / 8;
buffer_byte_len = (buffer_byte_len + alignment - 1) & (~(alignment - 1));
// invalidate priv_trans.buffer_to_rcv anyway, only user provide aligned buffer can rcv correct data in post_cb
esp_err_t ret = esp_cache_msync((void *)host->cur_trans_buf.buffer_to_rcv, buffer_byte_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
assert(ret == ESP_OK);
(void)ret;
}
#endif
spi_trans_dma_error_check(host);
}
#if SOC_SPI_SCT_SUPPORTED
@@ -1129,108 +1153,100 @@ static SPI_MASTER_ISR_ATTR esp_err_t check_trans_valid(spi_device_handle_t handl
SPI_CHECK(trans_desc->length <= SPI_LL_CPU_MAX_BIT_LEN, "txdata transfer > hardware max supported len", ESP_ERR_INVALID_ARG);
SPI_CHECK(trans_desc->rxlength <= SPI_LL_CPU_MAX_BIT_LEN, "rxdata transfer > hardware max supported len", ESP_ERR_INVALID_ARG);
}
if (esp_ptr_external_ram(trans_desc->tx_buffer) || esp_ptr_external_ram(trans_desc->rx_buffer)) {
SPI_CHECK(spi_flash_cache_enabled(), "Using PSRAM must when cache is enabled", ESP_ERR_INVALID_STATE);
}
return ESP_OK;
}
static SPI_MASTER_ISR_ATTR void uninstall_priv_desc(spi_trans_priv_t* trans_buf)
{
spi_transaction_t *trans_desc = trans_buf->trans;
if ((void *)trans_buf->buffer_to_send != &trans_desc->tx_data[0] &&
trans_buf->buffer_to_send != trans_desc->tx_buffer) {
if ((void *)trans_buf->buffer_to_send != trans_desc->tx_data && trans_buf->buffer_to_send != trans_desc->tx_buffer) {
free((void *)trans_buf->buffer_to_send); //force free, ignore const
}
// copy data from temporary DMA-capable buffer back to IRAM buffer and free the temporary one.
if (trans_buf->buffer_to_rcv && (void *)trans_buf->buffer_to_rcv != &trans_desc->rx_data[0] && trans_buf->buffer_to_rcv != trans_desc->rx_buffer) { // NOLINT(clang-analyzer-unix.Malloc)
if (trans_desc->flags & SPI_TRANS_USE_RXDATA) {
memcpy((uint8_t *) & trans_desc->rx_data[0], trans_buf->buffer_to_rcv, (trans_desc->rxlength + 7) / 8);
} else {
memcpy(trans_desc->rx_buffer, trans_buf->buffer_to_rcv, (trans_desc->rxlength + 7) / 8);
}
// copy data from temporary DMA-capable buffer back to trans_desc buffer and free the temporary one.
void *orig_rx_buffer = (trans_desc->flags & SPI_TRANS_USE_RXDATA) ? trans_desc->rx_data : trans_desc->rx_buffer;
if (trans_buf->buffer_to_rcv != orig_rx_buffer) {
memcpy(orig_rx_buffer, trans_buf->buffer_to_rcv, (trans_desc->rxlength + 7) / 8);
free(trans_buf->buffer_to_rcv);
}
}
/**
 * Prepare one transaction buffer for DMA use: decide whether the caller's buffer can
 * be used directly (DMA-capable and properly aligned), otherwise allocate an aligned
 * bounce buffer, then perform the required cache sync.
 *
 * @param host        SPI host context (provides DMA/cache alignment requirements)
 * @param buffer      Caller's buffer (TX source or RX destination)
 * @param len         Payload length in bytes
 * @param is_tx       true for TX buffer, false for RX buffer
 * @param flags       Transaction flags (SPI_TRANS_DMA_USE_PSRAM / SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL)
 * @param ret_buffer  Output: buffer the DMA should actually use (caller's or bounce)
 *
 * @return ESP_OK, or ESP_ERR_NOT_SUPPORTED / ESP_ERR_INVALID_ARG / ESP_ERR_NO_MEM on failure
 */
static SPI_MASTER_ISR_ATTR esp_err_t setup_dma_priv_buffer(spi_host_t *host, uint32_t *buffer, uint32_t len, bool is_tx, uint32_t flags, uint32_t **ret_buffer)
{
#if CONFIG_IDF_TARGET_ESP32S2
    ESP_RETURN_ON_FALSE_ISR((host->id != SPI3_HOST) || !(flags & SPI_TRANS_DMA_USE_PSRAM), ESP_ERR_NOT_SUPPORTED, SPI_TAG, "SPI3 does not support external memory");
#endif
    bool is_ptr_ext = esp_ptr_external_ram(buffer);
    bool use_psram = is_ptr_ext && (flags & SPI_TRANS_DMA_USE_PSRAM);
    bool need_malloc = is_ptr_ext ? (!use_psram || !esp_ptr_dma_ext_capable(buffer)) : !esp_ptr_dma_capable(buffer);
    uint16_t alignment = 0;
    // If psram is wanted, re-malloc also from psram.
    uint32_t mem_cap = MALLOC_CAP_DMA | (use_psram ? MALLOC_CAP_SPIRAM : MALLOC_CAP_INTERNAL);

    if (is_tx) {
        alignment = use_psram ? host->dma_ctx->dma_align_tx_ext : host->dma_ctx->dma_align_tx_int;
    } else {
        // RX cache sync still need consider the cache alignment requirement
        if (use_psram) {
            alignment = MAX(host->dma_ctx->dma_align_rx_ext, host->bus_attr->cache_align_ext);
        } else {
            alignment = MAX(host->dma_ctx->dma_align_rx_int, host->bus_attr->cache_align_int);
        }
    }
    // Use uintptr_t for the address test: casting a pointer to uint32_t truncates on
    // platforms where pointers are wider than 32 bits.
    need_malloc |= (((uintptr_t)buffer | len) & (alignment - 1)) != 0;

    ESP_EARLY_LOGD(SPI_TAG, "%s %p, len %d, is_ptr_ext %d, use_psram: %d, alignment: %d, need_malloc: %d from %s", is_tx ? "TX" : "RX", buffer, len, is_ptr_ext, use_psram, alignment, need_malloc, (mem_cap & MALLOC_CAP_SPIRAM) ? "psram" : "internal");
    if (need_malloc) {
        ESP_RETURN_ON_FALSE_ISR(!(flags & SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL), ESP_ERR_INVALID_ARG, SPI_TAG, "Set flag SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL but %s addr&len not align to %d, or not dma_capable", is_tx ? "TX" : "RX", alignment);
        uint32_t origin_len = len;          // payload bytes actually provided by the caller
        len = (len + alignment - 1) & (~(alignment - 1)); // up align alignment
        uint32_t *temp = heap_caps_aligned_alloc(alignment, len, mem_cap);
        ESP_RETURN_ON_FALSE_ISR(temp != NULL, ESP_ERR_NO_MEM, SPI_TAG, "Failed to allocate priv %s buffer", is_tx ? "TX" : "RX");

        if (is_tx) {
            // Copy only the caller's bytes: copying the rounded-up `len` would read
            // past the end of the user's buffer (out-of-bounds read / UB).
            memcpy(temp, buffer, origin_len);
        }
        buffer = temp;
    }

    esp_err_t ret = esp_cache_msync((void *)buffer, len, is_tx ? (ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED) : ESP_CACHE_MSYNC_FLAG_DIR_M2C);
    // ESP_ERR_NOT_SUPPORTED stands for not cache sync required, it's allowed here
    ESP_RETURN_ON_FALSE_ISR((ret == ESP_OK) || (ret == ESP_ERR_NOT_SUPPORTED), ESP_ERR_INVALID_ARG, SPI_TAG, "sync failed for %s buffer", is_tx ? "TX" : "RX");

    *ret_buffer = buffer;
    return ESP_OK;
}
static SPI_MASTER_ISR_ATTR esp_err_t setup_priv_desc(spi_host_t *host, spi_trans_priv_t* priv_desc)
{
spi_transaction_t *trans_desc = priv_desc->trans;
const spi_bus_attr_t *bus_attr = host->bus_attr;
uint16_t alignment = bus_attr->internal_mem_align_size;
// rx memory assign
uint32_t* rcv_ptr;
if (trans_desc->flags & SPI_TRANS_USE_RXDATA) {
rcv_ptr = (uint32_t *)&trans_desc->rx_data[0];
} else {
//if not use RXDATA neither rx_buffer, buffer_to_rcv assigned to NULL
rcv_ptr = trans_desc->rx_buffer;
}
uint32_t* rcv_ptr = (trans_desc->flags & SPI_TRANS_USE_RXDATA) ? (uint32_t *)trans_desc->rx_data : (uint32_t *)trans_desc->rx_buffer;
// tx memory assign
const uint32_t *send_ptr;
if (trans_desc->flags & SPI_TRANS_USE_TXDATA) {
send_ptr = (uint32_t *)&trans_desc->tx_data[0];
} else {
//if not use TXDATA neither tx_buffer, tx data assigned to NULL
send_ptr = trans_desc->tx_buffer ;
}
uint32_t tx_byte_len = (trans_desc->length + 7) / 8;
uint32_t rx_byte_len = (trans_desc->rxlength + 7) / 8;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
bool tx_unaligned = ((((uint32_t)send_ptr) | tx_byte_len) & (alignment - 1));
bool rx_unaligned = ((((uint32_t)rcv_ptr) | rx_byte_len) & (alignment - 1));
#else
bool tx_unaligned = false; //tx don't need align on addr or length, for other chips
bool rx_unaligned = (((uint32_t)rcv_ptr) & (alignment - 1));
#endif
uint32_t *send_ptr = (trans_desc->flags & SPI_TRANS_USE_TXDATA) ? (uint32_t *)trans_desc->tx_data : (uint32_t *)trans_desc->tx_buffer;
esp_err_t ret = ESP_OK;
if (send_ptr && bus_attr->dma_enabled) {
if ((!esp_ptr_dma_capable(send_ptr) || tx_unaligned)) {
ESP_RETURN_ON_FALSE(!(trans_desc->flags & SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL), ESP_ERR_INVALID_ARG, SPI_TAG, "Set flag SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL but TX buffer addr&len not align to %d byte, or not dma_capable", alignment);
//if txbuf in the desc not DMA-capable, or not bytes aligned to alignment, malloc a new one
ESP_EARLY_LOGD(SPI_TAG, "Allocate TX buffer for DMA");
tx_byte_len = (tx_byte_len + alignment - 1) & (~(alignment - 1)); // up align alignment
uint32_t *temp = heap_caps_aligned_alloc(alignment, tx_byte_len, MALLOC_CAP_DMA);
if (temp == NULL) {
goto clean_up;
}
memcpy(temp, send_ptr, (trans_desc->length + 7) / 8);
send_ptr = temp;
ret = setup_dma_priv_buffer(host, send_ptr, (trans_desc->length + 7) / 8, true, trans_desc->flags, &send_ptr);
if (ret != ESP_OK) {
goto clean_up;
}
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
esp_err_t ret = esp_cache_msync((void *)send_ptr, tx_byte_len, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
assert(ret == ESP_OK);
(void)ret;
#endif
}
if (rcv_ptr && bus_attr->dma_enabled) {
if ((!esp_ptr_dma_capable(rcv_ptr) || rx_unaligned)) {
ESP_RETURN_ON_FALSE(!(trans_desc->flags & SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL), ESP_ERR_INVALID_ARG, SPI_TAG, "Set flag SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL but RX buffer addr&len not align to %d byte, or not dma_capable", alignment);
//if rxbuf in the desc not DMA-capable, or not aligned to alignment, malloc a new one
ESP_EARLY_LOGD(SPI_TAG, "Allocate RX buffer for DMA");
rx_byte_len = (rx_byte_len + alignment - 1) & (~(alignment - 1)); // up align alignment
rcv_ptr = heap_caps_aligned_alloc(alignment, rx_byte_len, MALLOC_CAP_DMA);
if (rcv_ptr == NULL) {
goto clean_up;
}
ret = setup_dma_priv_buffer(host, rcv_ptr, (trans_desc->rxlength + 7) / 8, false, trans_desc->flags, &rcv_ptr);
if (ret != ESP_OK) {
goto clean_up;
}
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
// do invalid here to hold on cache status to avoid hardware auto write back during dma transaction
esp_err_t ret = esp_cache_msync((void *)rcv_ptr, rx_byte_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
assert(ret == ESP_OK);
(void)ret;
#endif
}
priv_desc->buffer_to_send = send_ptr;
priv_desc->buffer_to_rcv = rcv_ptr;
return ESP_OK;
clean_up:
uninstall_priv_desc(priv_desc);
return ESP_ERR_NO_MEM;
return ret;
}
esp_err_t SPI_MASTER_ATTR spi_device_queue_trans(spi_device_handle_t handle, spi_transaction_t *trans_desc, TickType_t ticks_to_wait)
@@ -1290,7 +1306,6 @@ esp_err_t SPI_MASTER_ATTR spi_device_get_trans_result(spi_device_handle_t handle
BaseType_t r;
spi_trans_priv_t trans_buf;
SPI_CHECK(handle != NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
bool use_dma = handle->host->bus_attr->dma_enabled;
//if SPI_DEVICE_NO_RETURN_RESULT is set, ret_queue will always be empty
SPI_CHECK(!(handle->cfg.flags & SPI_DEVICE_NO_RETURN_RESULT), "API not Supported!", ESP_ERR_NOT_SUPPORTED);
@@ -1304,12 +1319,10 @@ esp_err_t SPI_MASTER_ATTR spi_device_get_trans_result(spi_device_handle_t handle
return ESP_ERR_TIMEOUT;
}
//release temporary buffers used by dma
if (use_dma) {
uninstall_priv_desc(&trans_buf);
}
uninstall_priv_desc(&trans_buf);
(*trans_desc) = trans_buf.trans;
return ESP_OK;
return (trans_buf.trans->flags & (SPI_TRANS_DMA_RX_FAIL | SPI_TRANS_DMA_TX_FAIL)) ? ESP_ERR_INVALID_STATE : ESP_OK;
}
//Porcelain to do one blocking transmission.
@@ -1458,19 +1471,7 @@ esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_end(spi_device_handle_t handle,
return ESP_ERR_TIMEOUT;
}
}
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE //invalidate here to let user access rx data in post_cb if possible
const spi_bus_attr_t *bus_attr = host->bus_attr;
if (bus_attr->dma_enabled && host->cur_trans_buf.buffer_to_rcv) {
uint16_t alignment = bus_attr->internal_mem_align_size;
uint32_t buffer_byte_len = (host->cur_trans_buf.trans->rxlength + 7) / 8;
buffer_byte_len = (buffer_byte_len + alignment - 1) & (~(alignment - 1));
esp_err_t ret = esp_cache_msync((void *)host->cur_trans_buf.buffer_to_rcv, buffer_byte_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
if (ret != ESP_OK) {
return ret;
}
}
#endif
spi_trans_dma_error_check(host);
ESP_LOGV(SPI_TAG, "polling trans done");
//deal with the in-flight transaction
@@ -1487,7 +1488,7 @@ esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_end(spi_device_handle_t handle,
spi_bus_lock_acquire_end(handle->dev_lock);
}
return ESP_OK;
return (host->cur_trans_buf.trans->flags & (SPI_TRANS_DMA_RX_FAIL | SPI_TRANS_DMA_TX_FAIL)) ? ESP_ERR_INVALID_STATE : ESP_OK;
}
esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_transmit(spi_device_handle_t handle, spi_transaction_t* trans_desc)
@@ -1826,20 +1827,16 @@ esp_err_t SPI_MASTER_ATTR spi_device_queue_multi_trans(spi_device_handle_t handl
SPI_CHECK(handle, "Invalid arguments.", ESP_ERR_INVALID_ARG);
SPI_CHECK(SOC_SPI_SCT_SUPPORTED_PERIPH(handle->host->id), "Invalid arguments", ESP_ERR_INVALID_ARG);
SPI_CHECK(handle->host->sct_mode_enabled == 1, "SCT mode isn't enabled", ESP_ERR_INVALID_STATE);
esp_err_t ret = ESP_OK;
uint16_t alignment = handle->host->bus_attr->internal_mem_align_size;
uint32_t *conf_buffer = heap_caps_aligned_alloc(alignment, (trans_num * SOC_SPI_SCT_BUFFER_NUM_MAX * sizeof(uint32_t)), MALLOC_CAP_DMA);
SPI_CHECK(conf_buffer, "No enough memory", ESP_ERR_NO_MEM);
for (int i = 0; i < trans_num; i++) {
ret = check_trans_valid(handle, (spi_transaction_t *)&seg_trans_desc[i]);
if (ret != ESP_OK) {
return ret;
}
ESP_RETURN_ON_ERROR(check_trans_valid(handle, (spi_transaction_t *)&seg_trans_desc[i]), SPI_TAG, "Invalid transaction");
}
SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot queue new transaction while previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE);
uint32_t *conf_buffer = heap_caps_malloc(trans_num * SOC_SPI_SCT_BUFFER_NUM_MAX * sizeof(uint32_t), MALLOC_CAP_DMA);
SPI_CHECK(conf_buffer, "No enough memory", ESP_ERR_NO_MEM);
spi_hal_context_t *hal = &handle->host->hal;
s_sct_init_conf_buffer(hal, conf_buffer, trans_num);
@@ -61,8 +61,7 @@ typedef struct {
typedef struct {
int id;
_Atomic spi_bus_fsm_t fsm;
uint64_t gpio_reserve;
spi_bus_config_t bus_config;
spi_bus_attr_t* bus_attr;
spi_dma_ctx_t *dma_ctx;
spi_slave_interface_config_t cfg;
intr_handle_t intr;
@@ -181,8 +180,8 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b
goto cleanup;
}
memcpy(&spihost[host]->cfg, slave_config, sizeof(spi_slave_interface_config_t));
memcpy(&spihost[host]->bus_config, bus_config, sizeof(spi_bus_config_t));
spihost[host]->id = host;
spihost[host]->bus_attr = (spi_bus_attr_t *)spi_bus_get_attr(host);
atomic_store(&spihost[host]->fsm, SPI_BUS_FSM_ENABLED);
spi_slave_hal_context_t *hal = &spihost[host]->hal;
@@ -213,17 +212,18 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b
spihost[host]->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE;
}
err = spicommon_bus_initialize_io(host, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, &spihost[host]->flags, &spihost[host]->gpio_reserve);
err = spicommon_bus_initialize_io(host, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, NULL, NULL);
if (err != ESP_OK) {
ret = err;
goto cleanup;
}
if (slave_config->spics_io_num >= 0) {
spicommon_cs_initialize(host, slave_config->spics_io_num, 0, !bus_is_iomux(spihost[host]), &spihost[host]->gpio_reserve);
spicommon_cs_initialize(host, slave_config->spics_io_num, 0, !bus_is_iomux(spihost[host]), NULL);
// check and save where cs line really route through
spihost[host]->cs_iomux = (slave_config->spics_io_num == spi_periph_signal[host].spics0_iomux_pin) && bus_is_iomux(spihost[host]);
spihost[host]->cs_in_signal = spi_periph_signal[host].spics_in;
}
spihost[host]->flags = spihost[host]->bus_attr->flags; // This flag MUST be set after spicommon_bus_initialize_io is called
// The slave DMA suffers from unexpected transactions. Forbid reading if DMA is enabled by disabling the CS line.
if (spihost[host]->dma_enabled) {
@@ -337,9 +337,9 @@ esp_err_t spi_slave_free(spi_host_device_t host)
free(spihost[host]->dma_ctx->dmadesc_rx);
spicommon_dma_chan_free(spihost[host]->dma_ctx);
}
spicommon_bus_free_io_cfg(&spihost[host]->bus_config, &spihost[host]->gpio_reserve);
spicommon_bus_free_io_cfg(&spihost[host]->bus_attr->bus_cfg, &spihost[host]->bus_attr->gpio_reserve);
if (spihost[host]->cfg.spics_io_num >= 0) {
spicommon_cs_free_io(spihost[host]->cfg.spics_io_num, &spihost[host]->gpio_reserve);
spicommon_cs_free_io(spihost[host]->cfg.spics_io_num, &spihost[host]->bus_attr->gpio_reserve);
}
esp_intr_free(spihost[host]->intr);
@@ -44,9 +44,8 @@ typedef struct {
typedef struct {
spi_host_device_t host_id;
spi_bus_config_t bus_config;
int cs_io_num;
uint64_t gpio_reserve;
spi_bus_attr_t* bus_attr;
_Atomic spi_bus_fsm_t fsm;
spi_dma_ctx_t *dma_ctx;
uint16_t internal_mem_align_size;
@@ -129,7 +128,7 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
host->int_spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
host->append_mode = append_mode;
atomic_store(&host->fsm, SPI_BUS_FSM_ENABLED);
memcpy(&host->bus_config, bus_config, sizeof(spi_bus_config_t));
host->bus_attr = (spi_bus_attr_t *)spi_bus_get_attr(host_id);
host->cs_io_num = config->spics_io_num;
ret = spicommon_dma_chan_alloc(host_id, config->dma_chan, &host->dma_ctx);
@@ -172,11 +171,12 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
host->internal_mem_align_size = 4;
#endif
ret = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, &host->flags, &host->gpio_reserve);
ret = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, NULL, NULL);
if (ret != ESP_OK) {
goto cleanup;
}
spicommon_cs_initialize(host_id, config->spics_io_num, 0, !(bus_config->flags & SPICOMMON_BUSFLAG_NATIVE_PINS), &host->gpio_reserve);
spicommon_cs_initialize(host_id, config->spics_io_num, 0, !(bus_config->flags & SPICOMMON_BUSFLAG_NATIVE_PINS), NULL);
host->flags = host->bus_attr->flags; // This flag MUST be set after spicommon_bus_initialize_io is called
spi_slave_hd_hal_config_t hal_config = {
.host_id = host_id,
@@ -349,8 +349,8 @@ esp_err_t spi_slave_hd_deinit(spi_host_device_t host_id)
}
#endif
spicommon_bus_free_io_cfg(&host->bus_config, &host->gpio_reserve);
spicommon_cs_free_io(host->cs_io_num, &host->gpio_reserve);
spicommon_bus_free_io_cfg(&host->bus_attr->bus_cfg, &host->bus_attr->gpio_reserve);
spicommon_cs_free_io(host->cs_io_num, &host->bus_attr->gpio_reserve);
spicommon_bus_free(host_id);
free(host->dma_ctx->dmadesc_tx);
free(host->dma_ctx->dmadesc_rx);
@@ -16,6 +16,6 @@ endif()
# the component can be registered as WHOLE_ARCHIVE
idf_component_register(
SRCS ${srcs}
PRIV_REQUIRES esp_driver_spi spi_flash esp_timer esp_driver_gpio
PRIV_REQUIRES esp_driver_spi spi_flash esp_timer esp_driver_gpio esp_mm
WHOLE_ARCHIVE
)
@@ -26,6 +26,7 @@
#include "esp_clk_tree.h"
#include "esp_timer.h"
#include "esp_log.h"
#include "esp_cache.h"
#include "test_utils.h"
#include "test_spi_utils.h"
#include "spi_performance.h"
@@ -776,24 +777,27 @@ TEST_CASE("SPI Master DMA test, TX and RX in different regions", "[spi]")
//connect MOSI to two devices breaks the output, fix it.
spitest_gpio_output_sel(buscfg.mosi_io_num, FUNC_GPIO, spi_periph_signal[TEST_SPI_HOST].spid_out);
#define TEST_REGION_SIZE 2
#define TEST_REGION_SIZE 3
static spi_transaction_t trans[TEST_REGION_SIZE];
int x;
memset(trans, 0, sizeof(trans));
trans[0].length = 320 * 8,
trans[0].tx_buffer = data_malloc + 2;
trans[0].length = 320 * 8;
trans[0].tx_buffer = data_malloc + 2;
trans[0].rx_buffer = data_dram;
trans[1].length = 4 * 8,
trans[1].flags = SPI_TRANS_USE_RXDATA | SPI_TRANS_USE_TXDATA;
trans[1].length = 4 * 8;
trans[1].flags = SPI_TRANS_USE_RXDATA | SPI_TRANS_USE_TXDATA;
uint32_t *ptr = (uint32_t *)trans[1].rx_data;
*ptr = 0x54545454;
ptr = (uint32_t *)trans[1].tx_data;
*ptr = 0xbc124960;
trans[2].length = 64 * 8;
trans[2].tx_buffer = data_drom;
trans[2].rx_buffer = data_malloc;
//Queue all transactions.
for (x = 0; x < TEST_REGION_SIZE; x++) {
for (int x = 0; x < TEST_REGION_SIZE; x++) {
ESP_LOGI(TAG, "transmitting %d...", x);
ret = spi_device_transmit(spi, &trans[x]);
TEST_ASSERT(ret == ESP_OK);
@@ -1724,6 +1728,7 @@ static IRAM_ATTR void test_master_iram(void)
spi_device_handle_t dev_handle = {0};
spi_device_interface_config_t devcfg = SPI_DEVICE_TEST_DEFAULT_CONFIG();
devcfg.cs_ena_pretrans = 1;
devcfg.post_cb = test_master_iram_post_trans_cbk;
TEST_ESP_OK(spi_bus_add_device(TEST_SPI_HOST, &devcfg, &dev_handle));
@@ -1968,3 +1973,100 @@ TEST_CASE("test_spi_master_auto_sleep_retention", "[spi]")
}
#endif //CONFIG_PM_ENABLE
#endif //SOC_LIGHT_SLEEP_SUPPORTED
#if CONFIG_SPIRAM && SOC_PSRAM_DMA_CAPABLE
#define TEST_EDMA_PSRAM_TRANS_NUM 5
#define TEST_EDMA_TRANS_LEN 20000
#define TEST_EDMA_BUFFER_SZ (TEST_EDMA_PSRAM_TRANS_NUM * TEST_EDMA_TRANS_LEN)
/**
 * Run TEST_EDMA_PSRAM_TRANS_NUM back-to-back full-duplex loopback transactions,
 * alternating between the direct EDMA+PSRAM path (SPI_TRANS_DMA_USE_PSRAM) and
 * the default auto re-malloc path, verifying received data after each one.
 *
 * @param dev_handle Device to transmit on (the bus is wired in self-loopback).
 * @param tx Start of TX test data; at least TEST_EDMA_BUFFER_SZ bytes.
 * @param rx Start of RX buffer; at least TEST_EDMA_BUFFER_SZ bytes.
 */
void test_spi_psram_trans(spi_device_handle_t dev_handle, void *tx, void *rx)
{
    // Use byte cursors: pointer arithmetic on `void *` is a GNU extension, not standard C.
    uint8_t *tx_ptr = (uint8_t *)tx;
    uint8_t *rx_ptr = (uint8_t *)rx;
    // Vary length per iteration so each transaction exercises a different size/offset.
    int trans_len = TEST_EDMA_TRANS_LEN - TEST_EDMA_PSRAM_TRANS_NUM / 2;
    for (uint8_t cnt = 0; cnt < TEST_EDMA_PSRAM_TRANS_NUM; cnt ++) {
        spi_transaction_t trans_cfg = {
            .tx_buffer = tx_ptr,
            .rx_buffer = rx_ptr,
            .length = trans_len * 8,
            .rxlength = trans_len * 8,
            // Even iterations use EDMA+PSRAM directly, odd ones take the default path.
            .flags = (cnt % 2) ? 0 : SPI_TRANS_DMA_USE_PSRAM,
            // To use psram, hardware will pass data through MSPI and GDMA to GPSPI, which need some time
            // GPSPI bandwidth(speed * line_num) should always no more than PSRAM bandwidth
            .override_freq_hz = (CONFIG_SPIRAM_SPEED / 4) * 1000 * 1000,
        };
        printf("%d TX %p RX %p len %d @%ld kHz\n", cnt, trans_cfg.tx_buffer, trans_cfg.rx_buffer, trans_len, trans_cfg.override_freq_hz / 1000);
        TEST_ESP_OK(spi_device_transmit(dev_handle, &trans_cfg));
        // Driver reports DMA setup failures back through the transaction flags.
        TEST_ASSERT(!(trans_cfg.flags & (SPI_TRANS_DMA_RX_FAIL | SPI_TRANS_DMA_TX_FAIL)));
        spitest_cmp_or_dump(trans_cfg.tx_buffer, trans_cfg.rx_buffer, trans_len);
        tx_ptr += trans_len;
        rx_ptr += trans_len;
        trans_len ++;
    }
}
// Verify SPI master transactions with TX/RX buffers in PSRAM, driven by EDMA:
//  1) error case: EDMA+PSRAM at max SPI clock should fail (flags report DMA fail),
//     while the auto re-malloc path at the same clock succeeds;
//  2) data path combinations: internal->psram, psram->psram, psram->internal.
TEST_CASE("SPI_Master: PSRAM buffer transaction via EDMA", "[spi]")
{
    spi_bus_config_t buscfg = SPI_BUS_TEST_DEFAULT_CONFIG();
    buscfg.miso_io_num = buscfg.mosi_io_num; // set spi "self-loopback"
    buscfg.max_transfer_sz = TEST_EDMA_BUFFER_SZ;
    TEST_ESP_OK(spi_bus_initialize(TEST_SPI_HOST, &buscfg, SPI_DMA_CH_AUTO));

    spi_device_handle_t dev_handle = NULL;
    spi_device_interface_config_t devcfg = SPI_DEVICE_TEST_DEFAULT_CONFIG();
    devcfg.clock_speed_hz = 80 * 1000 * 1000; // Test error case on highest freq first
    TEST_ESP_OK(spi_bus_add_device(TEST_SPI_HOST, &devcfg, &dev_handle));
    int real_freq_khz;
    spi_device_get_actual_freq(dev_handle, &real_freq_khz);

    // One internal and two external (PSRAM) buffers for the three direction tests below.
    uint8_t *internal_1 = heap_caps_calloc(1, TEST_EDMA_BUFFER_SZ, MALLOC_CAP_INTERNAL);
    uint8_t *external_1 = heap_caps_calloc(1, TEST_EDMA_BUFFER_SZ, MALLOC_CAP_SPIRAM);
    uint8_t *external_2 = heap_caps_calloc(1, TEST_EDMA_BUFFER_SZ, MALLOC_CAP_SPIRAM);
    test_fill_random_to_buffers_dualboard(1001, internal_1, external_2, TEST_EDMA_BUFFER_SZ);

    printf("Test error case: High freq @%d kHz\n", real_freq_khz);
    spi_transaction_t trans_cfg = {
        .length = TEST_EDMA_TRANS_LEN * 8,
        .tx_buffer = external_2,
        .rx_buffer = external_1,
    };
    // also test on polling API, and automalloc mechanism
    for (uint8_t i = 0; i < 2; i++) {
        printf("\n==== %s ====\n", i ? "EDMA" : "Auto Malloc");
        trans_cfg.flags = i ? SPI_TRANS_DMA_USE_PSRAM : 0;
        // Heap delta across polling_start reveals whether the driver re-malloc'd
        // internal bounce buffers (auto-malloc path) or not (direct EDMA path).
        uint32_t before = esp_get_free_heap_size();
        spi_device_polling_start(dev_handle, &trans_cfg, portMAX_DELAY);
        uint32_t after = esp_get_free_heap_size();
        printf("mem_diff: %ld, trans_len: %d\n", after - before, TEST_EDMA_TRANS_LEN);
        // rx buffer still potential re-malloc from psram even if SPI_TRANS_DMA_USE_PSRAM is set
        TEST_ASSERT(i ? (before - after) < 2 * TEST_EDMA_TRANS_LEN : (before - after) > 2 * TEST_EDMA_TRANS_LEN);
        spi_device_polling_end(dev_handle, portMAX_DELAY);
        printf("TX fail: %d, RX fail: %d\n", !!(trans_cfg.flags & SPI_TRANS_DMA_TX_FAIL), !!(trans_cfg.flags & SPI_TRANS_DMA_RX_FAIL));
        // At 80 MHz the EDMA+PSRAM path (i == 1) is expected to report DMA fail flags;
        // the auto-malloc path (i == 0) must not.
        TEST_ASSERT((!!i) == !!(trans_cfg.flags & (SPI_TRANS_DMA_TX_FAIL | SPI_TRANS_DMA_RX_FAIL)));
        if (!i) { // data should be correct if using auto malloc
            spitest_cmp_or_dump(trans_cfg.tx_buffer, trans_cfg.rx_buffer, TEST_EDMA_TRANS_LEN);
        }
    }

    printf("\nTest trans: internal -> psram\n");
    memset(external_1, 0, TEST_EDMA_BUFFER_SZ);
    // Write back the CPU-side zeroing so DMA (which bypasses cache) sees cleared PSRAM.
    TEST_ESP_OK(esp_cache_msync((void *)external_1, TEST_EDMA_BUFFER_SZ, (ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED)));
    test_spi_psram_trans(dev_handle, internal_1, external_1);

    printf("\nTest trans: psram -> psram\n");
    memset(external_2, 0, TEST_EDMA_BUFFER_SZ);
    TEST_ESP_OK(esp_cache_msync((void *)external_2, TEST_EDMA_BUFFER_SZ, (ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED)));
    test_spi_psram_trans(dev_handle, external_1, external_2);

    printf("\nTest trans: psram -> internal\n");
    memset(internal_1, 0, TEST_EDMA_BUFFER_SZ);
    test_spi_psram_trans(dev_handle, external_2, internal_1);

    free(internal_1);
    free(external_1);
    free(external_2);
    spi_bus_remove_device(dev_handle);
    spi_bus_free(TEST_SPI_HOST);
}
#endif
@@ -0,0 +1 @@
CONFIG_SPIRAM=y
@@ -0,0 +1 @@
CONFIG_SPIRAM=y
@@ -0,0 +1 @@
CONFIG_SPIRAM=y
@@ -0,0 +1 @@
CONFIG_SPIRAM=y
@@ -0,0 +1 @@
CONFIG_SPIRAM=y
@@ -130,6 +130,53 @@ TEST_CASE("test fullduplex slave with only RX direction", "[spi]")
ESP_LOGI(SLAVE_TAG, "test passed.");
}
// Full-duplex slave with TX-only transactions (rx_buffer == NULL): the slave
// queues data to send, the master clocks out increasing lengths and must
// receive exactly the bytes the slave transmitted.
TEST_CASE("test fullduplex slave with only TX direction", "[spi]")
{
    custom_setup();

    memcpy(slave_txbuf, slave_send, sizeof(slave_send));

    for (int i = 0; i < 4; i ++) {
        // Queue a TX-only slave transaction; driver aligns the buffer for DMA if needed.
        spi_slave_transaction_t slave_t;
        spi_slave_transaction_t *out;
        memset(&slave_t, 0, sizeof(spi_slave_transaction_t));
        slave_t.length = 8 * 32;
        slave_t.tx_buffer = slave_txbuf;
        slave_t.rx_buffer = NULL;
        slave_t.flags |= SPI_SLAVE_TRANS_DMA_BUFFER_ALIGN_AUTO;

        // Colorize RX buffer with known pattern
        memset(master_rxbuf, 0x66, sizeof(master_rxbuf));

        TEST_ESP_OK(spi_slave_queue_trans(TEST_SLAVE_HOST, &slave_t, portMAX_DELAY));

        // Master clocks (i + 1) * 4 bytes; it only receives, so tx_buffer stays NULL.
        spi_transaction_t t = {};
        t.length = 32 * (i + 1); // always non-zero for i in [0, 3]
        t.tx_buffer = NULL;
        t.rx_buffer = master_rxbuf;
        // was: return value silently ignored; check it like the other driver calls here
        TEST_ESP_OK(spi_device_transmit(spi, &t));

        // Wait for the queued slave transaction to complete.
        TEST_ESP_OK(spi_slave_get_trans_result(TEST_SLAVE_HOST, &out, portMAX_DELAY));

        // Show and verify: master must have received exactly what the slave sent,
        // and the slave must report the master's clocked length.
        ESP_LOGI(SLAVE_TAG, "trans_len: %d", slave_t.trans_len);
        ESP_LOG_BUFFER_HEX("master rx", t.rx_buffer, t.length / 8);
        ESP_LOG_BUFFER_HEX("slave tx", slave_t.tx_buffer, (slave_t.trans_len + 7) / 8);
        TEST_ASSERT_EQUAL_HEX8_ARRAY(slave_t.tx_buffer, t.rx_buffer, t.length / 8);
        TEST_ASSERT_EQUAL(t.length, slave_t.trans_len);
    }
    custom_teardown();
    ESP_LOGI(SLAVE_TAG, "test passed.");
}
#define TEST_SLV_RX_BUF_LEN 15
TEST_CASE("Test slave rx no_dma overwrite when length below/over config", "[spi]")
{
@@ -202,53 +249,6 @@ TEST_CASE("Test slave rx no_dma overwrite when length below/over config", "[spi]
TEST_ESP_OK(spi_bus_remove_device(spidev0));
TEST_ESP_OK(spi_bus_free(TEST_SPI_HOST));
}
TEST_CASE("test fullduplex slave with only TX direction", "[spi]")
{
custom_setup();
memcpy(slave_txbuf, slave_send, sizeof(slave_send));
for (int i = 0; i < 4; i ++) {
//slave send
spi_slave_transaction_t slave_t;
spi_slave_transaction_t *out;
memset(&slave_t, 0, sizeof(spi_slave_transaction_t));
slave_t.length = 8 * 32;
slave_t.tx_buffer = slave_txbuf;
slave_t.rx_buffer = NULL;
slave_t.flags |= SPI_SLAVE_TRANS_DMA_BUFFER_ALIGN_AUTO;
// Colorize RX buffer with known pattern
memset(master_rxbuf, 0x66, sizeof(master_rxbuf));
TEST_ESP_OK(spi_slave_queue_trans(TEST_SLAVE_HOST, &slave_t, portMAX_DELAY));
//send
spi_transaction_t t = {};
t.length = 32 * (i + 1);
if (t.length != 0) {
t.tx_buffer = NULL;
t.rx_buffer = master_rxbuf;
}
spi_device_transmit(spi, &t);
//wait for end
TEST_ESP_OK(spi_slave_get_trans_result(TEST_SLAVE_HOST, &out, portMAX_DELAY));
//show result
ESP_LOGI(SLAVE_TAG, "trans_len: %d", slave_t.trans_len);
ESP_LOG_BUFFER_HEX("master rx", t.rx_buffer, t.length / 8);
ESP_LOG_BUFFER_HEX("slave tx", slave_t.tx_buffer, (slave_t.trans_len + 7) / 8);
TEST_ASSERT_EQUAL_HEX8_ARRAY(slave_t.tx_buffer, t.rx_buffer, t.length / 8);
TEST_ASSERT_EQUAL(t.length, slave_t.trans_len);
}
custom_teardown();
ESP_LOGI(SLAVE_TAG, "test passed.");
}
#endif // !CONFIG_SPIRAM
#endif // #if (TEST_SPI_PERIPH_NUM >= 2)
@@ -312,6 +312,7 @@ static void test_slave_iram_master_normal(void)
spi_device_handle_t dev_handle = {0};
spi_device_interface_config_t devcfg = SPI_DEVICE_TEST_DEFAULT_CONFIG();
devcfg.cs_ena_pretrans = 1;
TEST_ESP_OK(spi_bus_add_device(TEST_SPI_HOST, &devcfg, &dev_handle));
uint8_t *master_send = heap_caps_malloc(TEST_BUFFER_SZ, MALLOC_CAP_DMA);
+13
View File
@@ -1184,6 +1184,19 @@ static inline void spi_dma_ll_rx_enable_burst_desc(spi_dma_dev_t *dma_in, uint32
dma_in->dma_conf.indscr_burst_en = enable;
}
/**
* Get the DMA RX alignment requirements
*
* @param dma_dev Beginning address of the DMA peripheral registers.
* @param internal_size The internal memory alignment requirements.
* @param external_size The external memory alignment requirements.
*/
static inline void spi_dma_ll_get_rx_alignment_require(spi_dma_dev_t *dma_dev, uint32_t *internal_size, uint32_t *external_size)
{
    (void)dma_dev; // alignment is fixed by hardware design; no register access needed
    // ESP32 hardware requires 4-byte alignment for DMA RX into internal memory.
    *internal_size = 4;
    // The ESP32 SPI DMA cannot reach external memory; report an impossible requirement.
    *external_size = UINT32_MAX;
}
/**
* Reset TX DMA which transmits the data from RAM to a peripheral.
*
+14 -10
View File
@@ -55,6 +55,8 @@ typedef spi_dev_t spi_dma_dev_t;
// Type definition of all supported interrupts
typedef enum {
SPI_LL_INTR_TRANS_DONE = BIT(0), ///< A transaction has done
SPI_LL_INTR_IN_FULL = BIT(4), ///< DMA in_full error happened
SPI_LL_INTR_OUT_EMPTY = BIT(5), ///< DMA out_empty error happened
SPI_LL_INTR_RDBUF = BIT(6), ///< Has received RDBUF command. Only available in slave HD.
SPI_LL_INTR_WRBUF = BIT(7), ///< Has received WRBUF command. Only available in slave HD.
SPI_LL_INTR_RDDMA = BIT(8), ///< Has received RDDMA command. Only available in slave HD.
@@ -1098,16 +1100,18 @@ static inline uint32_t spi_ll_slave_get_rcv_bitlen(spi_dev_t *hw)
//helper macros to generate code for each interrupts
#define FOR_EACH_ITEM(op, list) do { list(op) } while(0)
#define INTR_LIST(item) \
item(SPI_LL_INTR_TRANS_DONE, dma_int_ena.trans_done_int_ena, dma_int_raw.trans_done_int_raw, dma_int_clr.trans_done_int_clr, dma_int_set.trans_done_int_set) \
item(SPI_LL_INTR_RDBUF, dma_int_ena.slv_rd_buf_done_int_ena, dma_int_raw.slv_rd_buf_done_int_raw, dma_int_clr.slv_rd_buf_done_int_clr, dma_int_set.slv_rd_buf_done_int_set) \
item(SPI_LL_INTR_WRBUF, dma_int_ena.slv_wr_buf_done_int_ena, dma_int_raw.slv_wr_buf_done_int_raw, dma_int_clr.slv_wr_buf_done_int_clr, dma_int_set.slv_wr_buf_done_int_set) \
item(SPI_LL_INTR_RDDMA, dma_int_ena.slv_rd_dma_done_int_ena, dma_int_raw.slv_rd_dma_done_int_raw, dma_int_clr.slv_rd_dma_done_int_clr, dma_int_set.slv_rd_dma_done_int_set) \
item(SPI_LL_INTR_WRDMA, dma_int_ena.slv_wr_dma_done_int_ena, dma_int_raw.slv_wr_dma_done_int_raw, dma_int_clr.slv_wr_dma_done_int_clr, dma_int_set.slv_wr_dma_done_int_set) \
item(SPI_LL_INTR_SEG_DONE, dma_int_ena.dma_seg_trans_done_int_ena, dma_int_raw.dma_seg_trans_done_int_raw, dma_int_clr.dma_seg_trans_done_int_clr, dma_int_set.dma_seg_trans_done_int_set) \
item(SPI_LL_INTR_CMD7, dma_int_ena.slv_cmd7_int_ena, dma_int_raw.slv_cmd7_int_raw, dma_int_clr.slv_cmd7_int_clr, dma_int_set.slv_cmd7_int_set) \
item(SPI_LL_INTR_CMD8, dma_int_ena.slv_cmd8_int_ena, dma_int_raw.slv_cmd8_int_raw, dma_int_clr.slv_cmd8_int_clr, dma_int_set.slv_cmd8_int_set) \
item(SPI_LL_INTR_CMD9, dma_int_ena.slv_cmd9_int_ena, dma_int_raw.slv_cmd9_int_raw, dma_int_clr.slv_cmd9_int_clr, dma_int_set.slv_cmd9_int_set) \
item(SPI_LL_INTR_CMDA, dma_int_ena.slv_cmda_int_ena, dma_int_raw.slv_cmda_int_raw, dma_int_clr.slv_cmda_int_clr, dma_int_set.slv_cmda_int_set)
item(SPI_LL_INTR_TRANS_DONE, dma_int_ena.trans_done_int_ena, dma_int_raw.trans_done_int_raw, dma_int_clr.trans_done_int_clr, dma_int_set.trans_done_int_set) \
item(SPI_LL_INTR_IN_FULL, dma_int_ena.dma_infifo_full_err_int_ena, dma_int_raw.dma_infifo_full_err_int_raw, dma_int_clr.dma_infifo_full_err_int_clr, dma_int_set.dma_infifo_full_err_int_set) \
item(SPI_LL_INTR_OUT_EMPTY, dma_int_ena.dma_outfifo_empty_err_int_ena, dma_int_raw.dma_outfifo_empty_err_int_raw, dma_int_clr.dma_outfifo_empty_err_int_clr, dma_int_set.dma_outfifo_empty_err_int_set) \
item(SPI_LL_INTR_RDBUF, dma_int_ena.slv_rd_buf_done_int_ena, dma_int_raw.slv_rd_buf_done_int_raw, dma_int_clr.slv_rd_buf_done_int_clr, dma_int_set.slv_rd_buf_done_int_set) \
item(SPI_LL_INTR_WRBUF, dma_int_ena.slv_wr_buf_done_int_ena, dma_int_raw.slv_wr_buf_done_int_raw, dma_int_clr.slv_wr_buf_done_int_clr, dma_int_set.slv_wr_buf_done_int_set) \
item(SPI_LL_INTR_RDDMA, dma_int_ena.slv_rd_dma_done_int_ena, dma_int_raw.slv_rd_dma_done_int_raw, dma_int_clr.slv_rd_dma_done_int_clr, dma_int_set.slv_rd_dma_done_int_set) \
item(SPI_LL_INTR_WRDMA, dma_int_ena.slv_wr_dma_done_int_ena, dma_int_raw.slv_wr_dma_done_int_raw, dma_int_clr.slv_wr_dma_done_int_clr, dma_int_set.slv_wr_dma_done_int_set) \
item(SPI_LL_INTR_SEG_DONE, dma_int_ena.dma_seg_trans_done_int_ena, dma_int_raw.dma_seg_trans_done_int_raw, dma_int_clr.dma_seg_trans_done_int_clr, dma_int_set.dma_seg_trans_done_int_set) \
item(SPI_LL_INTR_CMD7, dma_int_ena.slv_cmd7_int_ena, dma_int_raw.slv_cmd7_int_raw, dma_int_clr.slv_cmd7_int_clr, dma_int_set.slv_cmd7_int_set) \
item(SPI_LL_INTR_CMD8, dma_int_ena.slv_cmd8_int_ena, dma_int_raw.slv_cmd8_int_raw, dma_int_clr.slv_cmd8_int_clr, dma_int_set.slv_cmd8_int_set) \
item(SPI_LL_INTR_CMD9, dma_int_ena.slv_cmd9_int_ena, dma_int_raw.slv_cmd9_int_raw, dma_int_clr.slv_cmd9_int_clr, dma_int_set.slv_cmd9_int_set) \
item(SPI_LL_INTR_CMDA, dma_int_ena.slv_cmda_int_ena, dma_int_raw.slv_cmda_int_raw, dma_int_clr.slv_cmda_int_clr, dma_int_set.slv_cmda_int_set)
static inline void spi_ll_enable_intr(spi_dev_t *hw, spi_ll_intr_t intr_mask)
+14 -10
View File
@@ -55,6 +55,8 @@ typedef spi_dev_t spi_dma_dev_t;
// Type definition of all supported interrupts
typedef enum {
SPI_LL_INTR_TRANS_DONE = BIT(0), ///< A transaction has done
SPI_LL_INTR_IN_FULL = BIT(4), ///< DMA in_full error happened
SPI_LL_INTR_OUT_EMPTY = BIT(5), ///< DMA out_empty error happened
SPI_LL_INTR_RDBUF = BIT(6), ///< Has received RDBUF command. Only available in slave HD.
SPI_LL_INTR_WRBUF = BIT(7), ///< Has received WRBUF command. Only available in slave HD.
SPI_LL_INTR_RDDMA = BIT(8), ///< Has received RDDMA command. Only available in slave HD.
@@ -1098,16 +1100,18 @@ static inline uint32_t spi_ll_slave_get_rcv_bitlen(spi_dev_t *hw)
//helper macros to generate code for each interrupts
#define FOR_EACH_ITEM(op, list) do { list(op) } while(0)
#define INTR_LIST(item) \
item(SPI_LL_INTR_TRANS_DONE, dma_int_ena.trans_done, dma_int_raw.trans_done, dma_int_clr.trans_done, dma_int_set.trans_done) \
item(SPI_LL_INTR_RDBUF, dma_int_ena.slv_rd_buf_done, dma_int_raw.slv_rd_buf_done, dma_int_clr.slv_rd_buf_done, dma_int_set.slv_rd_buf_done) \
item(SPI_LL_INTR_WRBUF, dma_int_ena.slv_wr_buf_done, dma_int_raw.slv_wr_buf_done, dma_int_clr.slv_wr_buf_done, dma_int_set.slv_wr_buf_done) \
item(SPI_LL_INTR_RDDMA, dma_int_ena.slv_rd_dma_done, dma_int_raw.slv_rd_dma_done, dma_int_clr.slv_rd_dma_done, dma_int_set.slv_rd_dma_done) \
item(SPI_LL_INTR_WRDMA, dma_int_ena.slv_wr_dma_done, dma_int_raw.slv_wr_dma_done, dma_int_clr.slv_wr_dma_done, dma_int_set.slv_wr_dma_done) \
item(SPI_LL_INTR_SEG_DONE, dma_int_ena.dma_seg_trans_done, dma_int_raw.dma_seg_trans_done, dma_int_clr.dma_seg_trans_done, dma_int_set.dma_seg_trans_done) \
item(SPI_LL_INTR_CMD7, dma_int_ena.slv_cmd7, dma_int_raw.slv_cmd7, dma_int_clr.slv_cmd7, dma_int_set.slv_cmd7) \
item(SPI_LL_INTR_CMD8, dma_int_ena.slv_cmd8, dma_int_raw.slv_cmd8, dma_int_clr.slv_cmd8, dma_int_set.slv_cmd8) \
item(SPI_LL_INTR_CMD9, dma_int_ena.slv_cmd9, dma_int_raw.slv_cmd9, dma_int_clr.slv_cmd9, dma_int_set.slv_cmd9) \
item(SPI_LL_INTR_CMDA, dma_int_ena.slv_cmda, dma_int_raw.slv_cmda, dma_int_clr.slv_cmda, dma_int_set.slv_cmda)
item(SPI_LL_INTR_TRANS_DONE, dma_int_ena.trans_done, dma_int_raw.trans_done, dma_int_clr.trans_done, dma_int_set.trans_done) \
item(SPI_LL_INTR_IN_FULL, dma_int_ena.dma_infifo_full_err, dma_int_raw.dma_infifo_full_err, dma_int_clr.dma_infifo_full_err, dma_int_set.dma_infifo_full_err) \
item(SPI_LL_INTR_OUT_EMPTY, dma_int_ena.dma_outfifo_empty_err, dma_int_raw.dma_outfifo_empty_err, dma_int_clr.dma_outfifo_empty_err, dma_int_set.dma_outfifo_empty_err) \
item(SPI_LL_INTR_RDBUF, dma_int_ena.slv_rd_buf_done, dma_int_raw.slv_rd_buf_done, dma_int_clr.slv_rd_buf_done, dma_int_set.slv_rd_buf_done) \
item(SPI_LL_INTR_WRBUF, dma_int_ena.slv_wr_buf_done, dma_int_raw.slv_wr_buf_done, dma_int_clr.slv_wr_buf_done, dma_int_set.slv_wr_buf_done) \
item(SPI_LL_INTR_RDDMA, dma_int_ena.slv_rd_dma_done, dma_int_raw.slv_rd_dma_done, dma_int_clr.slv_rd_dma_done, dma_int_set.slv_rd_dma_done) \
item(SPI_LL_INTR_WRDMA, dma_int_ena.slv_wr_dma_done, dma_int_raw.slv_wr_dma_done, dma_int_clr.slv_wr_dma_done, dma_int_set.slv_wr_dma_done) \
item(SPI_LL_INTR_SEG_DONE, dma_int_ena.dma_seg_trans_done, dma_int_raw.dma_seg_trans_done, dma_int_clr.dma_seg_trans_done, dma_int_set.dma_seg_trans_done) \
item(SPI_LL_INTR_CMD7, dma_int_ena.slv_cmd7, dma_int_raw.slv_cmd7, dma_int_clr.slv_cmd7, dma_int_set.slv_cmd7) \
item(SPI_LL_INTR_CMD8, dma_int_ena.slv_cmd8, dma_int_raw.slv_cmd8, dma_int_clr.slv_cmd8, dma_int_set.slv_cmd8) \
item(SPI_LL_INTR_CMD9, dma_int_ena.slv_cmd9, dma_int_raw.slv_cmd9, dma_int_clr.slv_cmd9, dma_int_set.slv_cmd9) \
item(SPI_LL_INTR_CMDA, dma_int_ena.slv_cmda, dma_int_raw.slv_cmda, dma_int_clr.slv_cmda, dma_int_set.slv_cmda)
static inline void spi_ll_enable_intr(spi_dev_t *hw, spi_ll_intr_t intr_mask)
+14 -10
View File
@@ -55,6 +55,8 @@ typedef spi_dev_t spi_dma_dev_t;
// Type definition of all supported interrupts
typedef enum {
SPI_LL_INTR_TRANS_DONE = BIT(0), ///< A transaction has done
SPI_LL_INTR_IN_FULL = BIT(4), ///< DMA in_full error happened
SPI_LL_INTR_OUT_EMPTY = BIT(5), ///< DMA out_empty error happened
SPI_LL_INTR_RDBUF = BIT(6), ///< Has received RDBUF command. Only available in slave HD.
SPI_LL_INTR_WRBUF = BIT(7), ///< Has received WRBUF command. Only available in slave HD.
SPI_LL_INTR_RDDMA = BIT(8), ///< Has received RDDMA command. Only available in slave HD.
@@ -1120,16 +1122,18 @@ static inline uint32_t spi_ll_slave_get_rcv_bitlen(spi_dev_t *hw)
//helper macros to generate code for each interrupts
#define FOR_EACH_ITEM(op, list) do { list(op) } while(0)
#define INTR_LIST(item) \
item(SPI_LL_INTR_TRANS_DONE, dma_int_ena.trans_done_int_ena, dma_int_raw.trans_done_int_raw, dma_int_clr.trans_done_int_clr, dma_int_set.trans_done_int_set) \
item(SPI_LL_INTR_RDBUF, dma_int_ena.slv_rd_buf_done_int_ena, dma_int_raw.slv_rd_buf_done_int_raw, dma_int_clr.slv_rd_buf_done_int_clr, dma_int_set.slv_rd_buf_done_int_set) \
item(SPI_LL_INTR_WRBUF, dma_int_ena.slv_wr_buf_done_int_ena, dma_int_raw.slv_wr_buf_done_int_raw, dma_int_clr.slv_wr_buf_done_int_clr, dma_int_set.slv_wr_buf_done_int_set) \
item(SPI_LL_INTR_RDDMA, dma_int_ena.slv_rd_dma_done_int_ena, dma_int_raw.slv_rd_dma_done_int_raw, dma_int_clr.slv_rd_dma_done_int_clr, dma_int_set.slv_rd_dma_done_int_set) \
item(SPI_LL_INTR_WRDMA, dma_int_ena.slv_wr_dma_done_int_ena, dma_int_raw.slv_wr_dma_done_int_raw, dma_int_clr.slv_wr_dma_done_int_clr, dma_int_set.slv_wr_dma_done_int_set) \
item(SPI_LL_INTR_SEG_DONE, dma_int_ena.dma_seg_trans_done_int_ena, dma_int_raw.dma_seg_trans_done_int_raw, dma_int_clr.dma_seg_trans_done_int_clr, dma_int_set.dma_seg_trans_done_int_set) \
item(SPI_LL_INTR_CMD7, dma_int_ena.slv_cmd7_int_ena, dma_int_raw.slv_cmd7_int_raw, dma_int_clr.slv_cmd7_int_clr, dma_int_set.slv_cmd7_int_set) \
item(SPI_LL_INTR_CMD8, dma_int_ena.slv_cmd8_int_ena, dma_int_raw.slv_cmd8_int_raw, dma_int_clr.slv_cmd8_int_clr, dma_int_set.slv_cmd8_int_set) \
item(SPI_LL_INTR_CMD9, dma_int_ena.slv_cmd9_int_ena, dma_int_raw.slv_cmd9_int_raw, dma_int_clr.slv_cmd9_int_clr, dma_int_set.slv_cmd9_int_set) \
item(SPI_LL_INTR_CMDA, dma_int_ena.slv_cmda_int_ena, dma_int_raw.slv_cmda_int_raw, dma_int_clr.slv_cmda_int_clr, dma_int_set.slv_cmda_int_set)
item(SPI_LL_INTR_TRANS_DONE, dma_int_ena.trans_done_int_ena, dma_int_raw.trans_done_int_raw, dma_int_clr.trans_done_int_clr, dma_int_set.trans_done_int_set) \
item(SPI_LL_INTR_IN_FULL, dma_int_ena.dma_infifo_full_err_int_ena, dma_int_raw.dma_infifo_full_err_int_raw, dma_int_clr.dma_infifo_full_err_int_clr, dma_int_set.dma_infifo_full_err_int_set) \
item(SPI_LL_INTR_OUT_EMPTY, dma_int_ena.dma_outfifo_empty_err_int_ena, dma_int_raw.dma_outfifo_empty_err_int_raw, dma_int_clr.dma_outfifo_empty_err_int_clr, dma_int_set.dma_outfifo_empty_err_int_set) \
item(SPI_LL_INTR_RDBUF, dma_int_ena.slv_rd_buf_done_int_ena, dma_int_raw.slv_rd_buf_done_int_raw, dma_int_clr.slv_rd_buf_done_int_clr, dma_int_set.slv_rd_buf_done_int_set) \
item(SPI_LL_INTR_WRBUF, dma_int_ena.slv_wr_buf_done_int_ena, dma_int_raw.slv_wr_buf_done_int_raw, dma_int_clr.slv_wr_buf_done_int_clr, dma_int_set.slv_wr_buf_done_int_set) \
item(SPI_LL_INTR_RDDMA, dma_int_ena.slv_rd_dma_done_int_ena, dma_int_raw.slv_rd_dma_done_int_raw, dma_int_clr.slv_rd_dma_done_int_clr, dma_int_set.slv_rd_dma_done_int_set) \
item(SPI_LL_INTR_WRDMA, dma_int_ena.slv_wr_dma_done_int_ena, dma_int_raw.slv_wr_dma_done_int_raw, dma_int_clr.slv_wr_dma_done_int_clr, dma_int_set.slv_wr_dma_done_int_set) \
item(SPI_LL_INTR_SEG_DONE, dma_int_ena.dma_seg_trans_done_int_ena, dma_int_raw.dma_seg_trans_done_int_raw, dma_int_clr.dma_seg_trans_done_int_clr, dma_int_set.dma_seg_trans_done_int_set) \
item(SPI_LL_INTR_CMD7, dma_int_ena.slv_cmd7_int_ena, dma_int_raw.slv_cmd7_int_raw, dma_int_clr.slv_cmd7_int_clr, dma_int_set.slv_cmd7_int_set) \
item(SPI_LL_INTR_CMD8, dma_int_ena.slv_cmd8_int_ena, dma_int_raw.slv_cmd8_int_raw, dma_int_clr.slv_cmd8_int_clr, dma_int_set.slv_cmd8_int_set) \
item(SPI_LL_INTR_CMD9, dma_int_ena.slv_cmd9_int_ena, dma_int_raw.slv_cmd9_int_raw, dma_int_clr.slv_cmd9_int_clr, dma_int_set.slv_cmd9_int_set) \
item(SPI_LL_INTR_CMDA, dma_int_ena.slv_cmda_int_ena, dma_int_raw.slv_cmda_int_raw, dma_int_clr.slv_cmda_int_clr, dma_int_set.slv_cmda_int_set)
static inline void spi_ll_enable_intr(spi_dev_t *hw, spi_ll_intr_t intr_mask)
+14 -10
View File
@@ -57,6 +57,8 @@ typedef spi_dev_t spi_dma_dev_t;
// Type definition of all supported interrupts
typedef enum {
SPI_LL_INTR_TRANS_DONE = BIT(0), ///< A transaction has done
SPI_LL_INTR_IN_FULL = BIT(4), ///< DMA in_full error happened
SPI_LL_INTR_OUT_EMPTY = BIT(5), ///< DMA out_empty error happened
SPI_LL_INTR_RDBUF = BIT(6), ///< Has received RDBUF command. Only available in slave HD.
SPI_LL_INTR_WRBUF = BIT(7), ///< Has received WRBUF command. Only available in slave HD.
SPI_LL_INTR_RDDMA = BIT(8), ///< Has received RDDMA command. Only available in slave HD.
@@ -1171,16 +1173,18 @@ static inline uint32_t spi_ll_slave_get_rcv_bitlen(spi_dev_t *hw)
//helper macros to generate code for each interrupts
#define FOR_EACH_ITEM(op, list) do { list(op) } while(0)
#define INTR_LIST(item) \
item(SPI_LL_INTR_TRANS_DONE, dma_int_ena.trans_done_int, dma_int_raw.trans_done_int, dma_int_clr.trans_done_int, dma_int_set.trans_done_int) \
item(SPI_LL_INTR_RDBUF, dma_int_ena.slv_rd_buf_done_int, dma_int_raw.slv_rd_buf_done_int, dma_int_clr.slv_rd_buf_done_int, dma_int_set.slv_rd_buf_done_int) \
item(SPI_LL_INTR_WRBUF, dma_int_ena.slv_wr_buf_done_int, dma_int_raw.slv_wr_buf_done_int, dma_int_clr.slv_wr_buf_done_int, dma_int_set.slv_wr_buf_done_int) \
item(SPI_LL_INTR_RDDMA, dma_int_ena.slv_rd_dma_done_int, dma_int_raw.slv_rd_dma_done_int, dma_int_clr.slv_rd_dma_done_int, dma_int_set.slv_rd_dma_done_int) \
item(SPI_LL_INTR_WRDMA, dma_int_ena.slv_wr_dma_done_int, dma_int_raw.slv_wr_dma_done_int, dma_int_clr.slv_wr_dma_done_int, dma_int_set.slv_wr_dma_done_int) \
item(SPI_LL_INTR_SEG_DONE, dma_int_ena.dma_seg_trans_done_int, dma_int_raw.dma_seg_trans_done_int, dma_int_clr.dma_seg_trans_done_int, dma_int_set.dma_seg_trans_done_int) \
item(SPI_LL_INTR_CMD7, dma_int_ena.slv_cmd7_int, dma_int_raw.slv_cmd7_int, dma_int_clr.slv_cmd7_int, dma_int_set.slv_cmd7_int) \
item(SPI_LL_INTR_CMD8, dma_int_ena.slv_cmd8_int, dma_int_raw.slv_cmd8_int, dma_int_clr.slv_cmd8_int, dma_int_set.slv_cmd8_int) \
item(SPI_LL_INTR_CMD9, dma_int_ena.slv_cmd9_int, dma_int_raw.slv_cmd9_int, dma_int_clr.slv_cmd9_int, dma_int_set.slv_cmd9_int) \
item(SPI_LL_INTR_CMDA, dma_int_ena.slv_cmda_int, dma_int_raw.slv_cmda_int, dma_int_clr.slv_cmda_int, dma_int_set.slv_cmda_int)
item(SPI_LL_INTR_TRANS_DONE, dma_int_ena.trans_done_int, dma_int_raw.trans_done_int, dma_int_clr.trans_done_int, dma_int_set.trans_done_int) \
item(SPI_LL_INTR_IN_FULL, dma_int_ena.dma_infifo_full_err_int, dma_int_raw.dma_infifo_full_err_int, dma_int_clr.dma_infifo_full_err_int, dma_int_set.dma_infifo_full_err_int) \
item(SPI_LL_INTR_OUT_EMPTY, dma_int_ena.dma_outfifo_empty_err_int, dma_int_raw.dma_outfifo_empty_err_int, dma_int_clr.dma_outfifo_empty_err_int, dma_int_set.dma_outfifo_empty_err_int) \
item(SPI_LL_INTR_RDBUF, dma_int_ena.slv_rd_buf_done_int, dma_int_raw.slv_rd_buf_done_int, dma_int_clr.slv_rd_buf_done_int, dma_int_set.slv_rd_buf_done_int) \
item(SPI_LL_INTR_WRBUF, dma_int_ena.slv_wr_buf_done_int, dma_int_raw.slv_wr_buf_done_int, dma_int_clr.slv_wr_buf_done_int, dma_int_set.slv_wr_buf_done_int) \
item(SPI_LL_INTR_RDDMA, dma_int_ena.slv_rd_dma_done_int, dma_int_raw.slv_rd_dma_done_int, dma_int_clr.slv_rd_dma_done_int, dma_int_set.slv_rd_dma_done_int) \
item(SPI_LL_INTR_WRDMA, dma_int_ena.slv_wr_dma_done_int, dma_int_raw.slv_wr_dma_done_int, dma_int_clr.slv_wr_dma_done_int, dma_int_set.slv_wr_dma_done_int) \
item(SPI_LL_INTR_SEG_DONE, dma_int_ena.dma_seg_trans_done_int, dma_int_raw.dma_seg_trans_done_int, dma_int_clr.dma_seg_trans_done_int, dma_int_set.dma_seg_trans_done_int) \
item(SPI_LL_INTR_CMD7, dma_int_ena.slv_cmd7_int, dma_int_raw.slv_cmd7_int, dma_int_clr.slv_cmd7_int, dma_int_set.slv_cmd7_int) \
item(SPI_LL_INTR_CMD8, dma_int_ena.slv_cmd8_int, dma_int_raw.slv_cmd8_int, dma_int_clr.slv_cmd8_int, dma_int_set.slv_cmd8_int) \
item(SPI_LL_INTR_CMD9, dma_int_ena.slv_cmd9_int, dma_int_raw.slv_cmd9_int, dma_int_clr.slv_cmd9_int, dma_int_set.slv_cmd9_int) \
item(SPI_LL_INTR_CMDA, dma_int_ena.slv_cmda_int, dma_int_raw.slv_cmda_int, dma_int_clr.slv_cmda_int, dma_int_set.slv_cmda_int)
static inline void spi_ll_enable_intr(spi_dev_t *hw, spi_ll_intr_t intr_mask)
@@ -1376,6 +1376,20 @@ static inline uint32_t spi_dma_ll_get_in_suc_eof_desc_addr(spi_dma_dev_t *dma_in
return dma_in->dma_in_suc_eof_des_addr;
}
/**
 * Get the DMA RX alignment requirements
 *
 * @param dma_dev Beginning address of the DMA peripheral registers.
 * @param internal_size The internal memory alignment requirements.
 * @param external_size The external memory alignment requirements.
 */
static inline void spi_dma_ll_get_rx_alignment_require(spi_dma_dev_t *dma_dev, uint32_t *internal_size, uint32_t *external_size)
{
    // Internal memory only requires word (4-byte) alignment for RX DMA.
    *internal_size = 4;
    if (dma_dev == &GPSPI2) {
        // SPI2 supports external memory; alignment follows the configured block size.
        *external_size = 16 << dma_dev->dma_conf.ext_mem_bk_size;
    } else {
        // SPI3 does not support external memory.
        *external_size = UINT32_MAX;
    }
}
//---------------------------------------------------TX-------------------------------------------------//
/**
* Reset TX DMA which transmits the data from RAM to a peripheral.
@@ -55,6 +55,8 @@ typedef spi_dev_t spi_dma_dev_t;
// Type definition of all supported interrupts
typedef enum {
SPI_LL_INTR_TRANS_DONE = BIT(0), ///< A transaction has done
SPI_LL_INTR_IN_FULL = BIT(4), ///< DMA in_full error happened
SPI_LL_INTR_OUT_EMPTY = BIT(5), ///< DMA out_empty error happened
SPI_LL_INTR_RDBUF = BIT(6), ///< Has received RDBUF command. Only available in slave HD.
SPI_LL_INTR_WRBUF = BIT(7), ///< Has received WRBUF command. Only available in slave HD.
SPI_LL_INTR_RDDMA = BIT(8), ///< Has received RDDMA command. Only available in slave HD.
@@ -1112,6 +1114,8 @@ static inline uint32_t spi_ll_slave_get_rcv_bitlen(spi_dev_t *hw)
#define FOR_EACH_ITEM(op, list) do { list(op) } while(0)
#define INTR_LIST(item) \
item(SPI_LL_INTR_TRANS_DONE, dma_int_ena.trans_done, dma_int_raw.trans_done, dma_int_clr.trans_done, dma_int_set.trans_done_int_set) \
item(SPI_LL_INTR_IN_FULL, dma_int_ena.infifo_full_err, dma_int_raw.infifo_full_err, dma_int_clr.infifo_full_err, dma_int_set.infifo_full_err_int_set) \
item(SPI_LL_INTR_OUT_EMPTY, dma_int_ena.outfifo_empty_err, dma_int_raw.outfifo_empty_err, dma_int_clr.outfifo_empty_err, dma_int_set.outfifo_empty_err_int_set) \
item(SPI_LL_INTR_RDBUF, dma_int_ena.rd_buf_done, dma_int_raw.rd_buf_done, dma_int_clr.rd_buf_done, dma_int_set.rd_buf_done_int_set) \
item(SPI_LL_INTR_WRBUF, dma_int_ena.wr_buf_done, dma_int_raw.wr_buf_done, dma_int_clr.wr_buf_done, dma_int_set.wr_buf_done_int_set) \
item(SPI_LL_INTR_RDDMA, dma_int_ena.rd_dma_done, dma_int_raw.rd_dma_done, dma_int_clr.rd_dma_done, dma_int_set.rd_dma_done_int_set) \
+17 -10
View File
@@ -230,6 +230,23 @@ void spi_hal_user_start(const spi_hal_context_t *hal);
*/
bool spi_hal_usr_is_done(const spi_hal_context_t *hal);
/**
* Get SPI interrupt bits status by mask
*
* @param hal Context of the HAL layer.
* @param mask Mask of the interrupt bits to check.
* @return True if the masked interrupts are set, false otherwise.
*/
bool spi_hal_get_intr_mask(spi_hal_context_t *hal, uint32_t mask);
/**
* Clear SPI interrupt bits by mask
*
* @param hal Context of the HAL layer.
* @param mask Mask of the interrupt bits to clear.
*/
void spi_hal_clear_intr_mask(spi_hal_context_t *hal, uint32_t mask);
/**
* Setup transaction operations, write tx buffer to HW registers
*
@@ -332,16 +349,6 @@ void spi_hal_sct_deinit(spi_hal_context_t *hal);
*/
void spi_hal_sct_set_conf_bits_len(spi_hal_context_t *hal, uint32_t conf_len);
/**
* Clear SPI interrupt bits by mask
*/
void spi_hal_clear_intr_mask(spi_hal_context_t *hal, uint32_t mask);
/**
* Get SPI interrupt bits status by mask
*/
bool spi_hal_get_intr_mask(spi_hal_context_t *hal, uint32_t mask);
/**
* Set conf_bitslen base to HW for sct, only supported on s2.
*/
+11 -9
View File
@@ -234,6 +234,16 @@ bool spi_hal_usr_is_done(const spi_hal_context_t *hal)
return spi_ll_usr_is_done(hal->hw);
}
#if SOC_SPI_SUPPORT_SLAVE_HD_VER2
/* Return true when any of the interrupt bits selected by `mask` is set. */
bool spi_hal_get_intr_mask(spi_hal_context_t *hal, uint32_t mask)
{
    bool triggered = spi_ll_get_intr(hal->hw, mask);
    return triggered;
}
/* Clear the interrupt status bits selected by `mask`. */
void spi_hal_clear_intr_mask(spi_hal_context_t *hal, uint32_t mask)
{
    spi_ll_clear_intr(hal->hw, mask);
}
#endif
void spi_hal_push_tx_buffer(const spi_hal_context_t *hal, const spi_hal_trans_config_t *hal_trans)
{
if (hal_trans->send_buffer) {
@@ -255,15 +265,7 @@ void spi_hal_fetch_result(const spi_hal_context_t *hal)
#if SOC_SPI_SCT_SUPPORTED
/*------------------------------------------------------------------------------
* Segmented-Configure-Transfer
*----------------------------------------------------------------------------*/
void spi_hal_clear_intr_mask(spi_hal_context_t *hal, uint32_t mask) {
spi_ll_clear_intr(hal->hw, mask);
}
bool spi_hal_get_intr_mask(spi_hal_context_t *hal, uint32_t mask) {
return spi_ll_get_intr(hal->hw, mask);
}
*----------------------------------------------------------------------------*/
/* Write the SCT conf-phase bit length to hardware. */
void spi_hal_sct_set_conf_bits_len(spi_hal_context_t *hal, uint32_t conf_len)
{
    spi_ll_set_conf_phase_bits_len(hal->hw, conf_len);
}
@@ -353,6 +353,14 @@ Driver Usage
The example code for the SPI Master driver can be found in the :example:`peripherals/spi_master` directory of ESP-IDF examples.
.. only:: SOC_PSRAM_DMA_CAPABLE
Transactions with Data on PSRAM
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
{IDF_TARGET_NAME} supports the GPSPI Master using DMA to transfer data from/to PSRAM directly, without an extra internal copy, which saves memory. To use this feature, add the :c:macro:`SPI_TRANS_DMA_USE_PSRAM` flag to the transaction.
Note that this feature shares bandwidth (bus frequency * bus bit width) with the MSPI bus, so the GPSPI transfer bandwidth should be less than the PSRAM bandwidth, **otherwise transmission data may be lost**. You can check the return value or the :c:macro:`SPI_TRANS_DMA_RX_FAIL` and :c:macro:`SPI_TRANS_DMA_TX_FAIL` flags after the transaction finishes to see whether an error occurred during the transmission. If the transaction returns the :c:macro:`ESP_ERR_INVALID_STATE` error, the transaction failed.
Transactions with Data Not Exceeding 32 Bits
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -353,6 +353,14 @@ SPI 总线传输事务由五个阶段构成,详见下表(任意阶段均可
SPI 主机驱动程序的示例代码存放在 ESP-IDF 示例项目的 :example:`peripherals/spi_master` 目录下。
.. only:: SOC_PSRAM_DMA_CAPABLE
使用 PSRAM 的传输事务
^^^^^^^^^^^^^^^^^^^^^^
{IDF_TARGET_NAME} 支持 GPSPI Master 通过 DMA 直接传输 PSRAM 存储的数据而不用内部额外的临时拷贝,因此可以节省内存,在传输配置中添加 :c:macro:`SPI_TRANS_DMA_USE_PSRAM` 标志信号即可使用。
请注意该功能共享 MSPI 总线带宽(总线频率 * 总线位宽),因此 GPSPI 传输带宽应小于 PSRAM 带宽,否则 **可能会丢失传输数据**。可通过在传输结束时检查返回值或 :c:macro:`SPI_TRANS_DMA_RX_FAIL` 和 :c:macro:`SPI_TRANS_DMA_TX_FAIL` 标志信号来判断传输是否发生了错误。若传输事务返回 :c:macro:`ESP_ERR_INVALID_STATE` 错误,则传输事务失败。
传输数据小于 32 位的传输事务
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -23,4 +23,13 @@ menu "Example Configuration"
in practice the driver chips work fine with a higher clock rate, and using that gives a better framerate.
Select this to try using the out-of-spec clock rate.
config LCD_BUFFER_IN_PSRAM
bool
prompt "Malloc LCD buffer from PSRAM, it can save internal RAM"
depends on SPIRAM && SOC_PSRAM_DMA_CAPABLE
default "y"
help
The driver now supports using PSRAM memory as the LCD buffer directly
without an additional internal copy. Enabling this option saves internal
memory space at no extra CPU cost.
endmenu
@@ -19,6 +19,10 @@ uint16_t *pixels;
//Grab a rgb16 pixel from the esp32_tiles image; coordinates outside the
//image are clamped to the nearest edge pixel.
static inline uint16_t get_bgnd_pixel(int x, int y)
{
    if (x < 0) {
        x = 0;
    } else if (x >= IMAGE_W) {
        x = IMAGE_W - 1;
    }
    if (y < 0) {
        y = 0;
    } else if (y >= IMAGE_H) {
        y = IMAGE_H - 1;
    }
    //Pixels are stored row-major, IMAGE_W pixels per row.
    return pixels[(y * IMAGE_W) + x];
}
@@ -26,7 +30,7 @@ static inline uint16_t get_bgnd_pixel(int x, int y)
//This variable is used to detect the next frame.
static int prev_frame = -1;
//Instead of calculating the offsets for each pixel we grab, we pre-calculate the valueswhenever a frame changes, then re-use
//Instead of calculating the offsets for each pixel we grab, we pre-calculate the values whenever a frame changes, then reuse
//these as we go through all the pixels in the frame. This is much, much faster.
static int8_t xofs[320], yofs[240];
static int8_t xcomp[320], ycomp[240];
@@ -343,7 +343,11 @@ static void send_lines(spi_device_handle_t spi, int ypos, uint16_t *linedata)
trans[4].tx_data[0] = 0x2C; //memory write
trans[5].tx_buffer = linedata; //finally send the line data
trans[5].length = 320 * 2 * 8 * PARALLEL_LINES; //Data length, in bits
#if CONFIG_LCD_BUFFER_IN_PSRAM
trans[5].flags = SPI_TRANS_DMA_USE_PSRAM; //using PSRAM
#else
trans[5].flags = 0; //undo SPI_TRANS_USE_TXDATA flag
#endif
//Queue all transactions.
for (x = 0; x < 6; x++) {
@@ -375,9 +379,17 @@ static void send_line_finish(spi_device_handle_t spi)
static void display_pretty_colors(spi_device_handle_t spi)
{
uint16_t *lines[2];
#if CONFIG_LCD_BUFFER_IN_PSRAM
uint32_t mem_cap = MALLOC_CAP_SPIRAM | MALLOC_CAP_DMA;
printf("Get LCD buffer from PSRAM\n");
#else
uint32_t mem_cap = MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA;
printf("Get LCD buffer from internal\n");
#endif
//Allocate memory for the pixel buffers
for (int i = 0; i < 2; i++) {
lines[i] = spi_bus_dma_memory_alloc(LCD_HOST, 320 * PARALLEL_LINES * sizeof(uint16_t), 0);
lines[i] = spi_bus_dma_memory_alloc(LCD_HOST, 320 * PARALLEL_LINES * sizeof(uint16_t), mem_cap);
assert(lines[i] != NULL);
}
int frame = 0;