Merge branch 'feat/spi_slave_edma_psram_support_v6.0' into 'release/v6.0'

fix(driver_spi): add slave/slave_hd EDMA support, master bit-length check, and internal memory length check (v6.0)

See merge request espressif/esp-idf!46349
This commit is contained in:
morris
2026-03-19 22:57:43 +08:00
35 changed files with 735 additions and 498 deletions
@@ -266,10 +266,9 @@ void spitest_gpio_output_sel(uint32_t gpio_num, int func, uint32_t signal_idx);
//use this function to fix the input source when assign multiple functions to a same pin
void spitest_gpio_input_sel(uint32_t gpio_num, int func, uint32_t signal_idx);
//Note this cs_num is the ID of the connected devices' ID, e.g. if 2 devices are connected to the bus,
//then the cs_num of the 1st and 2nd devices are 0 and 1 respectively.
//Enable `soft_master` to connect to soft spi master instead of hardware master.
void same_pin_func_sel(spi_bus_config_t bus, uint8_t cs_pin, uint8_t cs_dev_id, bool soft_master);
// Connect master and slave to the same pin
// master_id and slave_id are the IDs of the master and slave devices, set 0 for each to use soft master/slave.
void same_pin_func_sel(spi_host_device_t master_id, spi_host_device_t slave_id, spi_bus_config_t bus, uint8_t cs_pin);
// Soft simulated spi master host for slave testing
// `speed_hz` max 500kHz
@@ -224,20 +224,20 @@ void spitest_gpio_input_sel(uint32_t gpio_num, int func, uint32_t signal_idx)
esp_rom_gpio_connect_in_signal(gpio_num, signal_idx, 0);
}
void same_pin_func_sel(spi_bus_config_t bus, uint8_t cs_pin, uint8_t cs_dev_id, bool soft_master)
void same_pin_func_sel(spi_host_device_t master_id, spi_host_device_t slave_id, spi_bus_config_t bus, uint8_t cs_pin)
{
spitest_gpio_output_sel(bus.mosi_io_num, FUNC_GPIO, soft_master ? SIG_GPIO_OUT_IDX : spi_periph_signal[TEST_SPI_HOST].spid_out);
spitest_gpio_input_sel(bus.mosi_io_num, FUNC_GPIO, spi_periph_signal[TEST_SLAVE_HOST].spid_in);
spitest_gpio_output_sel(bus.mosi_io_num, FUNC_GPIO, (!master_id) ? SIG_GPIO_OUT_IDX : spi_periph_signal[master_id].spid_out);
spitest_gpio_input_sel(bus.mosi_io_num, FUNC_GPIO, (!slave_id) ? SIG_GPIO_OUT_IDX : spi_periph_signal[slave_id].spid_in);
spitest_gpio_output_sel(bus.miso_io_num, FUNC_GPIO, spi_periph_signal[TEST_SLAVE_HOST].spiq_out);
spitest_gpio_input_sel(bus.miso_io_num, FUNC_GPIO, soft_master ? SIG_GPIO_OUT_IDX : spi_periph_signal[TEST_SPI_HOST].spiq_in);
spitest_gpio_input_sel(bus.miso_io_num, FUNC_GPIO, (!master_id) ? SIG_GPIO_OUT_IDX : spi_periph_signal[master_id].spiq_in);
spitest_gpio_output_sel(bus.miso_io_num, FUNC_GPIO, (!slave_id) ? SIG_GPIO_OUT_IDX : spi_periph_signal[slave_id].spiq_out);
gpio_set_level(cs_pin, 1); //ensure CS is inactive when select to soft_master and before transaction start
spitest_gpio_output_sel(cs_pin, FUNC_GPIO, soft_master ? SIG_GPIO_OUT_IDX : spi_periph_signal[TEST_SPI_HOST].spics_out[cs_dev_id]);
spitest_gpio_input_sel(cs_pin, FUNC_GPIO, spi_periph_signal[TEST_SLAVE_HOST].spics_in);
gpio_set_level(cs_pin, 1); //ensure CS is inactive when select 0 for soft_master and before transaction start
spitest_gpio_output_sel(cs_pin, FUNC_GPIO, (!master_id) ? SIG_GPIO_OUT_IDX : spi_periph_signal[master_id].spics_out[0]);
spitest_gpio_input_sel(cs_pin, FUNC_GPIO, (!slave_id) ? SIG_GPIO_OUT_IDX : spi_periph_signal[slave_id].spics_in);
spitest_gpio_output_sel(bus.sclk_io_num, FUNC_GPIO, soft_master ? SIG_GPIO_OUT_IDX : spi_periph_signal[TEST_SPI_HOST].spiclk_out);
spitest_gpio_input_sel(bus.sclk_io_num, FUNC_GPIO, spi_periph_signal[TEST_SLAVE_HOST].spiclk_in);
spitest_gpio_output_sel(bus.sclk_io_num, FUNC_GPIO, (!master_id) ? SIG_GPIO_OUT_IDX : spi_periph_signal[master_id].spiclk_out);
spitest_gpio_input_sel(bus.sclk_io_num, FUNC_GPIO, (!slave_id) ? SIG_GPIO_OUT_IDX : spi_periph_signal[slave_id].spiclk_in);
}
#define GPIO_MAX_FREQ 500*1000 //max of soft spi clock at delay(0)
+1 -1
View File
@@ -23,8 +23,8 @@ typedef struct {
gdma_channel_handle_t gdma_chan;
#elif CONFIG_IDF_TARGET_ESP32S2
//On ESP32S2, there is no gdma, so use SPI DMA to transmit data
spi_dma_ctx_t *spi_dma_ctx;
spi_dev_t *adc_spi_dev;
uint32_t dma_chan_id;
#elif CONFIG_IDF_TARGET_ESP32
//On ESP32, there is no gdma, so use I2S DMA to transmit data
i2s_dev_t *adc_i2s_dev;
+7 -6
View File
@@ -28,7 +28,7 @@ static IRAM_ATTR void adc_dma_intr_handler(void *arg)
bool conversion_finish = spi_ll_get_intr(ctx->adc_dma.adc_spi_dev, ADC_DMA_INTR_MASK);
if (conversion_finish) {
spi_ll_clear_intr(ctx->adc_dma.adc_spi_dev, ADC_DMA_INTR_MASK);
intptr_t desc_addr = spi_dma_ll_get_in_suc_eof_desc_addr(ctx->adc_dma.adc_spi_dev, ctx->adc_dma.spi_dma_ctx->rx_dma_chan.chan_id);
intptr_t desc_addr = spi_dma_ll_get_in_suc_eof_desc_addr(ctx->adc_dma.adc_spi_dev, ctx->adc_dma.dma_chan_id);
ctx->rx_eof_desc_addr = desc_addr;
need_yield = ctx->adc_intr_func(ctx);
}
@@ -58,10 +58,11 @@ esp_err_t adc_dma_init(adc_dma_t *adc_dma)
if (spi_success != true) {
return ESP_FAIL;
}
ret = spicommon_dma_chan_alloc(SPI3_HOST, SPI_DMA_CH_AUTO, &(adc_dma->spi_dma_ctx));
ret = spicommon_dma_chan_alloc(ADC_DMA_SPI_HOST, SPI_DMA_CH_AUTO);
if (ret != ESP_OK) {
return ret;
}
adc_dma->dma_chan_id = spi_bus_get_dma_ctx(ADC_DMA_SPI_HOST)->rx_dma_chan.chan_id;
adc_dma->adc_spi_dev = SPI_LL_GET_HW(ADC_DMA_SPI_HOST);
return ESP_OK;
@@ -70,7 +71,7 @@ esp_err_t adc_dma_init(adc_dma_t *adc_dma)
esp_err_t adc_dma_deinit(adc_dma_t adc_dma)
{
esp_intr_free(adc_dma.dma_intr_hdl);
spicommon_dma_chan_free(adc_dma.spi_dma_ctx);
spicommon_dma_chan_free(ADC_DMA_SPI_HOST);
spicommon_periph_free(ADC_DMA_SPI_HOST);
return ESP_OK;
}
@@ -79,7 +80,7 @@ esp_err_t adc_dma_start(adc_dma_t adc_dma, dma_descriptor_t *addr)
{
spi_ll_clear_intr(adc_dma.adc_spi_dev, ADC_DMA_INTR_MASK);
spi_ll_enable_intr(adc_dma.adc_spi_dev, ADC_DMA_INTR_MASK);
spi_dma_ll_rx_start(adc_dma.adc_spi_dev, adc_dma.spi_dma_ctx->rx_dma_chan.chan_id, (lldesc_t *)addr);
spi_dma_ll_rx_start(adc_dma.adc_spi_dev, adc_dma.dma_chan_id, (lldesc_t *)addr);
return ESP_OK;
}
@@ -87,12 +88,12 @@ esp_err_t adc_dma_stop(adc_dma_t adc_dma)
{
spi_ll_disable_intr(adc_dma.adc_spi_dev, ADC_DMA_INTR_MASK);
spi_ll_clear_intr(adc_dma.adc_spi_dev, ADC_DMA_INTR_MASK);
spi_dma_ll_rx_stop(adc_dma.adc_spi_dev, adc_dma.spi_dma_ctx->rx_dma_chan.chan_id);
spi_dma_ll_rx_stop(adc_dma.adc_spi_dev, adc_dma.dma_chan_id);
return ESP_OK;
}
esp_err_t adc_dma_reset(adc_dma_t adc_dma)
{
spi_dma_ll_rx_reset(adc_dma.adc_spi_dev, adc_dma.spi_dma_ctx->rx_dma_chan.chan_id);
spi_dma_ll_rx_reset(adc_dma.adc_spi_dev, adc_dma.dma_chan_id);
return ESP_OK;
}
+3 -5
View File
@@ -40,7 +40,6 @@
typedef struct {
void *periph_dev; /* DMA peripheral device address */
uint32_t dma_chan;
spi_dma_ctx_t *spi_dma_ctx; /* spi_dma context */
intr_handle_t intr_handle; /* Interrupt handle */
bool use_apll; /* Whether use APLL as digital controller clock source */
} dac_dma_periph_spi_t;
@@ -145,10 +144,9 @@ esp_err_t dac_dma_periph_init(uint32_t freq_hz, bool is_alternate, bool is_apll)
/* When transmit alternately, twice frequency is needed to guarantee the convert frequency in one channel */
uint32_t trans_freq_hz = freq_hz * (is_alternate ? 2 : 1);
ESP_GOTO_ON_ERROR(s_dac_dma_periph_set_clock(trans_freq_hz, is_apll), err, TAG, "Failed to set clock of DMA peripheral");
ESP_GOTO_ON_ERROR(spicommon_dma_chan_alloc(DAC_DMA_PERIPH_SPI_HOST, SPI_DMA_CH_AUTO, &s_ddp->spi_dma_ctx),
ESP_GOTO_ON_ERROR(spicommon_dma_chan_alloc(DAC_DMA_PERIPH_SPI_HOST, SPI_DMA_CH_AUTO),
err, TAG, "Failed to allocate dma peripheral channel");
s_ddp->dma_chan = s_ddp->spi_dma_ctx->rx_dma_chan.chan_id;
s_ddp->dma_chan = spi_bus_get_dma_ctx(DAC_DMA_PERIPH_SPI_HOST)->rx_dma_chan.chan_id;
spi_ll_enable_intr(s_ddp->periph_dev, SPI_LL_INTR_OUT_EOF | SPI_LL_INTR_OUT_TOTAL_EOF);
dac_ll_digi_set_convert_mode(is_alternate);
return ret;
@@ -162,7 +160,7 @@ esp_err_t dac_dma_periph_deinit(void)
ESP_RETURN_ON_FALSE(s_ddp != NULL, ESP_ERR_INVALID_STATE, TAG, "DAC DMA peripheral is not initialized");
ESP_RETURN_ON_FALSE(s_ddp->intr_handle == NULL, ESP_ERR_INVALID_STATE, TAG, "The interrupt is not deregistered yet");
if (s_ddp->dma_chan) {
ESP_RETURN_ON_ERROR(spicommon_dma_chan_free(s_ddp->spi_dma_ctx), TAG, "Failed to free dma peripheral channel");
ESP_RETURN_ON_ERROR(spicommon_dma_chan_free(DAC_DMA_PERIPH_SPI_HOST), TAG, "Failed to free dma peripheral channel");
}
ESP_RETURN_ON_FALSE(spicommon_periph_free(DAC_DMA_PERIPH_SPI_HOST), ESP_FAIL, TAG, "Failed to release DAC DMA peripheral");
spi_ll_disable_intr(s_ddp->periph_dev, SPI_LL_INTR_OUT_EOF | SPI_LL_INTR_OUT_TOTAL_EOF);
@@ -185,6 +185,7 @@ esp_err_t spi_slave_queue_trans(spi_host_device_t host, const spi_slave_transact
* @return
* - ESP_ERR_INVALID_ARG if parameter is invalid
* - ESP_ERR_NOT_SUPPORTED if flag `SPI_SLAVE_NO_RETURN_RESULT` is set
* - ESP_ERR_INVALID_STATE if dma over/underflow error occurs during psram transfer
* - ESP_OK on success
*/
esp_err_t spi_slave_get_trans_result(spi_host_device_t host, spi_slave_transaction_t **trans_desc, uint32_t ticks_to_wait);
@@ -204,6 +205,7 @@ esp_err_t spi_slave_get_trans_result(spi_host_device_t host, spi_slave_transacti
* out.
* @return
* - ESP_ERR_INVALID_ARG if parameter is invalid
* - ESP_ERR_INVALID_STATE if dma over/underflow error occurs during psram transfer
* - ESP_OK on success
*/
esp_err_t spi_slave_transmit(spi_host_device_t host, spi_slave_transaction_t *trans_desc, uint32_t ticks_to_wait);
@@ -166,7 +166,7 @@ esp_err_t spi_slave_hd_queue_trans(spi_host_device_t host_id, spi_slave_chan_t c
* - ESP_OK: on success
* - ESP_ERR_INVALID_ARG: Function is not valid
* - ESP_ERR_TIMEOUT: There's no transaction done before timeout
* - ESP_ERR_INVALID_STATE: Function called in invalid state. This API should be called under segment mode.
* - ESP_ERR_INVALID_STATE: Function called in invalid state. This API should be called under segment mode. Or DMA hardware over/underflow occurred.
*/
esp_err_t spi_slave_hd_get_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, uint32_t timeout);
@@ -223,7 +223,7 @@ esp_err_t spi_slave_hd_append_trans(spi_host_device_t host_id, spi_slave_chan_t
* - ESP_OK: on success
* - ESP_ERR_INVALID_ARG: Function is not valid
* - ESP_ERR_TIMEOUT: There's no transaction done before timeout
* - ESP_ERR_INVALID_STATE: Function called in invalid state. This API should be called under append mode.
* - ESP_ERR_INVALID_STATE: Function called in invalid state. This API should be called under append mode. Or DMA hardware over/underflow occurred.
*/
esp_err_t spi_slave_hd_get_append_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, uint32_t timeout);
@@ -50,7 +50,7 @@ typedef enum {
/// Attributes of an SPI bus
typedef struct {
spi_bus_config_t bus_cfg; ///< Config used to initialize the bus
uint64_t gpio_reserve; ///< reserved output gpio bit mask
uint64_t gpio_reserve; ///< reserved gpio matrix output pins and all iomux pins bit mask
uint32_t flags; ///< Flags (SPICOMMON_BUSFLAG_* flag combination of bus abilities) of the bus
int max_transfer_sz; ///< Maximum length of bytes available to send
bool dma_enabled; ///< To enable DMA or not
@@ -104,19 +104,18 @@ esp_err_t spicommon_bus_free(spi_host_device_t host_id);
*
* @param host_id SPI host ID
* @param dma_chan DMA channel to be used
* @param out_dma_ctx Actual DMA channel context (if you choose to assign a specific DMA channel, this will be the channel you assigned before)
*
* @return
* - ESP_OK: On success
* - ESP_ERR_NO_MEM: No enough memory
* - ESP_ERR_NOT_FOUND: There is no available DMA channel
*/
esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma_chan, spi_dma_ctx_t **out_dma_ctx);
esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma_chan);
/**
* @brief Alloc DMA descriptors for SPI
*
* @param dma_ctx DMA context returned by `spicommon_dma_chan_alloc`
* @param[in] host_id SPI host ID
* @param[in] cfg_max_sz Expected maximum transfer size, in bytes.
* @param[out] actual_max_sz Actual max transfer size one transaction can be, in bytes.
*
@@ -124,7 +123,7 @@ esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma
* - ESP_OK: On success
* - ESP_ERR_NO_MEM: No enough memory
*/
esp_err_t spicommon_dma_desc_alloc(spi_dma_ctx_t *dma_ctx, int cfg_max_sz, int *actual_max_sz);
esp_err_t spicommon_dma_desc_alloc(spi_host_device_t host_id, int cfg_max_sz, int *actual_max_sz);
/**
 * Setup/Configure dma descriptor link list
@@ -136,15 +135,40 @@ esp_err_t spicommon_dma_desc_alloc(spi_dma_ctx_t *dma_ctx, int cfg_max_sz, int *
*/
void spicommon_dma_desc_setup_link(spi_dma_desc_t *dmadesc, const void *data, int len, bool is_rx);
/**
* @brief Setup private buffer for DMA transfer
*
* @param host_id SPI host ID to access the DMA context
* @param buffer buffer to be setup
* @param len length of buffer, in byte
* @param is_tx if buffer is for tx/transmit direction
* @param psram_prefer if psram is preferred
* @param auto_malloc if auto malloc is enabled
* @param ret_buffer return buffer, which is the buffer that is actually used for DMA transfer
*
* @return
* - ESP_OK: On success
* - ESP_ERR_NO_MEM: No enough memory
*/
esp_err_t spicommon_dma_setup_priv_buffer(spi_host_device_t host_id, uint32_t *buffer, uint32_t len, bool is_tx, bool psram_prefer, bool auto_malloc, uint32_t **ret_buffer);
/**
* @brief Memory barrier for DMA RX buffer
*
* @param host_id SPI host ID
* @param rx_buffer RX buffer
*/
void spicommon_dma_rx_mb(spi_host_device_t host_id, void *rx_buffer);
/**
* @brief Free DMA for SPI
*
* @param dma_ctx spi_dma_ctx_t struct pointer
* @param host_id SPI host ID
*
* @return
* - ESP_OK: On success
*/
esp_err_t spicommon_dma_chan_free(spi_dma_ctx_t *dma_ctx);
esp_err_t spicommon_dma_chan_free(spi_host_device_t host_id);
/**
* @brief Connect a SPI peripheral to GPIO pins
@@ -180,19 +204,18 @@ esp_err_t spicommon_dma_chan_free(spi_dma_ctx_t *dma_ctx);
* - ESP_ERR_INVALID_ARG if parameter is invalid
* - ESP_OK on success
*/
esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_config_t *bus_config, uint32_t flags, uint32_t *flags_o, uint64_t *io_reserved);
esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_config_t *bus_config, uint32_t flags, uint32_t *flags_o);
/**
* @brief Free the IO used by a SPI peripheral
*
* @param bus_cfg Bus config struct which defines which pins to be used.
* @param io_reserved Bitmap indicate which pin is reserved
* @param host SPI peripheral
*
* @return
* - ESP_ERR_INVALID_ARG if parameter is invalid
* - ESP_OK on success
*/
esp_err_t spicommon_bus_free_io_cfg(const spi_bus_config_t *bus_cfg, uint64_t *io_reserved);
esp_err_t spicommon_bus_free_io_cfg(spi_host_device_t host);
/**
* @brief Initialize a Chip Select pin for a specific SPI peripheral
@@ -293,7 +316,7 @@ void spicommon_dmaworkaround_transfer_active(int dmachan);
* @param host_id The specified host to get attribute
* @return (Const) Pointer to the attributes
*/
const spi_bus_attr_t* spi_bus_get_attr(spi_host_device_t host_id);
spi_bus_attr_t* spi_bus_get_attr(spi_host_device_t host_id);
/**
* @brief Get the dma context of a specified SPI bus.
@@ -301,7 +324,7 @@ const spi_bus_attr_t* spi_bus_get_attr(spi_host_device_t host_id);
* @param host_id The specified host to get attribute
* @return (Const) Pointer to the dma context
*/
const spi_dma_ctx_t* spi_bus_get_dma_ctx(spi_host_device_t host_id);
spi_dma_ctx_t* spi_bus_get_dma_ctx(spi_host_device_t host_id);
/**
* @brief Register a function to a initialized bus to make it called when deinitializing the bus.
+180 -103
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -12,9 +12,8 @@
#include "esp_attr.h"
#include "esp_check.h"
#include "esp_cache.h"
#include "esp_rom_gpio.h"
#include "esp_heap_caps.h"
#include "soc/spi_periph.h"
#include "esp_memory_utils.h"
#include "driver/spi_master.h"
#include "driver/gpio.h"
#include "esp_private/gpio.h"
@@ -30,6 +29,9 @@
#if CONFIG_IDF_TARGET_ESP32
#include "soc/dport_reg.h"
#endif
#if CONFIG_SPIRAM
#include "esp_private/esp_psram_mspi.h"
#endif
#if SOC_PERIPH_CLK_CTRL_SHARED
#define SPI_COMMON_PERI_CLOCK_ATOMIC() PERIPH_RCC_ATOMIC()
@@ -70,11 +72,11 @@ typedef struct {
spi_destroy_func_t destroy_func;
void* destroy_arg;
spi_bus_attr_t bus_attr;
spi_dma_ctx_t *dma_ctx;
} spicommon_bus_context_t;
static spicommon_bus_context_t s_mainbus = SPI_MAIN_BUS_DEFAULT();
static spicommon_bus_context_t* bus_ctx[SOC_SPI_PERIPH_NUM] = {&s_mainbus};
static spi_dma_ctx_t *spi_dma_ctx[SOC_SPI_PERIPH_NUM];
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
/* The lock for the share SPI1 bus is registered here in a constructor due to need to access the context
@@ -98,6 +100,10 @@ esp_err_t spicommon_bus_alloc(spi_host_device_t host_id, const char *name)
SPI_COMMON_PERI_CLOCK_ATOMIC() {
spi_ll_enable_clock(host_id, true);
}
// Get cache alignment constraints
esp_cache_get_alignment(MALLOC_CAP_DMA, &ctx->bus_attr.cache_align_int);
esp_cache_get_alignment(MALLOC_CAP_SPIRAM, &ctx->bus_attr.cache_align_ext);
ctx->host_id = host_id;
bus_ctx[host_id] = ctx;
return ESP_OK;
@@ -115,20 +121,6 @@ esp_err_t spicommon_bus_free(spi_host_device_t host_id)
return ESP_OK;
}
#if SOC_GDMA_SUPPORTED
//NOTE!! If both A and B are not defined, '#if (A==B)' is true, because GCC use 0 stand for undefined symbol
#if defined(SOC_GDMA_BUS_AXI) && (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AXI)
#define SPI_GDMA_NEW_CHANNEL gdma_new_axi_channel
#elif defined(SOC_GDMA_BUS_AHB) && (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AHB)
#define SPI_GDMA_NEW_CHANNEL gdma_new_ahb_channel
#endif
#else
//Each bit stands for 1 dma channel, BIT(0) should be used for SPI1
static uint8_t spi_dma_chan_enabled = 0;
static portMUX_TYPE spi_dma_spinlock = portMUX_INITIALIZER_UNLOCKED;
#endif //!SOC_GDMA_SUPPORTED
static inline bool is_valid_host(spi_host_device_t host)
{
#if (SOC_SPI_PERIPH_NUM == 2)
@@ -150,44 +142,52 @@ int spicommon_irqdma_source_for_host(spi_host_device_t host)
//----------------------------------------------------------alloc dma periph-------------------------------------------------------//
#if !SOC_GDMA_SUPPORTED
//Each bit stands for 1 dma channel, BIT(0) should be used for SPI1
static uint8_t spi_dma_chan_enabled = 0;
static portMUX_TYPE spi_dma_spinlock = portMUX_INITIALIZER_UNLOCKED;
#if SPI_LL_DMA_SHARED
static inline shared_periph_module_t get_dma_periph(int dma_chan)
static inline void _spicommon_dma_rcc_clock_ctrl(spi_dma_chan_t dma_chan, bool enable)
{
assert(dma_chan >= 1 && dma_chan <= SOC_SPI_DMA_CHAN_NUM);
#if SPI_LL_DMA_SHARED
shared_periph_module_t dma_periph;
if (dma_chan == 1) {
return PERIPH_SPI2_DMA_MODULE;
dma_periph = PERIPH_SPI2_DMA_MODULE;
} else if (dma_chan == 2) {
return PERIPH_SPI3_DMA_MODULE;
dma_periph = PERIPH_SPI3_DMA_MODULE;
} else {
abort();
}
}
if (enable) {
PERIPH_RCC_ACQUIRE_ATOMIC(dma_periph, ref_count) {
if (ref_count == 0) {
spi_dma_ll_enable_bus_clock(dma_chan, true);
spi_dma_ll_reset_register(dma_chan);
}
}
} else {
PERIPH_RCC_RELEASE_ATOMIC(dma_periph, ref_count) {
if (ref_count == 0) {
spi_dma_ll_enable_bus_clock(dma_chan, false);
}
}
}
#else
SPI_COMMON_RCC_CLOCK_ATOMIC() {
spi_dma_ll_enable_bus_clock(dma_chan, enable);
spi_dma_ll_reset_register(dma_chan);
}
#endif
}
static bool claim_dma_chan(int dma_chan, uint32_t *out_actual_dma_chan)
{
bool ret = false;
portENTER_CRITICAL(&spi_dma_spinlock);
bool is_used = (BIT(dma_chan) & spi_dma_chan_enabled);
if (!is_used) {
if (!(BIT(dma_chan) & spi_dma_chan_enabled)) {
spi_dma_chan_enabled |= BIT(dma_chan);
#if SPI_LL_DMA_SHARED
PERIPH_RCC_ACQUIRE_ATOMIC(get_dma_periph(dma_chan), ref_count) {
//esp32s2: dma_chan index is same as spi host_id, no matter dma_chan_auto or not
if (ref_count == 0) {
spi_dma_ll_enable_bus_clock(dma_chan, true);
spi_dma_ll_reset_register(dma_chan);
}
}
#else
SPI_COMMON_RCC_CLOCK_ATOMIC() {
//esp32: have only one spi_dma
spi_dma_ll_enable_bus_clock(dma_chan, true);
spi_dma_ll_reset_register(dma_chan);
}
#endif
_spicommon_dma_rcc_clock_ctrl(dma_chan, true);
*out_actual_dma_chan = dma_chan;
ret = true;
}
@@ -257,6 +257,12 @@ static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_ch
}
#else //SOC_GDMA_SUPPORTED
//NOTE!! If both A and B are not defined, '#if (A==B)' is true, because GCC use 0 stand for undefined symbol
#if defined(SOC_GDMA_BUS_AXI) && (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AXI)
#define SPI_GDMA_NEW_CHANNEL gdma_new_axi_channel
#elif defined(SOC_GDMA_BUS_AHB) && (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AHB)
#define SPI_GDMA_NEW_CHANNEL gdma_new_ahb_channel
#endif
static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_chan, spi_dma_ctx_t *dma_ctx)
{
@@ -299,7 +305,7 @@ static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_ch
}
#endif //#if !SOC_GDMA_SUPPORTED
esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma_chan, spi_dma_ctx_t **out_dma_ctx)
esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma_chan)
{
assert(is_valid_host(host_id));
#if CONFIG_IDF_TARGET_ESP32
@@ -319,7 +325,7 @@ esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma
if (ret != ESP_OK) {
goto cleanup;
}
*out_dma_ctx = dma_ctx;
spi_dma_ctx[host_id] = dma_ctx;
return ret;
cleanup:
@@ -327,13 +333,17 @@ cleanup:
return ret;
}
esp_err_t spicommon_dma_desc_alloc(spi_dma_ctx_t *dma_ctx, int cfg_max_sz, int *actual_max_sz)
esp_err_t spicommon_dma_desc_alloc(spi_host_device_t host_id, int cfg_max_sz, int *actual_max_sz)
{
int dma_desc_ct = (cfg_max_sz + DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED - 1) / DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
if (dma_desc_ct == 0) {
dma_desc_ct = 1; //default to 4k when max is not given
}
spi_dma_ctx_t *dma_ctx = spi_bus_get_dma_ctx(host_id);
if (!dma_ctx) {
return ESP_ERR_INVALID_STATE;
}
dma_ctx->dmadesc_tx = heap_caps_aligned_calloc(DMA_DESC_MEM_ALIGN_SIZE, 1, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
dma_ctx->dmadesc_rx = heap_caps_aligned_calloc(DMA_DESC_MEM_ALIGN_SIZE, 1, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
if (dma_ctx->dmadesc_tx == NULL || dma_ctx->dmadesc_rx == NULL) {
@@ -380,10 +390,73 @@ void SPI_COMMON_ISR_ATTR spicommon_dma_desc_setup_link(spi_dma_desc_t *dmadesc,
dmadesc[n - 1].next = NULL;
}
//----------------------------------------------------------free dma periph-------------------------------------------------------//
esp_err_t spicommon_dma_chan_free(spi_dma_ctx_t *dma_ctx)
esp_err_t SPI_COMMON_ISR_ATTR spicommon_dma_setup_priv_buffer(spi_host_device_t host_id, uint32_t *buffer, uint32_t len, bool is_tx, bool psram_prefer, bool auto_malloc, uint32_t **ret_buffer)
{
assert(dma_ctx);
spi_bus_attr_t *bus_attr = spi_bus_get_attr(host_id);
spi_dma_ctx_t *dma_ctx = spi_bus_get_dma_ctx(host_id);
assert(bus_attr && dma_ctx);
if ((buffer == NULL) || (len == 0)) {
*ret_buffer = buffer;
return ESP_OK;
}
bool is_ptr_ext = esp_ptr_external_ram(buffer);
bool use_psram = is_ptr_ext && psram_prefer;
#if SOC_IS(ESP32S2)
ESP_RETURN_ON_FALSE_ISR((host_id != SPI3_HOST) || !use_psram, ESP_ERR_NOT_SUPPORTED, SPI_TAG, "SPI3 does not support external memory");
#endif
bool need_malloc = is_ptr_ext ? (!use_psram || !esp_ptr_dma_ext_capable(buffer)) : !esp_ptr_dma_capable(buffer);
// If psram is wanted, re-malloc also from psram.
uint32_t mem_cap = MALLOC_CAP_DMA | (use_psram ? MALLOC_CAP_SPIRAM : MALLOC_CAP_INTERNAL);
uint16_t alignment = 0;
if (is_tx) {
alignment = use_psram ? dma_ctx->dma_align_tx_ext : dma_ctx->dma_align_tx_int;
} else {
// RX cache sync still need consider the cache alignment requirement
if (use_psram) {
alignment = MAX(dma_ctx->dma_align_rx_ext, bus_attr->cache_align_ext);
} else {
alignment = MAX(dma_ctx->dma_align_rx_int, bus_attr->cache_align_int);
}
}
// length also must be aligned if cache sync is required, otherwise don't need
need_malloc |= (use_psram || bus_attr->cache_align_int > 1) ? (((uint32_t)buffer | len) & (alignment - 1)) : (((uint32_t)buffer) & (alignment - 1));
uint32_t align_len = (len + alignment - 1) & (~(alignment - 1)); // up align alignment
ESP_EARLY_LOGV(SPI_TAG, "SPI%d %s %p, len %d, is_ptr_ext %d, use_psram: %d, alignment: %d, need_malloc: %d from %s", host_id + 1, is_tx ? "TX" : "RX", buffer, len, is_ptr_ext, use_psram, alignment, need_malloc, (mem_cap & MALLOC_CAP_SPIRAM) ? "psram" : "internal");
if (need_malloc) {
ESP_RETURN_ON_FALSE_ISR(auto_malloc, ESP_ERR_INVALID_STATE, SPI_TAG, "%s addr&len not align to %d, or not dma_capable, suggest use 'heap_caps_malloc' or enable auto_align", is_tx ? "TX" : "RX", alignment);
uint32_t *temp = heap_caps_aligned_alloc(alignment, align_len, mem_cap);
ESP_RETURN_ON_FALSE_ISR(temp != NULL, ESP_ERR_NO_MEM, SPI_TAG, "Failed to allocate priv %s buffer", is_tx ? "TX" : "RX");
if (is_tx) {
memcpy(temp, buffer, len);
}
buffer = temp;
}
*ret_buffer = buffer;
uint32_t sync_flags = is_tx ? (ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED) : ESP_CACHE_MSYNC_FLAG_DIR_M2C;
esp_err_t ret = esp_cache_msync((void *)buffer, need_malloc ? align_len : len, sync_flags);
// ESP_ERR_NOT_SUPPORTED stands for not cache sync required, it's allowed here
ESP_RETURN_ON_FALSE_ISR((ret == ESP_OK) || (ret == ESP_ERR_NOT_SUPPORTED), ESP_ERR_INVALID_ARG, SPI_TAG, "sync failed for %s buffer", is_tx ? "TX" : "RX");
return ESP_OK;
}
/**
 * @brief Memory barrier after a DMA RX transfer into a buffer.
 *
 * When the RX buffer lives in external PSRAM, DMA writes may not yet be
 * visible to the CPU; issue the MSPI memory barrier before the CPU reads
 * the data. For internal-RAM buffers (or builds without PSRAM DMA support)
 * this is a no-op.
 *
 * @param host_id   SPI host ID (currently unused, kept for API symmetry)
 * @param rx_buffer Buffer that was just written by DMA
 */
void SPI_COMMON_ISR_ATTR spicommon_dma_rx_mb(spi_host_device_t host_id, void *rx_buffer)
{
    (void)host_id;
#if SOC_PSRAM_DMA_CAPABLE && CONFIG_SPIRAM
    if (!esp_ptr_external_ram(rx_buffer)) {
        return; // internal RAM: no barrier required
    }
    // dma rx data to psram need memory barrier fix
    esp_psram_mspi_mb();
#endif
}
//----------------------------------------------------------free dma periph-------------------------------------------------------//
esp_err_t spicommon_dma_chan_free(spi_host_device_t host_id)
{
spi_dma_ctx_t *dma_ctx = spi_bus_get_dma_ctx(host_id);
#if !SOC_GDMA_SUPPORTED
//On ESP32S2, each SPI controller has its own DMA channel
@@ -392,17 +465,7 @@ esp_err_t spicommon_dma_chan_free(spi_dma_ctx_t *dma_ctx)
portENTER_CRITICAL(&spi_dma_spinlock);
spi_dma_chan_enabled &= ~BIT(dma_chan);
#if SPI_LL_DMA_SHARED
PERIPH_RCC_RELEASE_ATOMIC(get_dma_periph(dma_chan), ref_count) {
if (ref_count == 0) {
spi_dma_ll_enable_bus_clock(dma_ctx->tx_dma_chan.host_id, false);
}
}
#else
SPI_COMMON_RCC_CLOCK_ATOMIC() {
spi_dma_ll_enable_bus_clock(dma_ctx->tx_dma_chan.host_id, false);
}
#endif
_spicommon_dma_rcc_clock_ctrl(dma_chan, false);
portEXIT_CRITICAL(&spi_dma_spinlock);
#else //SOC_GDMA_SUPPORTED
@@ -416,7 +479,14 @@ esp_err_t spicommon_dma_chan_free(spi_dma_ctx_t *dma_ctx)
}
#endif
if (dma_ctx->dmadesc_tx) {
free(dma_ctx->dmadesc_tx);
}
if (dma_ctx->dmadesc_rx) {
free(dma_ctx->dmadesc_rx);
}
free(dma_ctx);
spi_dma_ctx[host_id] = NULL;
return ESP_OK;
}
@@ -538,14 +608,13 @@ static void s_spi_common_bus_via_gpio(gpio_num_t gpio_num, int in_sig, int out_s
{
assert(GPIO_IS_VALID_GPIO(gpio_num)); //coverity check
if (in_sig != -1) {
gpio_input_enable(gpio_num);
esp_rom_gpio_connect_in_signal(gpio_num, in_sig, false);
gpio_matrix_input(gpio_num, in_sig, false);
}
if (out_sig != -1) {
// For gpio_matrix, reserve output pins, see 'esp_gpio_reserve.h'
*io_mask |= BIT64(gpio_num);
s_spi_common_gpio_check_reserve(gpio_num);
esp_rom_gpio_connect_out_signal(gpio_num, out_sig, false, false);
gpio_matrix_output(gpio_num, out_sig, false, false);
}
gpio_func_sel(gpio_num, PIN_FUNC_GPIO);
}
@@ -555,7 +624,7 @@ Do the common stuff to hook up a SPI host to a bus defined by a bunch of GPIO pi
bus config struct and it'll set up the GPIO matrix and enable the device. If a pin is set to non-negative value,
it should be able to be initialized.
*/
esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_config_t *bus_config, uint32_t flags, uint32_t* flags_o, uint64_t *io_reserved)
esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_config_t *bus_config, uint32_t flags, uint32_t* flags_o)
{
#if SOC_SPI_SUPPORT_OCT
// In the driver of previous version, spi data4 ~ spi data7 are not in spi_bus_config_t struct. So the new-added pins come as 0
@@ -613,9 +682,9 @@ esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_conf
SPI_CHECK_PIN(bus_config->miso_io_num, "miso", !(flags & SPICOMMON_BUSFLAG_MASTER) || (temp_flag & SPICOMMON_BUSFLAG_DUAL));
}
//set flags for DUAL mode according to output-capability of MOSI and MISO pins.
if ((bus_config->mosi_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->mosi_io_num)) &&
(bus_config->miso_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->miso_io_num)) &&
(bus_config->miso_io_num != bus_config->mosi_io_num)) {
//DUAL mode requires both MOSI and MISO to able to input and output.
if (GPIO_IS_VALID_OUTPUT_GPIO(bus_config->mosi_io_num) && GPIO_IS_VALID_OUTPUT_GPIO(bus_config->miso_io_num) &&
bus_config->miso_io_num != bus_config->mosi_io_num) {
temp_flag |= SPICOMMON_BUSFLAG_DUAL;
}
@@ -682,7 +751,7 @@ esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_conf
} else {
//Use GPIO matrix
if (bus_config->mosi_io_num >= 0) {
int in_sig = (!(flags & SPICOMMON_BUSFLAG_MASTER) || (temp_flag & SPICOMMON_BUSFLAG_DUAL)) ? spi_periph_signal[host].spid_in : -1;
int in_sig = spi_periph_signal[host].spid_in; // always connect input in case sio mode device is used
int out_sig = ((flags & SPICOMMON_BUSFLAG_MASTER) || (temp_flag & SPICOMMON_BUSFLAG_DUAL)) ? spi_periph_signal[host].spid_out : -1;
s_spi_common_bus_via_gpio(bus_config->mosi_io_num, in_sig, out_sig, &gpio_reserv);
}
@@ -731,20 +800,41 @@ esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_conf
return ESP_OK;
}
esp_err_t spicommon_bus_free_io_cfg(const spi_bus_config_t *bus_cfg, uint64_t *io_reserved)
esp_err_t spicommon_bus_free_io_cfg(spi_host_device_t host)
{
spi_bus_attr_t *bus_attr = (spi_bus_attr_t *)spi_bus_get_attr(host);
assert(bus_attr);
spi_bus_config_t *bus_cfg = &bus_attr->bus_cfg;
for (uint8_t i = 0; i < sizeof(bus_cfg->iocfg) / sizeof(bus_cfg->iocfg[0]); i++) {
#if !SOC_SPI_SUPPORT_OCT
if (i > 4) {
break;
}
#endif
if (GPIO_IS_VALID_GPIO(bus_cfg->iocfg[i]) && (*io_reserved & BIT64(bus_cfg->iocfg[i]))) {
*io_reserved &= ~BIT64(bus_cfg->iocfg[i]);
if (GPIO_IS_VALID_GPIO(bus_cfg->iocfg[i]) && (bus_attr->gpio_reserve & BIT64(bus_cfg->iocfg[i]))) {
bus_attr->gpio_reserve &= ~BIT64(bus_cfg->iocfg[i]);
// all reserved pins (even iomux input pins) is harmless to be disabled here.
gpio_output_disable(bus_cfg->iocfg[i]);
esp_gpio_revoke(BIT64(bus_cfg->iocfg[i]));
}
}
// disconnect all input signals anyway
gpio_matrix_input(GPIO_MATRIX_CONST_ONE_INPUT, spi_periph_signal[host].spics_in, false);
gpio_matrix_input(GPIO_MATRIX_CONST_ONE_INPUT, spi_periph_signal[host].spiclk_in, false);
gpio_matrix_input(GPIO_MATRIX_CONST_ONE_INPUT, spi_periph_signal[host].spid_in, false);
gpio_matrix_input(GPIO_MATRIX_CONST_ONE_INPUT, spi_periph_signal[host].spiq_in, false);
gpio_matrix_input(GPIO_MATRIX_CONST_ONE_INPUT, spi_periph_signal[host].spiwp_in, false);
gpio_matrix_input(GPIO_MATRIX_CONST_ONE_INPUT, spi_periph_signal[host].spihd_in, false);
#if SOC_SPI_SUPPORT_OCT
if (host == SPI2_HOST) { // only gpspi2 supports octal pins (data4 ~ data7)
gpio_matrix_input(GPIO_MATRIX_CONST_ONE_INPUT, spi_periph_signal[host].spid4_in, false);
gpio_matrix_input(GPIO_MATRIX_CONST_ONE_INPUT, spi_periph_signal[host].spid5_in, false);
gpio_matrix_input(GPIO_MATRIX_CONST_ONE_INPUT, spi_periph_signal[host].spid6_in, false);
gpio_matrix_input(GPIO_MATRIX_CONST_ONE_INPUT, spi_periph_signal[host].spid7_in, false);
}
#endif
return ESP_OK;
}
@@ -762,12 +852,11 @@ void spicommon_cs_initialize(spi_host_device_t host, int cs_io_num, int cs_id, i
if (GPIO_IS_VALID_OUTPUT_GPIO(cs_io_num)) {
out_mask |= BIT64(cs_io_num);
s_spi_common_gpio_check_reserve(cs_io_num);
esp_rom_gpio_connect_out_signal(cs_io_num, spi_periph_signal[host].spics_out[cs_id], false, false);
gpio_matrix_output(cs_io_num, spi_periph_signal[host].spics_out[cs_id], false, false);
}
// cs_id 0 is always used by slave for input
if (cs_id == 0) {
gpio_input_enable(cs_io_num);
esp_rom_gpio_connect_in_signal(cs_io_num, spi_periph_signal[host].spics_in, false);
gpio_matrix_input(cs_io_num, spi_periph_signal[host].spics_in, false);
}
gpio_func_sel(cs_io_num, PIN_FUNC_GPIO);
}
@@ -826,18 +915,14 @@ esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *
bus_attr->dma_enabled = (dma_chan != SPI_DMA_DISABLED);
bus_attr->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE;
if (bus_attr->dma_enabled) {
err = spicommon_dma_chan_alloc(host_id, dma_chan, &ctx->dma_ctx);
err = spicommon_dma_chan_alloc(host_id, dma_chan);
if (err != ESP_OK) {
goto cleanup;
}
err = spicommon_dma_desc_alloc(ctx->dma_ctx, bus_config->max_transfer_sz, &bus_attr->max_transfer_sz);
err = spicommon_dma_desc_alloc(host_id, bus_config->max_transfer_sz, &bus_attr->max_transfer_sz);
if (err != ESP_OK) {
goto cleanup;
}
// Get cache alignment constraints
esp_cache_get_alignment(MALLOC_CAP_DMA, &bus_attr->cache_align_int);
esp_cache_get_alignment(MALLOC_CAP_SPIRAM, &bus_attr->cache_align_ext);
}
spi_bus_lock_config_t lock_config = {
@@ -891,7 +976,7 @@ esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *
}
#endif //CONFIG_PM_ENABLE
err = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_MASTER | bus_config->flags, NULL, NULL);
err = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_MASTER | bus_config->flags, NULL);
if (err != ESP_OK) {
goto cleanup;
}
@@ -906,12 +991,10 @@ cleanup:
if (bus_attr->lock) {
spi_bus_deinit_lock(bus_attr->lock);
}
if (ctx->dma_ctx) {
free(ctx->dma_ctx->dmadesc_tx);
free(ctx->dma_ctx->dmadesc_rx);
spicommon_dma_chan_free(ctx->dma_ctx);
ctx->dma_ctx = NULL;
}
}
if (bus_attr->dma_enabled) {
// free dma channel and descriptors
spicommon_dma_chan_free(host_id);
}
spicommon_bus_free(host_id);
return err;
@@ -920,19 +1003,20 @@ cleanup:
void *spi_bus_dma_memory_alloc(spi_host_device_t host_id, size_t size, uint32_t extra_heap_caps)
{
SPI_CHECK(bus_ctx[host_id], "SPI %d not initialized", NULL, host_id + 1);
spi_dma_ctx_t *dma_ctx = spi_bus_get_dma_ctx(host_id);
size_t alignment = 16;
// detailed alignment requirement is not available for slave bus, so use 16 bytes as default
if (bus_ctx[host_id]->bus_attr.flags & SPICOMMON_BUSFLAG_MASTER) {
size_t alignment = 1; // return 1 anyway if dma not used but user use it.
if (dma_ctx) {
// As don't know the buffer will used for TX or RX, so use the max alignment requirement
alignment = (extra_heap_caps & MALLOC_CAP_SPIRAM) ? \
MAX(bus_ctx[host_id]->dma_ctx->dma_align_tx_ext, bus_ctx[host_id]->dma_ctx->dma_align_rx_ext) : \
MAX(bus_ctx[host_id]->dma_ctx->dma_align_tx_int, bus_ctx[host_id]->dma_ctx->dma_align_rx_int);
MAX(dma_ctx->dma_align_tx_ext, dma_ctx->dma_align_rx_ext) : \
MAX(dma_ctx->dma_align_tx_int, dma_ctx->dma_align_rx_int);
}
return heap_caps_aligned_calloc(alignment, 1, size, extra_heap_caps | MALLOC_CAP_DMA);
}
const spi_bus_attr_t* spi_bus_get_attr(spi_host_device_t host_id)
SPI_COMMON_ISR_ATTR spi_bus_attr_t* spi_bus_get_attr(spi_host_device_t host_id)
{
if (bus_ctx[host_id] == NULL) {
return NULL;
@@ -941,13 +1025,9 @@ const spi_bus_attr_t* spi_bus_get_attr(spi_host_device_t host_id)
return &bus_ctx[host_id]->bus_attr;
}
const spi_dma_ctx_t* spi_bus_get_dma_ctx(spi_host_device_t host_id)
SPI_COMMON_ISR_ATTR spi_dma_ctx_t* spi_bus_get_dma_ctx(spi_host_device_t host_id)
{
if (bus_ctx[host_id] == NULL) {
return NULL;
}
return bus_ctx[host_id]->dma_ctx;
return spi_dma_ctx[host_id];
}
esp_err_t spi_bus_free(spi_host_device_t host_id)
@@ -966,7 +1046,7 @@ esp_err_t spi_bus_free(spi_host_device_t host_id)
return err;
}
}
spicommon_bus_free_io_cfg(&bus_attr->bus_cfg, &bus_attr->gpio_reserve);
spicommon_bus_free_io_cfg(host_id);
#if SOC_SPI_SUPPORT_SLEEP_RETENTION && CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP
const periph_retention_module_t retention_id = spi_reg_retention_info[host_id - 1].module_id;
@@ -987,11 +1067,8 @@ esp_err_t spi_bus_free(spi_host_device_t host_id)
#endif
spi_bus_deinit_lock(bus_attr->lock);
if (ctx->dma_ctx) {
free(ctx->dma_ctx->dmadesc_tx);
free(ctx->dma_ctx->dmadesc_rx);
spicommon_dma_chan_free(ctx->dma_ctx);
ctx->dma_ctx = NULL;
if (bus_attr->dma_enabled) {
spicommon_dma_chan_free(host_id);
}
spicommon_bus_free(host_id);
return err;
@@ -124,10 +124,10 @@ We have two bits to control the interrupt:
#include "esp_ipc.h"
#include "esp_cache.h"
#include "esp_heap_caps.h"
#include "esp_memory_utils.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/queue.h"
#include "soc/soc_memory_layout.h"
#include "driver/gpio.h"
#include "hal/spi_hal.h"
#include "hal/spi_ll.h"
@@ -1002,6 +1002,7 @@ static void SPI_MASTER_ISR_ATTR spi_intr(void *arg)
//This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
spicommon_dmaworkaround_idle(dma_ctx->tx_dma_chan.chan_id);
#endif //#if CONFIG_IDF_TARGET_ESP32
spicommon_dma_rx_mb(host->id, host->cur_trans_buf.buffer_to_rcv);
spi_trans_dma_error_check(host);
}
@@ -1180,47 +1181,6 @@ static SPI_MASTER_ISR_ATTR void uninstall_priv_desc(spi_trans_priv_t* trans_buf)
}
}
static SPI_MASTER_ISR_ATTR esp_err_t setup_dma_priv_buffer(spi_host_t *host, uint32_t *buffer, uint32_t len, bool is_tx, uint32_t flags, uint32_t **ret_buffer)
{
#if CONFIG_IDF_TARGET_ESP32S2
ESP_RETURN_ON_FALSE_ISR((host->id != SPI3_HOST) || !(flags & SPI_TRANS_DMA_USE_PSRAM), ESP_ERR_NOT_SUPPORTED, SPI_TAG, "SPI3 does not support external memory");
#endif
bool is_ptr_ext = esp_ptr_external_ram(buffer);
bool use_psram = is_ptr_ext && (flags & SPI_TRANS_DMA_USE_PSRAM);
bool need_malloc = is_ptr_ext ? (!use_psram || !esp_ptr_dma_ext_capable(buffer)) : !esp_ptr_dma_capable(buffer);
uint16_t alignment = 0;
// If psram is wanted, re-malloc also from psram.
uint32_t mem_cap = MALLOC_CAP_DMA | (use_psram ? MALLOC_CAP_SPIRAM : MALLOC_CAP_INTERNAL);
if (is_tx) {
alignment = use_psram ? host->dma_ctx->dma_align_tx_ext : host->dma_ctx->dma_align_tx_int;
} else {
// RX cache sync still need consider the cache alignment requirement
if (use_psram) {
alignment = MAX(host->dma_ctx->dma_align_rx_ext, host->bus_attr->cache_align_ext);
} else {
alignment = MAX(host->dma_ctx->dma_align_rx_int, host->bus_attr->cache_align_int);
}
}
need_malloc |= (((uint32_t)buffer | len) & (alignment - 1));
ESP_EARLY_LOGV(SPI_TAG, "%s %p, len %d, is_ptr_ext %d, use_psram: %d, alignment: %d, need_malloc: %d from %s", is_tx ? "TX" : "RX", buffer, len, is_ptr_ext, use_psram, alignment, need_malloc, (mem_cap & MALLOC_CAP_SPIRAM) ? "psram" : "internal");
if (need_malloc) {
ESP_RETURN_ON_FALSE_ISR(!(flags & SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL), ESP_ERR_INVALID_ARG, SPI_TAG, "Set flag SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL but %s addr&len not align to %d, or not dma_capable", is_tx ? "TX" : "RX", alignment);
len = (len + alignment - 1) & (~(alignment - 1)); // up align alignment
uint32_t *temp = heap_caps_aligned_alloc(alignment, len, mem_cap);
ESP_RETURN_ON_FALSE_ISR(temp != NULL, ESP_ERR_NO_MEM, SPI_TAG, "Failed to allocate priv %s buffer", is_tx ? "TX" : "RX");
if (is_tx) {
memcpy(temp, buffer, len);
}
buffer = temp;
}
esp_err_t ret = esp_cache_msync((void *)buffer, len, is_tx ? (ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED) : ESP_CACHE_MSYNC_FLAG_DIR_M2C);
// ESP_ERR_NOT_SUPPORTED stands for not cache sync required, it's allowed here
ESP_RETURN_ON_FALSE_ISR((ret == ESP_OK) || (ret == ESP_ERR_NOT_SUPPORTED), ESP_ERR_INVALID_ARG, SPI_TAG, "sync failed for %s buffer", is_tx ? "TX" : "RX");
*ret_buffer = buffer;
return ESP_OK;
}
static SPI_MASTER_ISR_ATTR esp_err_t setup_priv_desc(spi_host_t *host, spi_trans_priv_t* priv_desc)
{
spi_transaction_t *trans_desc = priv_desc->trans;
@@ -1230,25 +1190,23 @@ static SPI_MASTER_ISR_ATTR esp_err_t setup_priv_desc(spi_host_t *host, spi_trans
uint32_t* rcv_ptr = (trans_desc->flags & SPI_TRANS_USE_RXDATA) ? (uint32_t *)trans_desc->rx_data : (uint32_t *)trans_desc->rx_buffer;
// tx memory assign
uint32_t *send_ptr = (trans_desc->flags & SPI_TRANS_USE_TXDATA) ? (uint32_t *)trans_desc->tx_data : (uint32_t *)trans_desc->tx_buffer;
esp_err_t ret = ESP_OK;
if (send_ptr && bus_attr->dma_enabled) {
ret = setup_dma_priv_buffer(host, send_ptr, (trans_desc->length + 7) / 8, true, trans_desc->flags, &send_ptr);
if (ret != ESP_OK) {
goto clean_up;
}
if (!bus_attr->dma_enabled) {
priv_desc->buffer_to_send = send_ptr;
priv_desc->buffer_to_rcv = rcv_ptr;
return ESP_OK;
}
if (rcv_ptr && bus_attr->dma_enabled) {
ret = setup_dma_priv_buffer(host, rcv_ptr, (trans_desc->rxlength + 7) / 8, false, trans_desc->flags, &rcv_ptr);
if (ret != ESP_OK) {
goto clean_up;
}
bool use_psram = trans_desc->flags & SPI_TRANS_DMA_USE_PSRAM;
bool auto_malloc = !(trans_desc->flags & SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL);
esp_err_t ret = spicommon_dma_setup_priv_buffer(host->id, send_ptr, (trans_desc->length + 7) / 8, true, use_psram, auto_malloc, (void *)&priv_desc->buffer_to_send);
if (ret != ESP_OK) {
goto clean_up;
}
priv_desc->buffer_to_send = send_ptr;
priv_desc->buffer_to_rcv = rcv_ptr;
return ESP_OK;
ret = spicommon_dma_setup_priv_buffer(host->id, rcv_ptr, (trans_desc->rxlength + 7) / 8, false, use_psram, auto_malloc, &priv_desc->buffer_to_rcv);
if (ret != ESP_OK) {
goto clean_up;
}
return ret;
clean_up:
uninstall_priv_desc(priv_desc);
@@ -1477,6 +1435,7 @@ esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_end(spi_device_handle_t handle,
return ESP_ERR_TIMEOUT;
}
}
spicommon_dma_rx_mb(host->id, host->cur_trans_buf.buffer_to_rcv);
spi_trans_dma_error_check(host);
uint32_t trans_flags = host->cur_trans_buf.trans->flags; // save the flags before bus_lock release
+64 -122
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -54,12 +54,13 @@ static const char *SPI_TAG = "spi_slave";
/// struct to hold private transaction data (like tx and rx buffer for DMA).
typedef struct {
spi_slave_transaction_t *trans; //original trans
void *tx_buffer; //actually tx buffer (re-malloced if needed)
void *rx_buffer; //actually rx buffer (re-malloced if needed)
uint32_t *tx_buffer; //actually tx buffer (re-malloced if needed)
uint32_t *rx_buffer; //actually rx buffer (re-malloced if needed)
bool dma_hw_error; //true if DMA hardware over/underflow occurred
} spi_slave_trans_priv_t;
typedef struct {
int id;
spi_host_device_t id;
_Atomic spi_bus_fsm_t fsm;
spi_bus_attr_t* bus_attr;
spi_dma_ctx_t *dma_ctx;
@@ -69,13 +70,10 @@ typedef struct {
spi_slave_trans_priv_t cur_trans;
uint32_t flags;
uint32_t intr_flags;
int max_transfer_sz;
QueueHandle_t trans_queue;
QueueHandle_t ret_queue;
bool dma_enabled;
bool cs_iomux;
uint8_t cs_in_signal;
uint16_t internal_mem_align_size;
#ifdef CONFIG_PM_ENABLE
esp_pm_lock_handle_t pm_lock;
#endif
@@ -185,13 +183,15 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b
atomic_store(&spihost[host]->fsm, SPI_BUS_FSM_ENABLED);
spi_slave_hal_context_t *hal = &spihost[host]->hal;
spihost[host]->dma_enabled = (dma_chan != SPI_DMA_DISABLED);
if (spihost[host]->dma_enabled) {
ret = spicommon_dma_chan_alloc(host, dma_chan, &spihost[host]->dma_ctx);
spihost[host]->bus_attr->dma_enabled = (dma_chan != SPI_DMA_DISABLED);
spihost[host]->bus_attr->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE;
if (spihost[host]->bus_attr->dma_enabled) {
ret = spicommon_dma_chan_alloc(host, dma_chan);
if (ret != ESP_OK) {
goto cleanup;
}
ret = spicommon_dma_desc_alloc(spihost[host]->dma_ctx, bus_config->max_transfer_sz, &spihost[host]->max_transfer_sz);
spihost[host]->dma_ctx = spi_bus_get_dma_ctx(host);
ret = spicommon_dma_desc_alloc(host, bus_config->max_transfer_sz, &spihost[host]->bus_attr->max_transfer_sz);
if (ret != ESP_OK) {
goto cleanup;
}
@@ -199,20 +199,9 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b
hal->dmadesc_tx = spihost[host]->dma_ctx->dmadesc_tx;
hal->dmadesc_rx = spihost[host]->dma_ctx->dmadesc_rx;
hal->dmadesc_n = spihost[host]->dma_ctx->dma_desc_num;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
size_t alignment;
esp_cache_get_alignment(MALLOC_CAP_DMA, &alignment);
spihost[host]->internal_mem_align_size = alignment;
#else
spihost[host]->internal_mem_align_size = 4;
#endif
} else {
//We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most.
spihost[host]->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE;
}
err = spicommon_bus_initialize_io(host, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, NULL, NULL);
err = spicommon_bus_initialize_io(host, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, NULL);
if (err != ESP_OK) {
ret = err;
goto cleanup;
@@ -226,7 +215,7 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b
spihost[host]->flags = spihost[host]->bus_attr->flags; // This flag MUST be set after spicommon_bus_initialize_io is called
// The slave DMA suffers from unexpected transactions. Forbid reading if DMA is enabled by disabling the CS line.
if (spihost[host]->dma_enabled) {
if (spihost[host]->bus_attr->dma_enabled) {
freeze_cs(spihost[host]);
}
@@ -313,7 +302,7 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b
hal->rx_lsbfirst = (slave_config->flags & SPI_SLAVE_RXBIT_LSBFIRST) ? 1 : 0;
hal->tx_lsbfirst = (slave_config->flags & SPI_SLAVE_TXBIT_LSBFIRST) ? 1 : 0;
hal->mode = slave_config->mode;
hal->use_dma = spihost[host]->dma_enabled;
hal->use_dma = spihost[host]->bus_attr->dma_enabled;
spi_slave_hal_setup_device(hal);
return ESP_OK;
@@ -332,12 +321,10 @@ esp_err_t spi_slave_free(spi_host_device_t host)
if (spihost[host]->ret_queue) {
vQueueDelete(spihost[host]->ret_queue);
}
if (spihost[host]->dma_enabled) {
free(spihost[host]->dma_ctx->dmadesc_tx);
free(spihost[host]->dma_ctx->dmadesc_rx);
spicommon_dma_chan_free(spihost[host]->dma_ctx);
if (spihost[host]->bus_attr->dma_enabled) {
spicommon_dma_chan_free(host);
}
spicommon_bus_free_io_cfg(&spihost[host]->bus_attr->bus_cfg, &spihost[host]->bus_attr->gpio_reserve);
spicommon_bus_free_io_cfg(host);
if (spihost[host]->cfg.spics_io_num >= 0) {
spicommon_cs_free_io(spihost[host]->cfg.spics_io_num, &spihost[host]->bus_attr->gpio_reserve);
}
@@ -409,12 +396,11 @@ static void SPI_SLAVE_ISR_ATTR spi_slave_uninstall_priv_trans(spi_host_device_t
{
__attribute__((unused)) spi_slave_transaction_t *trans = (spi_slave_transaction_t *)priv_trans->trans;
#if CONFIG_IDF_TARGET_ESP32
if (spihost[host]->dma_enabled && (trans->trans_len % 32)) {
if (spihost[host]->bus_attr->dma_enabled && (trans->trans_len % 32)) {
ESP_EARLY_LOGW(SPI_TAG, "Use DMA but real trans_len is not 4 bytes aligned, slave may loss data");
}
#endif
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
if (spihost[host]->dma_enabled) {
if (spihost[host]->bus_attr->dma_enabled) {
if (trans->tx_buffer && (trans->tx_buffer != priv_trans->tx_buffer)) {
free(priv_trans->tx_buffer);
}
@@ -423,54 +409,28 @@ static void SPI_SLAVE_ISR_ATTR spi_slave_uninstall_priv_trans(spi_host_device_t
free(priv_trans->rx_buffer);
}
}
#endif //SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
}
static esp_err_t SPI_SLAVE_ISR_ATTR spi_slave_setup_priv_trans(spi_host_device_t host, spi_slave_trans_priv_t *priv_trans)
static esp_err_t SPI_SLAVE_ATTR spi_slave_setup_priv_trans(spi_host_device_t host, spi_slave_trans_priv_t *priv_trans)
{
spi_slave_transaction_t *trans = (spi_slave_transaction_t *)priv_trans->trans;
priv_trans->tx_buffer = (void *)trans->tx_buffer;
priv_trans->rx_buffer = trans->rx_buffer;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
uint16_t alignment = spihost[host]->internal_mem_align_size;
uint32_t buffer_byte_len = (trans->length + 7) / 8;
if (spihost[host]->dma_enabled && trans->tx_buffer) {
if ((!esp_ptr_dma_capable(trans->tx_buffer) || ((((uint32_t)trans->tx_buffer) | buffer_byte_len) & (alignment - 1)))) {
ESP_RETURN_ON_FALSE_ISR(trans->flags & SPI_SLAVE_TRANS_DMA_BUFFER_ALIGN_AUTO, ESP_ERR_INVALID_ARG, SPI_TAG, "TX buffer addr&len not align to %d byte, or not dma_capable", alignment);
//if txbuf in the desc not DMA-capable, or not align to "alignment", malloc a new one
ESP_EARLY_LOGD(SPI_TAG, "Allocate TX buffer for DMA");
buffer_byte_len = (buffer_byte_len + alignment - 1) & (~(alignment - 1)); // up align to "alignment"
uint32_t *temp = heap_caps_aligned_alloc(alignment, buffer_byte_len, MALLOC_CAP_DMA);
if (temp == NULL) {
return ESP_ERR_NO_MEM;
}
memcpy(temp, trans->tx_buffer, (trans->length + 7) / 8);
priv_trans->tx_buffer = temp;
}
esp_err_t ret = esp_cache_msync((void *)priv_trans->tx_buffer, buffer_byte_len, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
ESP_RETURN_ON_FALSE_ISR(ESP_OK == ret, ESP_ERR_INVALID_STATE, SPI_TAG, "mem sync c2m(writeback) fail");
if (!spihost[host]->bus_attr->dma_enabled) {
priv_trans->tx_buffer = (uint32_t *)trans->tx_buffer;
priv_trans->rx_buffer = (uint32_t *)trans->rx_buffer;
return ESP_OK;
}
if (spihost[host]->dma_enabled && trans->rx_buffer) {
if ((!esp_ptr_dma_capable(trans->rx_buffer) || ((((uint32_t)trans->rx_buffer) | (trans->length + 7) / 8) & (alignment - 1)))) {
ESP_RETURN_ON_FALSE_ISR(trans->flags & SPI_SLAVE_TRANS_DMA_BUFFER_ALIGN_AUTO, ESP_ERR_INVALID_ARG, SPI_TAG, "RX buffer addr&len not align to %d byte, or not dma_capable", alignment);
//if rxbuf in the desc not DMA-capable, or not align to "alignment", malloc a new one
ESP_EARLY_LOGD(SPI_TAG, "Allocate RX buffer for DMA");
buffer_byte_len = (buffer_byte_len + alignment - 1) & (~(alignment - 1)); // up align to "alignment"
priv_trans->rx_buffer = heap_caps_aligned_alloc(alignment, buffer_byte_len, MALLOC_CAP_DMA);
if (priv_trans->rx_buffer == NULL) {
free(priv_trans->tx_buffer);
return ESP_ERR_NO_MEM;
}
}
esp_err_t ret = esp_cache_msync((void *)priv_trans->rx_buffer, buffer_byte_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
ESP_RETURN_ON_FALSE_ISR(ESP_OK == ret, ESP_ERR_INVALID_STATE, SPI_TAG, "mem sync m2c(invalid) fail");
bool auto_malloc = (trans->flags & SPI_SLAVE_TRANS_DMA_BUFFER_ALIGN_AUTO);
esp_err_t ret = spicommon_dma_setup_priv_buffer(spihost[host]->id, (uint32_t *)trans->tx_buffer, (trans->length + 7) / 8, true, true, auto_malloc, &priv_trans->tx_buffer);
if (ret != ESP_OK) {
spi_slave_uninstall_priv_trans(host, priv_trans);
return ret;
}
#endif //SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
return ESP_OK;
ret = spicommon_dma_setup_priv_buffer(spihost[host]->id, (uint32_t *)trans->rx_buffer, (trans->length + 7) / 8, false, true, auto_malloc, &priv_trans->rx_buffer);
if (ret != ESP_OK) {
spi_slave_uninstall_priv_trans(host, priv_trans);
}
return ret;
}
esp_err_t SPI_SLAVE_ATTR spi_slave_queue_trans(spi_host_device_t host, const spi_slave_transaction_t *trans_desc, uint32_t ticks_to_wait)
@@ -478,20 +438,7 @@ esp_err_t SPI_SLAVE_ATTR spi_slave_queue_trans(spi_host_device_t host, const spi
BaseType_t r;
SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);
SPI_CHECK(spihost[host]->dma_enabled == 0 || trans_desc->tx_buffer == NULL || esp_ptr_dma_capable(trans_desc->tx_buffer),
"txdata not in DMA-capable memory", ESP_ERR_INVALID_ARG);
// We don't check length WORD alignment for rx when using DMA, seems break DMA requirement,
// however peripheral can also stop DMA from over writing memory even if it not aligned (except esp32).
// ATTENTION!: On esp32, peripheral can NOT stop DMA, if length not WORD aligned,
// remain bytes in last word domain will overwritten by DMA HW, which may cause unexpected issues!
// But driver already used for long time, to avoid breaking changes, we still don't add alignment limit.
SPI_CHECK(spihost[host]->dma_enabled == 0 || trans_desc->rx_buffer == NULL ||
(esp_ptr_dma_capable(trans_desc->rx_buffer) && esp_ptr_word_aligned(trans_desc->rx_buffer) &&
(trans_desc->length % 8 == 0)),
"rxdata not in DMA-capable memory or not BYTE aligned", ESP_ERR_INVALID_ARG);
SPI_CHECK(trans_desc->length <= spihost[host]->max_transfer_sz * 8, "data transfer > host maximum", ESP_ERR_INVALID_ARG);
SPI_CHECK(trans_desc->length <= spihost[host]->bus_attr->max_transfer_sz * 8, "data transfer > host maximum", ESP_ERR_INVALID_ARG);
spi_slave_trans_priv_t priv_trans = {.trans = (spi_slave_transaction_t *)trans_desc};
SPI_CHECK(ESP_OK == spi_slave_setup_priv_trans(host, &priv_trans), "slave setup priv_trans failed", ESP_ERR_NO_MEM);
@@ -543,30 +490,18 @@ esp_err_t SPI_SLAVE_ISR_ATTR spi_slave_queue_trans_isr(spi_host_device_t host, c
BaseType_t do_yield = pdFALSE;
ESP_RETURN_ON_FALSE_ISR(is_valid_host(host), ESP_ERR_INVALID_ARG, SPI_TAG, "invalid host");
ESP_RETURN_ON_FALSE_ISR(spihost[host], ESP_ERR_INVALID_ARG, SPI_TAG, "host not slave");
ESP_RETURN_ON_FALSE_ISR(trans_desc->length <= spihost[host]->max_transfer_sz * 8, ESP_ERR_INVALID_ARG, SPI_TAG, "data transfer > host maximum");
if (spihost[host]->dma_enabled) {
uint16_t alignment = spihost[host]->internal_mem_align_size;
(void) alignment;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
// For those targets length and addr alignment is still required from Cache side
uint32_t buffer_byte_len = (trans_desc->length + 7) / 8;
bool tx_aligned = (trans_desc->tx_buffer == NULL) || (esp_ptr_dma_capable(trans_desc->tx_buffer) && ((((uint32_t)trans_desc->tx_buffer | buffer_byte_len) & (alignment - 1)) == 0));
bool rx_aligned = (trans_desc->rx_buffer == NULL) || (esp_ptr_dma_capable(trans_desc->rx_buffer) && ((((uint32_t)trans_desc->rx_buffer | buffer_byte_len) & (alignment - 1)) == 0));
#else
bool tx_aligned = (trans_desc->tx_buffer == NULL) || esp_ptr_dma_capable(trans_desc->tx_buffer);
bool rx_aligned = (trans_desc->rx_buffer == NULL) || (esp_ptr_dma_capable(trans_desc->rx_buffer) && esp_ptr_word_aligned(trans_desc->rx_buffer) && (trans_desc->length % 8 == 0));
#endif
ESP_RETURN_ON_FALSE_ISR(tx_aligned, ESP_ERR_INVALID_ARG, SPI_TAG, "txdata addr & len not align to %d bytes or not dma_capable", alignment);
ESP_RETURN_ON_FALSE_ISR(rx_aligned, ESP_ERR_INVALID_ARG, SPI_TAG, "rxdata addr & len not align to %d bytes or not dma_capable", alignment);
}
ESP_RETURN_ON_FALSE_ISR(trans_desc->length <= spihost[host]->bus_attr->max_transfer_sz * 8, ESP_ERR_INVALID_ARG, SPI_TAG, "data transfer > host maximum");
spi_slave_trans_priv_t priv_trans = {
.trans = (spi_slave_transaction_t *)trans_desc,
.tx_buffer = (void *)trans_desc->tx_buffer,
.rx_buffer = trans_desc->rx_buffer,
};
if (spihost[host]->bus_attr->dma_enabled) {
// isr api is not allowed to auto_malloc, so don't need to 'uninstall' anything here, return directly
ESP_RETURN_ON_ERROR_ISR(spicommon_dma_setup_priv_buffer(host, (uint32_t *)trans_desc->tx_buffer, (trans_desc->length + 7) / 8, true, true, false, &priv_trans.tx_buffer), SPI_TAG, "");
ESP_RETURN_ON_ERROR_ISR(spicommon_dma_setup_priv_buffer(host, (uint32_t *)trans_desc->rx_buffer, (trans_desc->length + 7) / 8, false, true, false, &priv_trans.rx_buffer), SPI_TAG, "");
}
r = xQueueSendFromISR(spihost[host]->trans_queue, (void *)&priv_trans, &do_yield);
if (!r) {
return ESP_ERR_NO_MEM;
@@ -617,7 +552,7 @@ esp_err_t SPI_SLAVE_ATTR spi_slave_get_trans_result(spi_host_device_t host, spi_
spi_slave_uninstall_priv_trans(host, &priv_trans);
*trans_desc = priv_trans.trans;
return ESP_OK;
return priv_trans.dma_hw_error ? ESP_ERR_INVALID_STATE : ESP_OK;
}
esp_err_t SPI_SLAVE_ATTR spi_slave_transmit(spi_host_device_t host, spi_slave_transaction_t *trans_desc, uint32_t ticks_to_wait)
@@ -665,7 +600,7 @@ static void SPI_SLAVE_ISR_ATTR s_spi_slave_prepare_data(spi_slave_t *host)
{
spi_slave_hal_context_t *hal = &host->hal;
if (host->dma_enabled) {
if (host->bus_attr->dma_enabled) {
s_spi_slave_dma_prepare_data(host->dma_ctx, &host->hal);
} else {
//No DMA. Copy data to transmit buffers.
@@ -688,6 +623,20 @@ static void SPI_SLAVE_ISR_ATTR spi_slave_restart_after_dmareset(void *arg)
}
#endif //#if CONFIG_IDF_TARGET_ESP32
static void SPI_SLAVE_ISR_ATTR spi_slave_trans_dma_error_check(spi_slave_t *host)
{
#if SOC_PSRAM_DMA_CAPABLE && CONFIG_SPIRAM //error checks only for psram dma
if (esp_ptr_external_ram(host->cur_trans.rx_buffer) && spi_slave_hal_get_intr_status(&host->hal, SPI_LL_INTR_IN_FULL)) {
host->cur_trans.dma_hw_error = true;
ESP_DRAM_LOGE(SPI_TAG, "DMA RX overflow detected");
}
if (esp_ptr_external_ram(host->cur_trans.tx_buffer) && spi_slave_hal_get_intr_status(&host->hal, SPI_LL_INTR_OUT_EMPTY)) {
host->cur_trans.dma_hw_error = true;
ESP_DRAM_LOGE(SPI_TAG, "DMA TX underflow detected");
}
#endif
}
//This is run in interrupt context and apart from initialization and destruction, this is the only code
//touching the host (=spihost[x]) variable. The rest of the data arrives in queues. That is why there are
//no muxes in this code.
@@ -700,11 +649,13 @@ static void SPI_SLAVE_ISR_ATTR spi_intr(void *arg)
assert(spi_slave_hal_usr_is_done(hal));
bool use_dma = host->dma_enabled;
bool use_dma = host->bus_attr->dma_enabled;
if (host->cur_trans.trans) {
// When DMA is enabled, the slave rx dma suffers from unexpected transactions. Forbid reading until transaction ready.
if (use_dma) {
freeze_cs(host);
spicommon_dma_rx_mb(host->id, host->cur_trans.rx_buffer);
spi_slave_trans_dma_error_check(host);
}
spi_slave_hal_store_result(hal);
@@ -718,17 +669,6 @@ static void SPI_SLAVE_ISR_ATTR spi_intr(void *arg)
}
#endif //#if CONFIG_IDF_TARGET_ESP32
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE //invalidate here to let user access rx data in post_cb if possible
if (use_dma && host->cur_trans.rx_buffer) {
uint16_t alignment = host->internal_mem_align_size;
uint32_t buffer_byte_len = (host->cur_trans.trans->length + 7) / 8;
buffer_byte_len = (buffer_byte_len + alignment - 1) & (~(alignment - 1));
// invalidate priv_trans.buffer_to_rcv anyway, only user provide aligned buffer can rcv correct data in post_cb
esp_err_t ret = esp_cache_msync((void *)host->cur_trans.rx_buffer, buffer_byte_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
assert(ret == ESP_OK);
(void)ret;
}
#endif
if (host->cfg.post_trans_cb) {
host->cfg.post_trans_cb(host->cur_trans.trans);
}
@@ -789,7 +729,9 @@ static void SPI_SLAVE_ISR_ATTR spi_intr(void *arg)
if (use_dma) {
restore_cs(host);
}
#if CONFIG_SPIRAM && SOC_PSRAM_DMA_CAPABLE
spi_slave_hal_clear_intr_status(hal, SPI_LL_INTR_IN_FULL | SPI_LL_INTR_OUT_EMPTY);
#endif
//Kick off transfer
spi_slave_hal_user_start(hal);
if (host->cfg.post_setup_cb) {
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2010-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2010-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -8,6 +8,7 @@
#include "esp_compiler.h"
#include "esp_log.h"
#include "esp_check.h"
#include "esp_cache.h"
#include "esp_memory_utils.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
@@ -19,9 +20,6 @@
#include "esp_private/esp_cache_private.h"
#include "driver/spi_slave_hd.h"
#include "hal/spi_slave_hd_hal.h"
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
#include "esp_cache.h"
#endif
#ifdef CONFIG_SPI_SLAVE_ISR_IN_IRAM
#define SPI_SLAVE_ISR_ATTR IRAM_ATTR
@@ -40,6 +38,7 @@
typedef struct {
spi_slave_hd_data_t *trans; //original trans
void *aligned_buffer; //actually trans buffer (re-malloced if needed)
bool dma_hw_error; //true if DMA hardware over/underflow occurred
} spi_slave_hd_trans_priv_t;
typedef struct {
@@ -48,9 +47,6 @@ typedef struct {
spi_bus_attr_t* bus_attr;
_Atomic spi_bus_fsm_t fsm;
spi_dma_ctx_t *dma_ctx;
uint16_t internal_mem_align_size;
int max_transfer_sz;
uint32_t flags;
portMUX_TYPE int_spinlock;
intr_handle_t intr;
intr_handle_t intr_dma;
@@ -131,10 +127,11 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
host->bus_attr = (spi_bus_attr_t *)spi_bus_get_attr(host_id);
host->cs_io_num = config->spics_io_num;
ret = spicommon_dma_chan_alloc(host_id, config->dma_chan, &host->dma_ctx);
ret = spicommon_dma_chan_alloc(host_id, config->dma_chan);
if (ret != ESP_OK) {
goto cleanup;
}
host->dma_ctx = spi_bus_get_dma_ctx(host_id);
#if SOC_GDMA_SUPPORTED
gdma_strategy_config_t dma_strategy = {
.auto_update_desc = true,
@@ -145,7 +142,7 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
spi_dma_ll_enable_out_auto_wrback(SPI_LL_GET_HW(host->dma_ctx->tx_dma_chan.host_id), host->dma_ctx->tx_dma_chan.chan_id, 1);
spi_dma_ll_set_out_eof_generation(SPI_LL_GET_HW(host->dma_ctx->tx_dma_chan.host_id), host->dma_ctx->tx_dma_chan.chan_id, 1);
#endif
ret = spicommon_dma_desc_alloc(host->dma_ctx, bus_config->max_transfer_sz, &host->max_transfer_sz);
ret = spicommon_dma_desc_alloc(host_id, bus_config->max_transfer_sz, &host->bus_attr->max_transfer_sz);
if (ret != ESP_OK) {
goto cleanup;
}
@@ -163,20 +160,11 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
host->hal.dmadesc_rx[i].desc = &host->dma_ctx->dmadesc_rx[i];
}
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
size_t alignment;
esp_cache_get_alignment(MALLOC_CAP_DMA, &alignment);
host->internal_mem_align_size = alignment;
#else
host->internal_mem_align_size = 4;
#endif
ret = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, NULL, NULL);
ret = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, NULL);
if (ret != ESP_OK) {
goto cleanup;
}
spicommon_cs_initialize(host_id, config->spics_io_num, 0, !(bus_config->flags & SPICOMMON_BUSFLAG_NATIVE_PINS), NULL);
host->flags = host->bus_attr->flags; // This flag MUST be set after spicommon_bus_initialize_io is called
spi_slave_hd_hal_config_t hal_config = {
.host_id = host_id,
@@ -349,14 +337,12 @@ esp_err_t spi_slave_hd_deinit(spi_host_device_t host_id)
}
#endif
spicommon_bus_free_io_cfg(&host->bus_attr->bus_cfg, &host->bus_attr->gpio_reserve);
spicommon_bus_free_io_cfg(host_id);
spicommon_cs_free_io(host->cs_io_num, &host->bus_attr->gpio_reserve);
spicommon_bus_free(host_id);
free(host->dma_ctx->dmadesc_tx);
free(host->dma_ctx->dmadesc_rx);
free(host->hal.dmadesc_tx);
free(host->hal.dmadesc_rx);
spicommon_dma_chan_free(host->dma_ctx);
spicommon_dma_chan_free(host_id);
spicommon_bus_free(host_id);
free(host);
spihost[host_id] = NULL;
@@ -426,6 +412,25 @@ static inline SPI_SLAVE_ISR_ATTR BaseType_t intr_check_clear_callback(spi_slave_
}
return cb_awoken;
}
static void SPI_SLAVE_ISR_ATTR spi_slave_hd_tx_dma_error_check(spi_slave_hd_slot_t *host, spi_slave_hd_trans_priv_t priv_trans)
{
#if SOC_PSRAM_DMA_CAPABLE && CONFIG_SPIRAM //error checks only for psram dma
if (esp_ptr_external_ram(priv_trans.aligned_buffer) && spi_slave_hd_hal_check_clear_intr(&host->hal, SPI_LL_INTR_OUT_EMPTY)) {
priv_trans.dma_hw_error = true;
ESP_DRAM_LOGE(TAG, "DMA TX underflow detected");
}
#endif
}
static void SPI_SLAVE_ISR_ATTR spi_slave_hd_rx_dma_error_check(spi_slave_hd_slot_t *host, spi_slave_hd_trans_priv_t priv_trans)
{
#if SOC_PSRAM_DMA_CAPABLE && CONFIG_SPIRAM //error checks only for psram dma
if (esp_ptr_external_ram(priv_trans.aligned_buffer) && spi_slave_hd_hal_check_clear_intr(&host->hal, SPI_LL_INTR_IN_FULL)) {
priv_trans.dma_hw_error = true;
ESP_DRAM_LOGE(TAG, "DMA RX overflow detected");
}
#endif
}
static SPI_SLAVE_ISR_ATTR void s_spi_slave_hd_segment_isr(void *arg)
{
spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t *)arg;
@@ -450,6 +455,7 @@ static SPI_SLAVE_ISR_ATTR void s_spi_slave_hd_segment_isr(void *arg)
portEXIT_CRITICAL_ISR(&host->int_spinlock);
if (tx_done) {
spi_slave_hd_tx_dma_error_check(host, host->tx_curr_trans);
bool ret_queue = true;
if (callback->cb_sent) {
spi_slave_hd_event_t ev = {
@@ -468,15 +474,10 @@ static SPI_SLAVE_ISR_ATTR void s_spi_slave_hd_segment_isr(void *arg)
host->tx_curr_trans.trans = NULL;
}
if (rx_done) {
spicommon_dma_rx_mb(host->host_id, host->rx_curr_trans.aligned_buffer);
spi_slave_hd_rx_dma_error_check(host, host->rx_curr_trans);
bool ret_queue = true;
host->rx_curr_trans.trans->trans_len = spi_slave_hd_hal_rxdma_seg_get_len(hal);
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE //invalidate here to let user access rx data in post_cb if possible
uint16_t alignment = host->internal_mem_align_size;
uint32_t buff_len = (host->rx_curr_trans.trans->len + alignment - 1) & (~(alignment - 1));
esp_err_t ret = esp_cache_msync((void *)host->rx_curr_trans.aligned_buffer, buff_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
assert(ret == ESP_OK);
(void)ret;
#endif
if (callback->cb_recv) {
spi_slave_hd_event_t ev = {
.event = SPI_EV_RECV,
@@ -557,7 +558,7 @@ static SPI_SLAVE_ISR_ATTR void spi_slave_hd_append_tx_isr(void *arg)
BaseType_t awoken = pdFALSE;
BaseType_t ret __attribute__((unused));
spi_slave_hd_trans_priv_t ret_priv_trans;
spi_slave_hd_trans_priv_t ret_priv_trans = {};
while (1) {
bool trans_finish = false;
trans_finish = spi_slave_hd_hal_get_tx_finished_trans(hal, (void **)&ret_priv_trans.trans, &ret_priv_trans.aligned_buffer);
@@ -569,6 +570,7 @@ static SPI_SLAVE_ISR_ATTR void spi_slave_hd_append_tx_isr(void *arg)
portEXIT_CRITICAL_ISR(&host->int_spinlock);
bool ret_queue = true;
spi_slave_hd_tx_dma_error_check(host, ret_priv_trans);
if (callback->cb_sent) {
spi_slave_hd_event_t ev = {
.event = SPI_EV_SEND,
@@ -600,7 +602,7 @@ static SPI_SLAVE_ISR_ATTR void spi_slave_hd_append_rx_isr(void *arg)
BaseType_t awoken = pdFALSE;
BaseType_t ret __attribute__((unused));
spi_slave_hd_trans_priv_t ret_priv_trans;
spi_slave_hd_trans_priv_t ret_priv_trans = {};
size_t trans_len;
while (1) {
bool trans_finish = false;
@@ -613,14 +615,9 @@ static SPI_SLAVE_ISR_ATTR void spi_slave_hd_append_rx_isr(void *arg)
portEXIT_CRITICAL_ISR(&host->int_spinlock);
ret_priv_trans.trans->trans_len = trans_len;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE //invalidate here to let user access rx data in post_cb if possible
uint16_t alignment = host->internal_mem_align_size;
uint32_t buff_len = (ret_priv_trans.trans->len + alignment - 1) & (~(alignment - 1));
esp_err_t ret = esp_cache_msync((void *)ret_priv_trans.aligned_buffer, buff_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
assert(ret == ESP_OK);
(void)ret;
#endif
bool ret_queue = true;
spicommon_dma_rx_mb(host->host_id, ret_priv_trans.aligned_buffer);
spi_slave_hd_rx_dma_error_check(host, ret_priv_trans);
if (callback->cb_recv) {
spi_slave_hd_event_t ev = {
.event = SPI_EV_RECV,
@@ -689,7 +686,6 @@ static SPI_SLAVE_ISR_ATTR void s_spi_slave_hd_append_legacy_isr(void *arg)
static void s_spi_slave_hd_destroy_priv_trans(spi_host_device_t host, spi_slave_hd_trans_priv_t *priv_trans, spi_slave_chan_t chan)
{
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
spi_slave_hd_data_t *orig_trans = priv_trans->trans;
if (priv_trans->aligned_buffer != orig_trans->data) {
if (chan == SPI_SLAVE_CHAN_RX) {
@@ -697,39 +693,19 @@ static void s_spi_slave_hd_destroy_priv_trans(spi_host_device_t host, spi_slave_
}
free(priv_trans->aligned_buffer);
}
#endif //SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
}
static esp_err_t s_spi_slave_hd_setup_priv_trans(spi_host_device_t host, spi_slave_hd_trans_priv_t *priv_trans, spi_slave_chan_t chan)
{
spi_slave_hd_data_t *orig_trans = priv_trans->trans;
priv_trans->aligned_buffer = orig_trans->data;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
uint16_t alignment = spihost[host]->internal_mem_align_size;
uint32_t byte_len = orig_trans->len;
if (((uint32_t)orig_trans->data | byte_len) & (alignment - 1)) {
ESP_RETURN_ON_FALSE(orig_trans->flags & SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO, ESP_ERR_INVALID_ARG, TAG, "data buffer addr&len not align to %d byte, or not dma_capable", alignment);
byte_len = (byte_len + alignment - 1) & (~(alignment - 1)); // up align to alignment
ESP_LOGD(TAG, "Re-allocate %s buffer of len %" PRIu32 " for DMA", (chan == SPI_SLAVE_CHAN_TX) ? "TX" : "RX", byte_len);
priv_trans->aligned_buffer = heap_caps_aligned_alloc(64, byte_len, MALLOC_CAP_DMA);
if (priv_trans->aligned_buffer == NULL) {
return ESP_ERR_NO_MEM;
}
bool auto_malloc = (orig_trans->flags & SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO);
bool is_tx = (chan == SPI_SLAVE_CHAN_TX);
esp_err_t ret = spicommon_dma_setup_priv_buffer(host, (uint32_t *)orig_trans->data, orig_trans->len, is_tx, true, auto_malloc, (uint32_t **)&priv_trans->aligned_buffer);
if (ret != ESP_OK) {
s_spi_slave_hd_destroy_priv_trans(host, priv_trans, chan);
return ret;
}
if (chan == SPI_SLAVE_CHAN_TX) {
ESP_COMPILER_DIAGNOSTIC_PUSH_IGNORE("-Wanalyzer-overlapping-buffers") // TODO IDF-11086
memcpy(priv_trans->aligned_buffer, orig_trans->data, orig_trans->len);
ESP_COMPILER_DIAGNOSTIC_POP("-Wanalyzer-overlapping-buffers")
esp_err_t ret = esp_cache_msync((void *)priv_trans->aligned_buffer, byte_len, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
ESP_RETURN_ON_FALSE(ESP_OK == ret, ESP_ERR_INVALID_STATE, TAG, "mem sync c2m(writeback) fail");
} else {
esp_err_t ret = esp_cache_msync((void *)priv_trans->aligned_buffer, byte_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
ESP_RETURN_ON_FALSE(ESP_OK == ret, ESP_ERR_INVALID_STATE, TAG, "mem sync m2c(invalid) fail");
}
#endif //SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
return ESP_OK;
}
@@ -750,7 +726,7 @@ static esp_err_t get_ret_queue_result(spi_host_device_t host_id, spi_slave_chan_
s_spi_slave_hd_destroy_priv_trans(host_id, &hd_priv_trans, chan);
*out_trans = hd_priv_trans.trans;
return ESP_OK;
return hd_priv_trans.dma_hw_error ? ESP_ERR_INVALID_STATE : ESP_OK;
}
esp_err_t s_spi_slave_hd_append_txdma(spi_slave_hd_slot_t *host, uint8_t *data, size_t len, void *arg)
@@ -841,12 +817,11 @@ esp_err_t spi_slave_hd_queue_trans(spi_host_device_t host_id, spi_slave_chan_t c
spi_slave_hd_slot_t *host = spihost[host_id];
SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
SPIHD_CHECK(trans->len <= host->bus_attr->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
spi_slave_hd_trans_priv_t hd_priv_trans = {.trans = trans};
SPIHD_CHECK(ESP_OK == s_spi_slave_hd_setup_priv_trans(host_id, &hd_priv_trans, chan), "No mem to allocate new cache buffer", ESP_ERR_NO_MEM);
ESP_RETURN_ON_ERROR(s_spi_slave_hd_setup_priv_trans(host_id, &hd_priv_trans, chan), TAG, "Setup dma buffer failed");
if (chan == SPI_SLAVE_CHAN_TX) {
BaseType_t ret = xQueueSend(host->tx_trans_queue, &hd_priv_trans, timeout);
@@ -894,12 +869,11 @@ esp_err_t spi_slave_hd_append_trans(spi_host_device_t host_id, spi_slave_chan_t
SPIHD_CHECK(trans->len <= SPI_MAX_DMA_LEN, "Currently we only support transaction with data length within 4092 bytes", ESP_ERR_INVALID_ARG);
SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
SPIHD_CHECK(trans->len <= host->bus_attr->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
spi_slave_hd_trans_priv_t hd_priv_trans = {.trans = trans};
SPIHD_CHECK(ESP_OK == s_spi_slave_hd_setup_priv_trans(host_id, &hd_priv_trans, chan), "No mem to allocate new cache buffer", ESP_ERR_NO_MEM);
ESP_RETURN_ON_ERROR(s_spi_slave_hd_setup_priv_trans(host_id, &hd_priv_trans, chan), TAG, "Setup dma buffer failed");
if (chan == SPI_SLAVE_CHAN_TX) {
BaseType_t ret = xSemaphoreTake(host->tx_cnting_sem, timeout);
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2024-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2024-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -27,7 +27,7 @@
#elif CONFIG_IDF_TARGET_ESP32S3
#define IDF_TARGET_MAX_SPI_CLK_FREQ 40*1000*1000
#define IDF_TARGET_MAX_TRANS_TIME_POLL_DMA 15
#define IDF_TARGET_MAX_TRANS_TIME_POLL_DMA 17
#define IDF_TARGET_MAX_TRANS_TIME_POLL_CPU 15
#define IDF_TARGET_MAX_TRANS_TIME_INTR_DMA 32
#define IDF_TARGET_MAX_TRANS_TIME_INTR_CPU 30
@@ -42,9 +42,9 @@
#elif CONFIG_IDF_TARGET_ESP32C3
#define IDF_TARGET_MAX_SPI_CLK_FREQ 40*1000*1000
#if !CONFIG_FREERTOS_SMP // IDF-5223
#define IDF_TARGET_MAX_TRANS_TIME_POLL_DMA 15
#define IDF_TARGET_MAX_TRANS_TIME_POLL_DMA 17
#define IDF_TARGET_MAX_TRANS_TIME_POLL_CPU 15
#define IDF_TARGET_MAX_TRANS_TIME_INTR_DMA 33
#define IDF_TARGET_MAX_TRANS_TIME_INTR_DMA 35
#define IDF_TARGET_MAX_TRANS_TIME_INTR_CPU 30
#else
#define IDF_TARGET_MAX_TRANS_TIME_POLL_DMA 17
@@ -56,7 +56,7 @@
#elif CONFIG_IDF_TARGET_ESP32C6
#define IDF_TARGET_MAX_SPI_CLK_FREQ 26666*1000
#define IDF_TARGET_MAX_TRANS_TIME_INTR_DMA 35 //TODO: IDF-9551, check perform
#define IDF_TARGET_MAX_TRANS_TIME_POLL_DMA 17
#define IDF_TARGET_MAX_TRANS_TIME_POLL_DMA 19
#define IDF_TARGET_MAX_TRANS_TIME_INTR_CPU 32
#define IDF_TARGET_MAX_TRANS_TIME_POLL_CPU 15
@@ -283,7 +283,7 @@ static void test_bus_lock(bool test_flash)
#if CONFIG_IDF_TARGET_ESP32
// no need this case in other target, only esp32 need buslock to split MSPI and GPSPI2 action
TEST_CASE("spi bus lock, with flash", "[spi][test_env=external_flash]")
TEST_CASE("spi bus lock, with flash", "[external_flash][test_env=external_flash]")
{
test_bus_lock(true);
}
@@ -137,7 +137,7 @@ TEST_CASE("SPI Master clockdiv calculation routines", "[spi]")
// Test All clock source
#define TEST_CLK_BYTE_LEN 10000
#define TEST_TRANS_TIME_BIAS_RATIO (float)8.0/100 // think 8% transfer time bias as acceptable
#define TEST_TRANS_TIME_BIAS_RATIO (float)10.0/100 // think 10% transfer time bias as acceptable
TEST_CASE("SPI Master clk_source and divider accuracy", "[spi]")
{
int64_t start = 0, end = 0;
@@ -504,9 +504,9 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = spi_periph_signal[TEST_SPI_HOST].spid_iomux_pin, .miso_io_num = spi_periph_signal[TEST_SPI_HOST].spiq_iomux_pin, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = spi_periph_signal[TEST_SPI_HOST].spihd_iomux_pin, .quadwp_io_num = spi_periph_signal[TEST_SPI_HOST].spiwp_iomux_pin,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL_HEX32(flags_expected, flags_o);
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
TEST_ASSERT_EQUAL_HEX32(flags_expected, flags_o);
ESP_LOGI(TAG, "test 4 iomux output pins...");
@@ -515,9 +515,9 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = spi_periph_signal[TEST_SPI_HOST].spid_iomux_pin, .miso_io_num = spi_periph_signal[TEST_SPI_HOST].spiq_iomux_pin, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = -1, .quadwp_io_num = -1,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL_HEX32(flags_expected, flags_o);
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
TEST_ASSERT_EQUAL_HEX32(flags_expected, flags_o);
ESP_LOGI(TAG, "test 6 output pins...");
@@ -527,9 +527,9 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = spi_periph_signal[TEST_SPI_HOST].spiq_iomux_pin, .miso_io_num = spi_periph_signal[TEST_SPI_HOST].spid_iomux_pin, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = spi_periph_signal[TEST_SPI_HOST].spihd_iomux_pin, .quadwp_io_num = spi_periph_signal[TEST_SPI_HOST].spiwp_iomux_pin,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL_HEX32(flags_expected, flags_o);
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
TEST_ASSERT_EQUAL_HEX32(flags_expected, flags_o);
ESP_LOGI(TAG, "test 4 output pins...");
@@ -539,9 +539,9 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = spi_periph_signal[TEST_SPI_HOST].spiq_iomux_pin, .miso_io_num = spi_periph_signal[TEST_SPI_HOST].spid_iomux_pin, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = -1, .quadwp_io_num = -1,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL_HEX32(flags_expected, flags_o);
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
TEST_ASSERT_EQUAL_HEX32(flags_expected, flags_o);
#if TEST_SOC_HAS_INPUT_ONLY_PINS //There is no input-only pin on esp32c3 and esp32s3, so this test could be ignored.
@@ -551,7 +551,7 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = spi_periph_signal[TEST_SPI_HOST].spid_iomux_pin, .miso_io_num = INPUT_ONLY_PIN, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = spi_periph_signal[TEST_SPI_HOST].spihd_iomux_pin, .quadwp_io_num = spi_periph_signal[TEST_SPI_HOST].spiwp_iomux_pin,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL_HEX32(flags_expected, flags_o);
ESP_LOGI(TAG, "test slave 5 output pins and MISO on input-only pin...");
@@ -560,7 +560,7 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = INPUT_ONLY_PIN, .miso_io_num = spi_periph_signal[TEST_SPI_HOST].spiq_iomux_pin, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = spi_periph_signal[TEST_SPI_HOST].spihd_iomux_pin, .quadwp_io_num = spi_periph_signal[TEST_SPI_HOST].spiwp_iomux_pin,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
TEST_ASSERT_EQUAL_HEX32(flags_expected, flags_o);
ESP_LOGI(TAG, "test master 3 output pins and MOSI on input-only pin...");
@@ -570,7 +570,7 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = spi_periph_signal[TEST_SPI_HOST].spid_iomux_pin, .miso_io_num = INPUT_ONLY_PIN, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = -1, .quadwp_io_num = -1,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL_HEX32(flags_expected, flags_o);
ESP_LOGI(TAG, "test slave 3 output pins and MISO on input-only pin...");
@@ -579,7 +579,7 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = INPUT_ONLY_PIN, .miso_io_num = spi_periph_signal[TEST_SPI_HOST].spiq_iomux_pin, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = -1, .quadwp_io_num = -1,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ESP_OK(spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
TEST_ASSERT_EQUAL_HEX32(flags_expected, flags_o);
//There is no input-only pin on esp32c3 and esp32s3, so this test could be ignored.
#endif //#if TEST_SOC_HAS_INPUT_ONLY_PINS
@@ -591,8 +591,8 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = spi_periph_signal[TEST_SPI_HOST].spiq_iomux_pin, .miso_io_num = spi_periph_signal[TEST_SPI_HOST].spid_iomux_pin, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = spi_periph_signal[TEST_SPI_HOST].spihd_iomux_pin, .quadwp_io_num = spi_periph_signal[TEST_SPI_HOST].spiwp_iomux_pin,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
ESP_LOGI(TAG, "check native flag for 4 output pins...");
flags_expected = SPICOMMON_BUSFLAG_IOMUX_PINS;
@@ -601,8 +601,8 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = spi_periph_signal[TEST_SPI_HOST].spiq_iomux_pin, .miso_io_num = spi_periph_signal[TEST_SPI_HOST].spid_iomux_pin, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = -1, .quadwp_io_num = -1,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
#if TEST_SOC_HAS_INPUT_ONLY_PINS //There is no input-only pin on esp32c3 and esp32s3, so this test could be ignored.
ESP_LOGI(TAG, "check dual flag for master 5 output pins and MISO/MOSI on input-only pin...");
@@ -611,14 +611,14 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = spi_periph_signal[TEST_SPI_HOST].spid_iomux_pin, .miso_io_num = INPUT_ONLY_PIN, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = spi_periph_signal[TEST_SPI_HOST].spihd_iomux_pin, .quadwp_io_num = spi_periph_signal[TEST_SPI_HOST].spiwp_iomux_pin,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
cfg = (spi_bus_config_t) {
.mosi_io_num = INPUT_ONLY_PIN, .miso_io_num = spi_periph_signal[TEST_SPI_HOST].spiq_iomux_pin, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = spi_periph_signal[TEST_SPI_HOST].spihd_iomux_pin, .quadwp_io_num = spi_periph_signal[TEST_SPI_HOST].spiwp_iomux_pin,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
ESP_LOGI(TAG, "check dual flag for master 3 output pins and MISO/MOSI on input-only pin...");
flags_expected = SPICOMMON_BUSFLAG_DUAL | SPICOMMON_BUSFLAG_GPIO_PINS;
@@ -626,14 +626,14 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = spi_periph_signal[TEST_SPI_HOST].spid_iomux_pin, .miso_io_num = INPUT_ONLY_PIN, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = -1, .quadwp_io_num = -1,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
cfg = (spi_bus_config_t) {
.mosi_io_num = INPUT_ONLY_PIN, .miso_io_num = spi_periph_signal[TEST_SPI_HOST].spiq_iomux_pin, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = -1, .quadwp_io_num = -1,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
//There is no input-only pin on esp32c3 and esp32s3, so this test could be ignored.
#endif //#if TEST_SOC_HAS_INPUT_ONLY_PINS
@@ -643,8 +643,8 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = spi_periph_signal[TEST_SPI_HOST].spid_iomux_pin, .miso_io_num = spi_periph_signal[TEST_SPI_HOST].spiq_iomux_pin, .sclk_io_num = -1, .quadhd_io_num = spi_periph_signal[TEST_SPI_HOST].spihd_iomux_pin, .quadwp_io_num = spi_periph_signal[TEST_SPI_HOST].spiwp_iomux_pin,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
ESP_LOGI(TAG, "check mosi flag...");
flags_expected = SPICOMMON_BUSFLAG_MOSI;
@@ -652,8 +652,8 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = -1, .miso_io_num = spi_periph_signal[TEST_SPI_HOST].spiq_iomux_pin, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = spi_periph_signal[TEST_SPI_HOST].spihd_iomux_pin, .quadwp_io_num = spi_periph_signal[TEST_SPI_HOST].spiwp_iomux_pin,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
ESP_LOGI(TAG, "check miso flag...");
flags_expected = SPICOMMON_BUSFLAG_MISO;
@@ -661,8 +661,8 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = spi_periph_signal[TEST_SPI_HOST].spid_iomux_pin, .miso_io_num = -1, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = spi_periph_signal[TEST_SPI_HOST].spihd_iomux_pin, .quadwp_io_num = spi_periph_signal[TEST_SPI_HOST].spiwp_iomux_pin,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
ESP_LOGI(TAG, "check quad flag...");
flags_expected = SPICOMMON_BUSFLAG_QUAD;
@@ -670,14 +670,14 @@ TEST_CASE("spi bus setting with different pin configs", "[spi]")
.mosi_io_num = spi_periph_signal[TEST_SPI_HOST].spid_iomux_pin, .miso_io_num = spi_periph_signal[TEST_SPI_HOST].spiq_iomux_pin, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = -1, .quadwp_io_num = spi_periph_signal[TEST_SPI_HOST].spiwp_iomux_pin,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
cfg = (spi_bus_config_t) {
.mosi_io_num = spi_periph_signal[TEST_SPI_HOST].spid_iomux_pin, .miso_io_num = spi_periph_signal[TEST_SPI_HOST].spiq_iomux_pin, .sclk_io_num = spi_periph_signal[TEST_SPI_HOST].spiclk_iomux_pin, .quadhd_io_num = spi_periph_signal[TEST_SPI_HOST].spihd_iomux_pin, .quadwp_io_num = -1,
.max_transfer_sz = 8, .flags = flags_expected
};
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o, NULL));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_MASTER, &flags_o));
TEST_ASSERT_EQUAL(ESP_ERR_INVALID_ARG, spicommon_bus_initialize_io(TEST_SPI_HOST, &cfg, flags_expected | SPICOMMON_BUSFLAG_SLAVE, &flags_o));
}
TEST_CASE("SPI Master no response when switch from host1 (SPI2) to host2 (SPI3)", "[spi]")
@@ -893,6 +893,37 @@ TEST_CASE("SPI Master DMA test: length, start, not aligned", "[spi]")
TEST_ASSERT(spi_bus_free(TEST_SPI_HOST) == ESP_OK);
}
#if !SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE // targets who need cache sync don't support unaligned trans
// Verify that a transaction flagged SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL with a
// deliberately non-DMA-aligned length (5 bytes) does not write past the
// transferred region of the caller-provided RX buffer.
// Setup: MOSI is looped back onto MISO so the master receives its own TX bytes.
TEST_CASE("SPI Master DMA manually unaligned RX test", "[spi]")
{
    spi_bus_config_t buscfg = SPI_BUS_TEST_DEFAULT_CONFIG();
    buscfg.miso_io_num = buscfg.mosi_io_num;  // internal loopback: RX sees TX
    TEST_ESP_OK(spi_bus_initialize(TEST_SPI_HOST, &buscfg, SPI_DMA_CH_AUTO));
    spi_device_handle_t dev0;
    spi_device_interface_config_t devcfg = SPI_DEVICE_TEST_DEFAULT_CONFIG();
    TEST_ESP_OK(spi_bus_add_device(TEST_SPI_HOST, &devcfg, &dev0));

    WORD_ALIGNED_ATTR uint8_t tx_data[8] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
    WORD_ALIGNED_ATTR uint8_t rx_data[8] = {0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA};
    spi_transaction_t trans_cfg = {
        .tx_buffer = tx_data,
        .rx_buffer = rx_data,
        .length = 5 * 8,    // 5 bytes in bits — intentionally not word/DMA aligned
        .flags = SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL,
    };
    // Fix: `.length` is size_t, so use %zu (printing it with %d is a
    // format-specifier mismatch, i.e. undefined behavior).
    printf("Sending %zu bytes\n", trans_cfg.length / 8);
    TEST_ESP_OK(spi_device_transmit(dev0, &trans_cfg));
    ESP_LOG_BUFFER_HEX("rx", rx_data, 8);
#if !SOC_IS(ESP32)
    // A byte beyond the 5 transferred must keep its sentinel value.
    // Fix: Unity's argument order is (expected, actual) — expected first, so a
    // failure report reads correctly.
    TEST_ASSERT_EQUAL(0xAA, rx_data[6]);
#endif

    TEST_ESP_OK(spi_bus_remove_device(dev0));
    TEST_ESP_OK(spi_bus_free(TEST_SPI_HOST));
}
#endif
#if (TEST_SPI_PERIPH_NUM >= 2)
//These will only be enabled on chips with 2 or more SPI peripherals
@@ -914,7 +945,7 @@ void test_cmd_addr(spi_slave_task_context_t *slave_context, bool lsb_first)
TEST_ESP_OK(spi_bus_add_device(TEST_SPI_HOST, &devcfg, &spi));
//connecting pins to two peripherals breaks the output, fix it.
same_pin_func_sel(buscfg, devcfg.spics_io_num, 0, false);
same_pin_func_sel(TEST_SPI_HOST, TEST_SLAVE_HOST, buscfg, devcfg.spics_io_num);
for (int i = 0; i < 8; i++) {
//prepare slave tx data
@@ -1099,7 +1130,7 @@ TEST_CASE("SPI master variable dummy test", "[spi]")
spi_slave_interface_config_t slave_cfg = SPI_SLAVE_TEST_DEFAULT_CONFIG();
TEST_ESP_OK(spi_slave_initialize(TEST_SLAVE_HOST, &bus_cfg, &slave_cfg, SPI_DMA_DISABLED));
same_pin_func_sel(bus_cfg, dev_cfg.spics_io_num, 0, false);
same_pin_func_sel(TEST_SPI_HOST, TEST_SLAVE_HOST, bus_cfg, dev_cfg.spics_io_num);
uint8_t data_to_send[] = {0x12, 0x34, 0x56, 0x78};
@@ -1142,7 +1173,7 @@ TEST_CASE("SPI master hd dma TX without RX test", "[spi]")
spi_slave_interface_config_t slave_cfg = SPI_SLAVE_TEST_DEFAULT_CONFIG();
TEST_ESP_OK(spi_slave_initialize(TEST_SLAVE_HOST, &bus_cfg, &slave_cfg, SPI_DMA_CH_AUTO));
same_pin_func_sel(bus_cfg, dev_cfg.spics_io_num, 0, false);
same_pin_func_sel(TEST_SPI_HOST, TEST_SLAVE_HOST, bus_cfg, dev_cfg.spics_io_num);
uint32_t buf_size = 32;
uint8_t *mst_send_buf = spi_bus_dma_memory_alloc(TEST_SPI_HOST, buf_size, 0);
@@ -2077,11 +2108,11 @@ void test_spi_psram_trans(spi_device_handle_t dev_handle, void *tx, void *rx)
// To use psram, hardware will pass data through MSPI and GDMA to GPSPI, which need some time
// GPSPI bandwidth(speed * line_num) should always no more than PSRAM bandwidth
trans_cfg.override_freq_hz = (CONFIG_SPIRAM_SPEED / 4) * 1000 * 1000;
trans_cfg.override_freq_hz = MIN(80000000, (CONFIG_SPIRAM_SPEED / 2) * 1000 * 1000);
printf("%d TX %p RX %p len %d @%ld kHz\n", cnt, trans_cfg.tx_buffer, trans_cfg.rx_buffer, trans_len, trans_cfg.override_freq_hz / 1000);
TEST_ESP_OK(spi_device_transmit(dev_handle, &trans_cfg));
TEST_ASSERT(!(trans_cfg.flags & (SPI_TRANS_DMA_RX_FAIL | SPI_TRANS_DMA_TX_FAIL)));
spitest_cmp_or_dump(trans_cfg.tx_buffer, trans_cfg.rx_buffer, trans_len);
TEST_ASSERT_EQUAL_HEX8_ARRAY(trans_cfg.tx_buffer, trans_cfg.rx_buffer, trans_len);
trans_cfg.tx_buffer += trans_len;
trans_cfg.rx_buffer += trans_len;
trans_len ++;
@@ -2126,9 +2157,11 @@ TEST_CASE("SPI_Master: PSRAM buffer transaction via EDMA", "[spi]")
TEST_ASSERT(i ? (before - after) < 2 * TEST_EDMA_TRANS_LEN : (before - after) > 2 * TEST_EDMA_TRANS_LEN);
spi_device_polling_end(dev_handle, portMAX_DELAY);
printf("TX fail: %d, RX fail: %d\n", !!(trans_cfg.flags & SPI_TRANS_DMA_TX_FAIL), !!(trans_cfg.flags & SPI_TRANS_DMA_RX_FAIL));
#if !SOC_IS(ESP32P4) // P4 can't reach error condition since it has powerful 16bits ddr psram
TEST_ASSERT((!!i) == !!(trans_cfg.flags & (SPI_TRANS_DMA_TX_FAIL | SPI_TRANS_DMA_RX_FAIL)));
#endif
if (!i) { // data should be correct if using auto malloc
spitest_cmp_or_dump(trans_cfg.tx_buffer, trans_cfg.rx_buffer, TEST_EDMA_TRANS_LEN);
TEST_ASSERT_EQUAL_HEX8_ARRAY(trans_cfg.tx_buffer, trans_cfg.rx_buffer, TEST_EDMA_TRANS_LEN);
}
}
@@ -75,7 +75,7 @@ TEST_CASE("SPI Single Board Test SIO", "[spi]")
spi_slave_interface_config_t slv_cfg = SPI_SLAVE_TEST_DEFAULT_CONFIG();
TEST_ESP_OK(spi_slave_initialize(TEST_SLAVE_HOST, &bus_cfg, &slv_cfg, SPI_DMA_DISABLED));
same_pin_func_sel(bus_cfg, dev_cfg.spics_io_num, 0, false);
same_pin_func_sel(TEST_SPI_HOST, TEST_SLAVE_HOST, bus_cfg, dev_cfg.spics_io_num);
inner_connect(bus_cfg);
WORD_ALIGNED_ATTR uint8_t master_rx_buffer[320];
@@ -1567,15 +1567,18 @@ static void test_slave_hd_dma(void)
test_fill_random_to_buffers_dualboard(985 + mode + speed_level + i, slave_expect, slave_send, TEST_STEP_LEN);
uint32_t test_trans_len = TEST_STEP_LEN;
spi_slave_hd_data_t *ret_trans, slave_trans = {
spi_slave_hd_data_t *ret_trans, slave_tx = {
.data = slave_send,
.len = test_trans_len,
.flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO,
}, slave_rx = {
.data = slave_receive,
.len = test_trans_len,
.flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO,
};
unity_send_signal("Slave ready");
TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, &slave_trans, portMAX_DELAY));
slave_trans.data = slave_receive;
TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, &slave_trans, portMAX_DELAY));
TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, &slave_tx, portMAX_DELAY));
TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, &slave_rx, portMAX_DELAY));
TEST_ESP_OK(spi_slave_hd_get_trans_res(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, &ret_trans, portMAX_DELAY));
TEST_ESP_OK(spi_slave_hd_get_trans_res(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY));
@@ -1677,16 +1680,19 @@ static void test_slave_hd_no_dma(void)
test_fill_random_to_buffers_dualboard(911 + mode + speed_level + i, slave_expect, slave_send, SOC_SPI_MAXIMUM_BUFFER_SIZE);
uint32_t test_trans_len = SOC_SPI_MAXIMUM_BUFFER_SIZE;
spi_slave_hd_data_t *ret_trans, slave_trans = {
spi_slave_hd_data_t *ret_trans, slave_tx = {
.data = slave_send,
.len = test_trans_len,
.flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO,
}, slave_rx = {
.data = slave_receive,
.len = test_trans_len,
.flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO,
};
TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, &slave_trans, portMAX_DELAY));
TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, &slave_tx, portMAX_DELAY));
unity_send_signal("Slave ready");
TEST_ESP_OK(spi_slave_hd_get_trans_res(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, &ret_trans, portMAX_DELAY));
slave_trans.data = slave_receive;
TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, &slave_trans, portMAX_DELAY));
TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, &slave_rx, portMAX_DELAY));
unity_send_signal("Slave ready");
TEST_ESP_OK(spi_slave_hd_get_trans_res(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY));
@@ -8,6 +8,7 @@
*/
#include <string.h>
#include <sys/param.h>
#include "sdkconfig.h"
#include "unity.h"
#include "test_utils.h"
@@ -32,13 +33,9 @@ static WORD_ALIGNED_ATTR uint8_t slave_rxbuf[320];
static const uint8_t master_send[] = { 0x93, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0xaa, 0xcc, 0xff, 0xee, 0x55, 0x77, 0x88, 0x43 };
static const uint8_t slave_send[] = { 0xaa, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x13, 0x57, 0x9b, 0xdf, 0x24, 0x68, 0xac, 0xe0 };
#if (TEST_SPI_PERIPH_NUM >= 2)
//These will only be enabled on chips with 2 or more SPI peripherals
#ifndef CONFIG_SPIRAM
//This test should be removed once the timing test is merged.
static spi_host_device_t master_slave_ids[2];
static spi_device_handle_t spi;
static void custom_setup(void)
void custom_setup(spi_host_device_t master_id, spi_host_device_t slave_id)
{
//Initialize buffers
memset(master_txbuf, 0, sizeof(master_txbuf));
@@ -46,46 +43,60 @@ static void custom_setup(void)
memset(slave_txbuf, 0, sizeof(slave_txbuf));
memset(slave_rxbuf, 0, sizeof(slave_rxbuf));
//Initialize SPI Master
spi_bus_config_t buscfg = SPI_BUS_TEST_DEFAULT_CONFIG();
buscfg.flags |= SPICOMMON_BUSFLAG_GPIO_PINS;
buscfg.max_transfer_sz = 40960;
spi_device_interface_config_t devcfg = {
.clock_speed_hz = 4 * 1000 * 1000, //currently only up to 4MHz for internal connect
.mode = 0, //SPI mode 0
.spics_io_num = PIN_NUM_CS, //CS pin
.queue_size = 7, //We want to be able to queue 7 transactions at a time
.pre_cb = NULL,
.cs_ena_posttrans = 5,
.cs_ena_pretrans = 1,
};
//Initialize the SPI bus
TEST_ESP_OK(spi_bus_initialize(TEST_SPI_HOST, &buscfg, SPI_DMA_CH_AUTO));
//Attach the device to the SPI bus
TEST_ESP_OK(spi_bus_add_device(TEST_SPI_HOST, &devcfg, &spi));
//Configuration for the SPI slave interface
spi_slave_interface_config_t slvcfg = SPI_SLAVE_TEST_DEFAULT_CONFIG();
//Enable pull-ups on SPI lines so we don't detect rogue pulses when no master is connected.
gpio_set_pull_mode(PIN_NUM_MOSI, GPIO_PULLUP_ONLY);
gpio_set_pull_mode(PIN_NUM_CLK, GPIO_PULLUP_ONLY);
gpio_set_pull_mode(PIN_NUM_CS, GPIO_PULLUP_ONLY);
//Initialize SPI slave interface
TEST_ESP_OK(spi_slave_initialize(TEST_SLAVE_HOST, &buscfg, &slvcfg, SPI_DMA_CH_AUTO));
//Initialize SPI Master
if (master_id) {
//Initialize the SPI bus
TEST_ESP_OK(spi_bus_initialize(master_id, &buscfg, SPI_DMA_CH_AUTO));
//Attach the device to the SPI bus
TEST_ESP_OK(spi_bus_add_device(master_id, &devcfg, &spi));
}
if (slave_id) {
//Configuration for the SPI slave interface
spi_slave_interface_config_t slvcfg = SPI_SLAVE_TEST_DEFAULT_CONFIG();
//Enable pull-ups on SPI lines so we don't detect rogue pulses when no master is connected.
slave_pull_up(&buscfg, devcfg.spics_io_num);
//Initialize SPI slave interface
TEST_ESP_OK(spi_slave_initialize(slave_id, &buscfg, &slvcfg, SPI_DMA_CH_AUTO));
}
//Do internal connections
same_pin_func_sel(buscfg, devcfg.spics_io_num, 0, false);
same_pin_func_sel(master_id, slave_id, buscfg, devcfg.spics_io_num);
master_slave_ids[0] = master_id;
master_slave_ids[1] = slave_id;
}
static void custom_teardown(void)
void custom_teardown(void)
{
TEST_ESP_OK(spi_slave_free(TEST_SLAVE_HOST));
TEST_ESP_OK(spi_bus_remove_device(spi));
TEST_ESP_OK(spi_bus_free(TEST_SPI_HOST));
if (master_slave_ids[1]) {
TEST_ESP_OK(spi_slave_free(master_slave_ids[1]));
master_slave_ids[1] = 0;
}
if (spi != NULL) {
TEST_ESP_OK(spi_bus_remove_device(spi));
spi = NULL;
}
if (master_slave_ids[0]) {
TEST_ESP_OK(spi_bus_free(master_slave_ids[0]));
master_slave_ids[0] = 0;
}
}
#if (TEST_SPI_PERIPH_NUM >= 2)
TEST_CASE("test fullduplex slave with only RX direction", "[spi]")
{
custom_setup();
custom_setup(TEST_SPI_HOST, TEST_SLAVE_HOST);
memcpy(master_txbuf, master_send, sizeof(master_send));
@@ -132,7 +143,7 @@ TEST_CASE("test fullduplex slave with only RX direction", "[spi]")
TEST_CASE("test fullduplex slave with only TX direction", "[spi]")
{
custom_setup();
custom_setup(TEST_SPI_HOST, TEST_SLAVE_HOST);
memcpy(slave_txbuf, slave_send, sizeof(slave_send));
@@ -191,7 +202,7 @@ TEST_CASE("Test slave rx no_dma overwrite when length below/over config", "[spi]
TEST_ESP_OK(spi_slave_initialize(TEST_SLAVE_HOST, &buscfg, &slvcfg, SPI_DMA_DISABLED));
//initialize master and slave on the same pins break some of the output configs, fix them
same_pin_func_sel(buscfg, devcfg.spics_io_num, 0, false);
same_pin_func_sel(TEST_SPI_HOST, TEST_SLAVE_HOST, buscfg, devcfg.spics_io_num);
uint8_t master_tx[TEST_SLV_RX_BUF_LEN], slave_rx[TEST_SLV_RX_BUF_LEN];
for (uint8_t i = 0; i < TEST_SLV_RX_BUF_LEN; i++) {
@@ -249,16 +260,80 @@ TEST_CASE("Test slave rx no_dma overwrite when length below/over config", "[spi]
TEST_ESP_OK(spi_bus_remove_device(spidev0));
TEST_ESP_OK(spi_bus_free(TEST_SPI_HOST));
}
#endif // !CONFIG_SPIRAM
#endif // #if (TEST_SPI_PERIPH_NUM >= 2)
#if CONFIG_SPIRAM && SOC_PSRAM_DMA_CAPABLE
#define PSRAM_TRANS_LEN 16000
// Verify SPI slave full-duplex transactions whose TX/RX buffers live in external PSRAM.
// When a second SPI peripheral exists it acts as hardware master on the same pins;
// otherwise master_id 0 selects the GPIO soft master.
TEST_CASE("test slave using external ram", "[spi]")
{
    custom_setup((TEST_SPI_PERIPH_NUM >= 2) ? TEST_SLAVE_HOST : 0, TEST_SPI_HOST);

    // Slave buffers in PSRAM (32-byte aligned), master buffers in internal DMA-capable RAM.
    uint8_t *slave_ext_tx = heap_caps_aligned_calloc(32, 1, PSRAM_TRANS_LEN, MALLOC_CAP_SPIRAM);
    uint8_t *slave_ext_rx = heap_caps_aligned_calloc(32, 1, PSRAM_TRANS_LEN, MALLOC_CAP_SPIRAM);
    uint8_t *master_tx = heap_caps_malloc(PSRAM_TRANS_LEN, MALLOC_CAP_DMA);
    uint8_t *master_rx = heap_caps_malloc(PSRAM_TRANS_LEN, MALLOC_CAP_DMA);

    spi_slave_transaction_t slave_tans = {}, *out_trans;
    slave_tans.length = 8 * PSRAM_TRANS_LEN;    // transaction length is in bits
    slave_tans.tx_buffer = slave_ext_tx;
    slave_tans.rx_buffer = slave_ext_rx;
    // Let the driver re-allocate an aligned bounce buffer when the PSRAM buffer is unaligned.
    slave_tans.flags = SPI_SLAVE_TRANS_DMA_BUFFER_ALIGN_AUTO;

    spi_transaction_t master_tans = {};
    // Cap the clock so GPSPI bandwidth stays below the PSRAM bandwidth — presumably
    // to avoid DMA under/overrun during PSRAM-backed transfers; see over-freq case below.
    master_tans.override_freq_hz = MIN(60000000, (CONFIG_SPIRAM_SPEED / 2) * 1000 * 1000);
    master_tans.tx_buffer = master_tx;
    master_tans.rx_buffer = master_rx;

    for (int i = 0; i < 6; i ++) {
        test_fill_random_to_buffers_dualboard(7 + i, master_tx, slave_ext_tx, PSRAM_TRANS_LEN);
        // Shrink the length each round so the buffer end alignment varies between iterations.
        slave_tans.length -= i * 8;
        master_tans.length = slave_tans.length;
        master_tans.rxlength = slave_tans.length;
        ESP_LOGI(SLAVE_TAG, "Test freq: %ld, tx: %p, rx: %p, len: %d", master_tans.override_freq_hz, slave_tans.tx_buffer, slave_tans.rx_buffer, slave_tans.length / 8);

        // Measure heap usage across queue_trans: on i == 0 (fully aligned) the driver
        // should allocate less than one transfer's worth; on later (unaligned) rounds it
        // is expected to internally allocate an aligned copy larger than PSRAM_TRANS_LEN.
        uint32_t before = esp_get_free_heap_size();
        TEST_ESP_OK(spi_slave_queue_trans(TEST_SPI_HOST, &slave_tans, portMAX_DELAY));
        uint32_t after = esp_get_free_heap_size();
#if (TEST_SPI_PERIPH_NUM >= 2)
        spi_device_transmit(spi, &master_tans);
#else
        // No second hardware SPI peripheral: drive the transaction with the GPIO soft master.
        spi_bus_config_t buscfg = SPI_BUS_TEST_DEFAULT_CONFIG();
        spi_master_trans_impl_gpio(buscfg, PIN_NUM_CS, 0, (uint8_t *)master_tans.tx_buffer, master_tans.rx_buffer, master_tans.length / 8, false);
#endif
        ESP_LOGI(SLAVE_TAG, "slave malloc: %ld", after - before);
        TEST_ASSERT(i ? (before - after) > PSRAM_TRANS_LEN : (before - after) < PSRAM_TRANS_LEN);
        TEST_ESP_OK(spi_slave_get_trans_result(TEST_SPI_HOST, &out_trans, portMAX_DELAY));
        // Verify both directions arrived intact despite the PSRAM round trip.
        TEST_ASSERT_EQUAL(master_tans.length, slave_tans.trans_len);
        TEST_ASSERT_EQUAL_HEX8_ARRAY(slave_tans.tx_buffer, master_tans.rx_buffer, master_tans.length / 8);
        TEST_ASSERT_EQUAL_HEX8_ARRAY(master_tans.tx_buffer, slave_tans.rx_buffer, master_tans.length / 8);
        ESP_LOGI(SLAVE_TAG, "ok\n");
    }

#if (TEST_SPI_PERIPH_NUM >= 2) && !SOC_IS(ESP32P4) // P4 can't reach error condition since it has powerful 16bits ddr psram
    // Deliberately exceed the sustainable PSRAM bandwidth: fetching the result
    // must then report ESP_ERR_INVALID_STATE (data lost during the transfer).
    master_tans.override_freq_hz = 61000000; // real freq will be 40 if just config 60M
    ESP_LOGI(SLAVE_TAG, "Testing over freq: %ld", master_tans.override_freq_hz);
    TEST_ESP_OK(spi_slave_queue_trans(TEST_SPI_HOST, &slave_tans, portMAX_DELAY));
    spi_device_transmit(spi, (spi_transaction_t *)&master_tans);
    TEST_ESP_ERR(ESP_ERR_INVALID_STATE, spi_slave_get_trans_result(TEST_SPI_HOST, &out_trans, portMAX_DELAY));
#endif

    free(slave_ext_tx);
    free(slave_ext_rx);
    free(master_tx);
    free(master_rx);
    custom_teardown();
    ESP_LOGI(SLAVE_TAG, "test passed.");
}
#endif // CONFIG_SPIRAM && SOC_PSRAM_DMA_CAPABLE
TEST_CASE("test slave send unaligned", "[spi]")
{
spi_bus_config_t buscfg = SPI_BUS_TEST_DEFAULT_CONFIG();
buscfg.flags |= SPICOMMON_BUSFLAG_GPIO_PINS;
spi_slave_interface_config_t slvcfg = SPI_SLAVE_TEST_DEFAULT_CONFIG();
TEST_ESP_OK(spi_slave_initialize(TEST_SLAVE_HOST, &buscfg, &slvcfg, SPI_DMA_CH_AUTO));
same_pin_func_sel(buscfg, slvcfg.spics_io_num, 0, true);
same_pin_func_sel(0, TEST_SLAVE_HOST, buscfg, slvcfg.spics_io_num);
memcpy(master_txbuf, master_send, sizeof(master_send));
memcpy(slave_txbuf, slave_send, sizeof(slave_send));
@@ -678,7 +753,7 @@ TEST_CASE("test_spi_slave_sleep_retention", "[spi]")
buscfg.flags |= SPICOMMON_BUSFLAG_GPIO_PINS;
spi_slave_interface_config_t slvcfg = SPI_SLAVE_TEST_DEFAULT_CONFIG();
TEST_ESP_OK(spi_slave_initialize(TEST_SLAVE_HOST, &buscfg, &slvcfg, SPI_DMA_DISABLED));
same_pin_func_sel(buscfg, slvcfg.spics_io_num, 0, true);
same_pin_func_sel(0, TEST_SLAVE_HOST, buscfg, slvcfg.spics_io_num);
for (uint8_t cnt = 0; cnt < 3; cnt ++) {
printf("Going into sleep with power %s ...\n", (buscfg.flags & SPICOMMON_BUSFLAG_SLP_ALLOW_PD) ? "down" : "hold");
@@ -0,0 +1 @@
CONFIG_SPIRAM=y
@@ -0,0 +1 @@
CONFIG_SPIRAM=y
@@ -0,0 +1 @@
CONFIG_SPIRAM=y
@@ -0,0 +1 @@
CONFIG_SPIRAM=y
@@ -0,0 +1 @@
CONFIG_SPIRAM=y
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2025 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -171,7 +171,7 @@ static void test_hd_start(spi_device_handle_t *spi, int freq, const spitest_para
//when test with single board via same set of mosi, miso, clk and cs pins.
spi_bus_config_t bus_cfg = SPI_BUS_TEST_DEFAULT_CONFIG();
spi_slave_hd_slot_config_t slave_hd_cfg = SPI_SLOT_TEST_DEFAULT_CONFIG();
same_pin_func_sel(bus_cfg, slave_hd_cfg.spics_io_num, 0, false);
same_pin_func_sel(TEST_SPI_HOST, TEST_SLAVE_HOST, bus_cfg, slave_hd_cfg.spics_io_num);
wait_wrbuf_sig(ctx, 0);
wait_rdbuf_sig(ctx, 0);
@@ -529,7 +529,7 @@ TEST_CASE("test spi slave hd segment mode, master too long", "[spi][spi_slv_hd]"
//Use GPIO matrix to connect signal of master and slave via same set of pins on one board.
spi_bus_config_t bus_cfg = SPI_BUS_TEST_DEFAULT_CONFIG();
spi_slave_hd_slot_config_t slave_hd_cfg = SPI_SLOT_TEST_DEFAULT_CONFIG();
same_pin_func_sel(bus_cfg, slave_hd_cfg.spics_io_num, 0, true);
same_pin_func_sel(0, TEST_SLAVE_HOST, bus_cfg, slave_hd_cfg.spics_io_num);
const int send_buf_size = 1024;
WORD_ALIGNED_ATTR uint8_t* slave_send_buf = malloc(send_buf_size * 2);
@@ -955,7 +955,7 @@ TEST_CASE("test_spi_slave_hd_sleep_retention", "[spi]")
bus_cfg.flags |= SPICOMMON_BUSFLAG_GPIO_PINS;
spi_slave_hd_slot_config_t slave_hd_cfg = SPI_SLOT_TEST_DEFAULT_CONFIG();
TEST_ESP_OK(spi_slave_hd_init(TEST_SLAVE_HOST, &bus_cfg, &slave_hd_cfg));
same_pin_func_sel(bus_cfg, slave_hd_cfg.spics_io_num, 0, true);
same_pin_func_sel(0, TEST_SLAVE_HOST, bus_cfg, slave_hd_cfg.spics_io_num);
vTaskDelay(1);
for (uint8_t cnt = 0; cnt < 3; cnt ++) {
@@ -1033,7 +1033,7 @@ TEST_CASE("test_spi_slave_hd_append_sleep_retention", "[spi]")
spi_slave_hd_slot_config_t slave_hd_cfg = SPI_SLOT_TEST_DEFAULT_CONFIG();
slave_hd_cfg.flags |= SPI_SLAVE_HD_APPEND_MODE;
TEST_ESP_OK(spi_slave_hd_init(TEST_SLAVE_HOST, &bus_cfg, &slave_hd_cfg));
same_pin_func_sel(bus_cfg, slave_hd_cfg.spics_io_num, 0, true);
same_pin_func_sel(0, TEST_SLAVE_HOST, bus_cfg, slave_hd_cfg.spics_io_num);
vTaskDelay(1);
for (uint8_t i = 0; i < 2; i++) {
@@ -1085,3 +1085,66 @@ TEST_CASE("test_spi_slave_hd_append_sleep_retention", "[spi]")
#endif
}
#endif //SOC_LIGHT_SLEEP_SUPPORTED
#if CONFIG_SPIRAM && SOC_PSRAM_DMA_CAPABLE
// Dispatch tables indexed by test mode (0 = segment mode, 1 = append mode) so the
// same test body can exercise both slave HD transaction APIs.
static esp_err_t (*hd_trans[2])(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, uint32_t timeout) = {
    spi_slave_hd_queue_trans, spi_slave_hd_append_trans
};
static esp_err_t (*hd_get_trans_res[2])(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, uint32_t timeout) = {
    spi_slave_hd_get_trans_res, spi_slave_hd_get_append_trans_res
};
// Transfer length in bytes for the slave HD PSRAM/EDMA test below.
#define TEST_PSRAM_TRANS_LEN 1000
// Verify slave HD (half-duplex) transfers with slave data buffers in PSRAM, once in
// segment mode and once in append mode. A GPIO soft master (master_id 0) drives the
// slave through the shared pins.
TEST_CASE("test slave hd edma segment and append mode", "[spi]")
{
    // Master buffers in internal RAM; slave buffers deliberately placed in PSRAM.
    uint8_t *mst_tx = heap_caps_malloc(TEST_PSRAM_TRANS_LEN, MALLOC_CAP_DEFAULT);
    uint8_t *mst_rx = heap_caps_malloc(TEST_PSRAM_TRANS_LEN, MALLOC_CAP_DEFAULT);
    uint8_t *slv_tx = heap_caps_malloc(TEST_PSRAM_TRANS_LEN, MALLOC_CAP_SPIRAM);
    uint8_t *slv_rx = heap_caps_malloc(TEST_PSRAM_TRANS_LEN, MALLOC_CAP_SPIRAM);
    spi_slave_hd_data_t *ret_trans, tx_data = {
        .data = slv_tx,
        .len = TEST_PSRAM_TRANS_LEN,
    }, rx_data = {
        .data = slv_rx,
        .len = TEST_PSRAM_TRANS_LEN,
        // RX side has DMA alignment requirements; let the driver re-align the PSRAM buffer.
        .flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO,
    };
    for (int i = 0; i < 2; i++) {   // i == 0: segment mode, i == 1: append mode
        printf("\ntest slave hd edma %s mode\n", i ? "append" : "segment");
        test_fill_random_to_buffers_dualboard(i + 1, mst_tx, slv_tx, TEST_PSRAM_TRANS_LEN);
        spi_bus_config_t bus_cfg = SPI_BUS_TEST_DEFAULT_CONFIG();
        bus_cfg.max_transfer_sz = 4092 * 4; // append mode require at least 2 for tx and 2 for rx dma descs
        bus_cfg.flags |= SPICOMMON_BUSFLAG_GPIO_PINS;
        spi_slave_hd_slot_config_t slave_hd_cfg = SPI_SLOT_TEST_DEFAULT_CONFIG();
        slave_hd_cfg.flags |= i ? SPI_SLAVE_HD_APPEND_MODE : 0;
        TEST_ESP_OK(spi_slave_hd_init(TEST_SLAVE_HOST, &bus_cfg, &slave_hd_cfg));
        // master_id 0 routes the shared pins to the soft master instead of a hardware host.
        same_pin_func_sel(0, TEST_SLAVE_HOST, bus_cfg, slave_hd_cfg.spics_io_num);
        vTaskDelay(1);

        // Queue slave TX and RX transactions through the mode-specific API.
        TEST_ESP_OK(hd_trans[i](TEST_SLAVE_HOST, SPI_SLAVE_CHAN_TX, &tx_data, portMAX_DELAY));
        TEST_ESP_OK(hd_trans[i](TEST_SLAVE_HOST, SPI_SLAVE_CHAN_RX, &rx_data, portMAX_DELAY));
        // Master write segment: slave receives into the PSRAM rx buffer.
        printf("tx %d bytes\n", TEST_PSRAM_TRANS_LEN);
        essl_sspi_hd_dma_trans_seg(bus_cfg, slave_hd_cfg.spics_io_num, 0, false, mst_tx, TEST_PSRAM_TRANS_LEN, -1);
        TEST_ESP_OK(hd_get_trans_res[i](TEST_SLAVE_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY));
        // Master read segment: slave sends from the PSRAM tx buffer.
        printf("rx %d bytes\n", TEST_PSRAM_TRANS_LEN);
        essl_sspi_hd_dma_trans_seg(bus_cfg, slave_hd_cfg.spics_io_num, 0, true, mst_rx, TEST_PSRAM_TRANS_LEN, -1);
        TEST_ESP_OK(hd_get_trans_res[i](TEST_SLAVE_HOST, SPI_SLAVE_CHAN_TX, &ret_trans, portMAX_DELAY));

        // Compare both directions (dumps the buffers on mismatch).
        spitest_cmp_or_dump(slv_rx, mst_tx, TEST_PSRAM_TRANS_LEN);
        spitest_cmp_or_dump(mst_rx, slv_tx, TEST_PSRAM_TRANS_LEN);

        spi_slave_hd_deinit(TEST_SLAVE_HOST);
        printf("test done\n");
    }
    free(mst_tx);
    free(mst_rx);
    free(slv_tx);
    free(slv_rx);
}
#endif //CONFIG_SPIRAM && SOC_PSRAM_DMA_CAPABLE
+3 -1
View File
@@ -47,7 +47,9 @@ void gpio_hal_iomux_out(gpio_hal_context_t *hal, uint32_t gpio_num, int func)
void gpio_hal_matrix_in(gpio_hal_context_t *hal, uint32_t gpio_num, uint32_t signal_idx, bool in_inv)
{
gpio_ll_input_enable(hal->dev, gpio_num);
if (gpio_num < GPIO_NUM_MAX) { // skip const_0/1 io num from enabling input
gpio_ll_input_enable(hal->dev, gpio_num);
}
#if HAL_CONFIG(GPIO_USE_ROM_API)
esp_rom_gpio_connect_in_signal(gpio_num, signal_idx, in_inv);
#else
@@ -172,6 +172,23 @@ void spi_slave_hal_user_start(const spi_slave_hal_context_t *hal);
*/
bool spi_slave_hal_usr_is_done(spi_slave_hal_context_t* hal);
/**
* Get SPI interrupt bits status by mask
*
* @param hal Context of the HAL layer.
* @param mask Mask of the interrupt bits to check.
* @return True if the masked interrupts are set, false otherwise.
*/
bool spi_slave_hal_get_intr_status(spi_slave_hal_context_t *hal, uint32_t mask);
/**
* Clear SPI interrupt bits by mask
*
* @param hal Context of the HAL layer.
* @param mask Mask of the interrupt bits to clear.
*/
void spi_slave_hal_clear_intr_status(spi_slave_hal_context_t *hal, uint32_t mask);
/**
* Post transaction operations, fetch data from the buffer and recorded the length.
*
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -138,6 +138,15 @@ void spi_slave_hd_hal_init(spi_slave_hd_hal_context_t *hal, const spi_slave_hd_h
*/
bool spi_slave_hd_hal_check_clear_event(spi_slave_hd_hal_context_t* hal, spi_event_t ev);
/**
* @brief Check and clear the interrupt by mask
*
* @param hal Context of the HAL layer
* @param mask Mask of the interrupt bits to check
* @return True if the masked interrupts are set, false otherwise
*/
bool spi_slave_hd_hal_check_clear_intr(spi_slave_hd_hal_context_t *hal, uint32_t mask);
/**
* @brief Check and clear the interrupt of one event.
*
@@ -99,3 +99,15 @@ bool spi_slave_hal_dma_need_reset(const spi_slave_hal_context_t *hal)
#endif // SPI_LL_SLAVE_NEEDS_RESET_WORKAROUND
return ret;
}
#if SOC_SPI_SUPPORT_SLAVE_HD_VER2
// Return true if any interrupt bit selected by `mask` is set (thin wrapper over the LL layer).
bool spi_slave_hal_get_intr_status(spi_slave_hal_context_t *hal, uint32_t mask)
{
    return spi_ll_get_intr(hal->hw, mask);
}

// Clear the interrupt bits selected by `mask` (thin wrapper over the LL layer).
void spi_slave_hal_clear_intr_status(spi_slave_hal_context_t *hal, uint32_t mask)
{
    spi_ll_clear_intr(hal->hw, mask);
}
#endif
+9 -5
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -154,16 +154,20 @@ static spi_ll_intr_t get_event_intr(spi_slave_hd_hal_context_t *hal, spi_event_t
return intr;
}
bool spi_slave_hd_hal_check_clear_event(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
bool spi_slave_hd_hal_check_clear_intr(spi_slave_hd_hal_context_t *hal, uint32_t mask)
{
spi_ll_intr_t intr = get_event_intr(hal, ev);
if (spi_ll_get_intr(hal->dev, intr)) {
spi_ll_clear_intr(hal->dev, intr);
if (spi_ll_get_intr(hal->dev, mask)) {
spi_ll_clear_intr(hal->dev, mask);
return true;
}
return false;
}
bool spi_slave_hd_hal_check_clear_event(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
{
return spi_slave_hd_hal_check_clear_intr(hal, get_event_intr(hal, ev));
}
bool spi_slave_hd_hal_check_disable_event(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
{
//The trans_done interrupt is used for the workaround when some interrupt is not writable
@@ -852,6 +852,10 @@ Please note that the ISR is disabled during flash operation by default. To keep
4. ``cs_ena_pretrans`` is not compatible with the Command and Address phases of full-duplex transactions.
.. only:: esp32
5. If DMA is enabled, the RX buffer should be word-aligned (starting from a 32-bit boundary and having a length of multiples of 4 bytes). Otherwise, DMA may overwrite the data in the unaligned part.
Application Examples
--------------------
@@ -79,6 +79,14 @@ As not every transaction requires both writing and reading data, you can choose
A Host should not start a transaction before its Device is ready for receiving data. It is recommended to use another GPIO pin for a handshake signal to sync the Devices. For more details, see :ref:`transaction_interval`.
.. only:: SOC_PSRAM_DMA_CAPABLE
Using PSRAM for DMA transfer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The SPI Slave driver supports using PSRAM for DMA transfers. You can directly pass a PSRAM address as :cpp:member:`spi_slave_transaction_t::tx_buffer` or :cpp:member:`spi_slave_transaction_t::rx_buffer`. The ``rx_buffer`` has alignment requirements; allocating it with :cpp:func:`heap_caps_malloc` automatically satisfies them. For buffers whose allocation you cannot control, you can also set the :c:macro:`SPI_SLAVE_TRANS_DMA_BUFFER_ALIGN_AUTO` flag so that the driver automatically re-allocates a properly aligned buffer from PSRAM.
Note that this feature shares the MSPI bus bandwidth (bus frequency * bus width), so the transmission bandwidth of the host to this device should be less than the PSRAM bandwidth, otherwise **data may be lost**, and the ``spi_slave_transmit`` function will return the :c:macro:`ESP_ERR_INVALID_STATE` error.
Driver Usage
------------
@@ -63,6 +63,12 @@ Send/Receive Data by DMA Channels
To send data to the master through the sending DMA channel, the application should properly wrap the data in an :cpp:type:`spi_slave_hd_data_t` descriptor structure before calling :cpp:func:`spi_slave_hd_queue_trans` with the data descriptor and the channel argument of :cpp:enumerator:`SPI_SLAVE_CHAN_TX`. The pointers to descriptors are stored in the queue, and the data is sent to the master in the same order they are enqueued using :cpp:func:`spi_slave_hd_queue_trans`, upon receiving the master's ``Rd_DMA`` command.
.. only:: SOC_PSRAM_DMA_CAPABLE
The driver supports using PSRAM for DMA transfers. You can directly pass a PSRAM address as :cpp:member:`spi_slave_hd_data_t::data`. For the DMA receive channel, the memory address and transfer length have alignment requirements; allocating memory with :cpp:func:`heap_caps_malloc` automatically satisfies them. For buffers whose allocation you cannot control, you can also set the :c:macro:`SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO` flag so that the driver automatically re-allocates a properly aligned buffer from PSRAM.
Note that this feature shares the MSPI bus bandwidth (bus frequency * bus width), so the transmission bandwidth of the host to this device should be less than the PSRAM bandwidth, otherwise **data may be lost**, and then getting the transmission result will return the :c:macro:`ESP_ERR_INVALID_STATE` error.
The application should check the result of data sending by calling :cpp:func:`spi_slave_hd_get_trans_res` with the channel set as :cpp:enumerator:`SPI_SLAVE_CHAN_TX`. This function blocks until the transaction with the command ``Rd_DMA`` from the master successfully completes (or timeout). The ``out_trans`` argument of the function outputs the pointer of the data descriptor which is just finished, providing information about the sending.
Receiving data from the master through the receiving DMA channel is quite similar. The application calls :cpp:func:`spi_slave_hd_queue_trans` with proper data descriptor and the channel argument of :cpp:enumerator:`SPI_SLAVE_CHAN_RX`. And the application calls the :cpp:func:`spi_slave_hd_get_trans_res` later to get the descriptor to the receiving buffer before it handles the data in the receiving buffer.
@@ -852,6 +852,10 @@ GPSPI 外设的时钟源可以通过设置 :cpp:member:`spi_device_interface_con
4. 全双工传输事务模式中,命令阶段和地址阶段与 ``cs_ena_pretrans`` 不兼容。
.. only:: esp32
5. 若启用了 DMA,则 RX 缓冲区应该以字对齐(从 32 位边界开始,字节长度为 4 的倍数)。否则 DMA 可能覆盖未对齐部分的数据。
应用示例
-------------------
@@ -79,6 +79,14 @@ SPI 传输事务
主机应在从机设备准备好接收数据之后再进行传输事务。建议使用另外一个 GPIO 管脚作为握手信号来同步设备。更多细节,请参阅 :ref:`transaction_interval`
.. only:: SOC_PSRAM_DMA_CAPABLE
使用 PSRAM 的传输
^^^^^^^^^^^^^^^^^^
SPI Slave 驱动程序支持使用 PSRAM 进行传输。直接传入 PSRAM 地址作为 :cpp:member:`spi_slave_transaction_t::tx_buffer`:cpp:member:`spi_slave_transaction_t::rx_buffer` 即可。对于 rx_buffer ,其地址和传输长度有对齐要求,使用 :cpp:func:`heap_caps_malloc` 分配内存可以自动处理对齐要求。对于不能控制的内存,也可以使用 :c:macro:`SPI_SLAVE_TRANS_DMA_BUFFER_ALIGN_AUTO` 标志位,驱动会自动从 PSRAM 重新分配满足要求的内存。
请注意该功能共享 MSPI 总线带宽(总线频率 * 总线位宽),因此主机对该设备的传输带宽应小于 PSRAM 带宽,否则 **可能会丢失传输数据**,此时 ``spi_slave_transmit`` 函数将会返回 :c:macro:`ESP_ERR_INVALID_STATE` 错误。
使用驱动程序
------------
@@ -63,6 +63,12 @@ SPI 从机半双工模式
要通过 DMA 通道向主设备发送数据,应用程序需要先将数据正确地封装在 :cpp:type:`spi_slave_hd_data_t` 描述符结构体中,然后再将数据描述符和通道参数 :cpp:enumerator:`SPI_SLAVE_CHAN_TX` 传递给 :cpp:func:`spi_slave_hd_queue_trans`。数据描述符的指针存储在队列中,一旦接收到主设备的 Rd_DMA 命令,就会按照调用 :cpp:func:`spi_slave_hd_queue_trans` 时数据进入队列的顺序,依次将数据发送给主设备。
.. only:: SOC_PSRAM_DMA_CAPABLE
驱动程序支持使用 PSRAM 进行传输。直接传入 PSRAM 地址作为 :cpp:member:`spi_slave_hd_data_t::data` 即可。对于 DMA 接收通道,其内存地址和传输长度有对齐要求,使用 :cpp:func:`heap_caps_malloc` 分配内存可以自动处理对齐要求。对于不能控制的内存,也可以使用 :c:macro:`SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO` 标志位,驱动会自动从 PSRAM 重新分配满足要求的内存。
请注意该功能共享 MSPI 总线带宽(总线频率 * 总线位宽),因此主机对该设备的传输带宽应小于 PSRAM 带宽,否则 **可能会丢失传输数据**,此时获取传输结果会返回 :c:macro:`ESP_ERR_INVALID_STATE` 错误。
应用程序需要检查数据发送的结果。为此,应用程序可以调用 :cpp:func:`spi_slave_hd_get_trans_res`,并将通道参数设置为 :cpp:enumerator:`SPI_SLAVE_CHAN_TX`。该函数将阻塞程序,直到主设备发起的 Rd_DMA 命令事务成功完成或超时。函数中的参数 ``out_trans`` 将输出刚刚完成的数据描述符的指针,从而提供有关已完成的发送操作的信息。
通过 DMA 通道从主设备接收数据的操作与发送数据类似。应用程序需要使用正确的数据描述符调用 :cpp:func:`spi_slave_hd_queue_trans`,并将通道参数设置为 :cpp:enumerator:`SPI_SLAVE_CHAN_RX`。随后,应用程序调用 :cpp:func:`spi_slave_hd_get_trans_res` 获取接收 buffer 的描述符,然后处理接收 buffer 中的数据。