diff --git a/components/esp_driver_spi/include/driver/spi_slave_hd.h b/components/esp_driver_spi/include/driver/spi_slave_hd.h index df9a10517f..73271a2043 100644 --- a/components/esp_driver_spi/include/driver/spi_slave_hd.h +++ b/components/esp_driver_spi/include/driver/spi_slave_hd.h @@ -166,7 +166,7 @@ esp_err_t spi_slave_hd_queue_trans(spi_host_device_t host_id, spi_slave_chan_t c * - ESP_OK: on success * - ESP_ERR_INVALID_ARG: Function is not valid * - ESP_ERR_TIMEOUT: There's no transaction done before timeout - * - ESP_ERR_INVALID_STATE: Function called in invalid state. This API should be called under segment mode. + * - ESP_ERR_INVALID_STATE: Function called in invalid state. This API should be called under segment mode. Or DMA hardware over/underflow occurred. */ esp_err_t spi_slave_hd_get_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, uint32_t timeout); @@ -223,7 +223,7 @@ esp_err_t spi_slave_hd_append_trans(spi_host_device_t host_id, spi_slave_chan_t * - ESP_OK: on success * - ESP_ERR_INVALID_ARG: Function is not valid * - ESP_ERR_TIMEOUT: There's no transaction done before timeout - * - ESP_ERR_INVALID_STATE: Function called in invalid state. This API should be called under append mode. + * - ESP_ERR_INVALID_STATE: Function called in invalid state. This API should be called under append mode. Or DMA hardware over/underflow occurred. 
*/ esp_err_t spi_slave_hd_get_append_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, uint32_t timeout); diff --git a/components/esp_driver_spi/src/gpspi/spi_slave_hd.c b/components/esp_driver_spi/src/gpspi/spi_slave_hd.c index 543db39fcc..2876afd060 100644 --- a/components/esp_driver_spi/src/gpspi/spi_slave_hd.c +++ b/components/esp_driver_spi/src/gpspi/spi_slave_hd.c @@ -40,6 +40,7 @@ typedef struct { spi_slave_hd_data_t *trans; //original trans void *aligned_buffer; //actually trans buffer (re-malloced if needed) + bool dma_hw_error; //true if DMA hardware over/underflow occurred } spi_slave_hd_trans_priv_t; typedef struct { @@ -48,7 +49,6 @@ typedef struct { spi_bus_attr_t* bus_attr; _Atomic spi_bus_fsm_t fsm; spi_dma_ctx_t *dma_ctx; - uint16_t internal_mem_align_size; portMUX_TYPE int_spinlock; intr_handle_t intr; intr_handle_t intr_dma; @@ -162,14 +162,6 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b host->hal.dmadesc_rx[i].desc = &host->dma_ctx->dmadesc_rx[i]; } -#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE - size_t alignment; - esp_cache_get_alignment(MALLOC_CAP_DMA, &alignment); - host->internal_mem_align_size = alignment; -#else - host->internal_mem_align_size = 4; -#endif - ret = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, NULL); if (ret != ESP_OK) { goto cleanup; } @@ -422,6 +414,25 @@ static inline SPI_SLAVE_ISR_ATTR BaseType_t intr_check_clear_callback(spi_slave_ } return cb_awoken; } +static void SPI_SLAVE_ISR_ATTR spi_slave_hd_tx_dma_error_check(spi_slave_hd_slot_t *host, spi_slave_hd_trans_priv_t *priv_trans) +{ +#if SOC_PSRAM_DMA_CAPABLE && CONFIG_SPIRAM //error checks only for psram dma + if (esp_ptr_external_ram(priv_trans->aligned_buffer) && spi_slave_hd_hal_check_clear_intr(&host->hal, SPI_LL_INTR_OUT_EMPTY)) { + priv_trans->dma_hw_error = true; + ESP_DRAM_LOGE(TAG, "DMA TX underflow detected"); + } +#endif +} +static void 
SPI_SLAVE_ISR_ATTR spi_slave_hd_rx_dma_error_check(spi_slave_hd_slot_t *host, spi_slave_hd_trans_priv_t *priv_trans) +{ +#if SOC_PSRAM_DMA_CAPABLE && CONFIG_SPIRAM //error checks only for psram dma + if (esp_ptr_external_ram(priv_trans->aligned_buffer) && spi_slave_hd_hal_check_clear_intr(&host->hal, SPI_LL_INTR_IN_FULL)) { + priv_trans->dma_hw_error = true; + ESP_DRAM_LOGE(TAG, "DMA RX overflow detected"); + } +#endif +} + static SPI_SLAVE_ISR_ATTR void s_spi_slave_hd_segment_isr(void *arg) { spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t *)arg; @@ -446,6 +457,7 @@ static SPI_SLAVE_ISR_ATTR void s_spi_slave_hd_segment_isr(void *arg) portEXIT_CRITICAL_ISR(&host->int_spinlock); if (tx_done) { + spi_slave_hd_tx_dma_error_check(host, &host->tx_curr_trans); bool ret_queue = true; if (callback->cb_sent) { spi_slave_hd_event_t ev = { @@ -464,15 +476,9 @@ static SPI_SLAVE_ISR_ATTR void s_spi_slave_hd_segment_isr(void *arg) host->tx_curr_trans.trans = NULL; } if (rx_done) { + spi_slave_hd_rx_dma_error_check(host, &host->rx_curr_trans); bool ret_queue = true; host->rx_curr_trans.trans->trans_len = spi_slave_hd_hal_rxdma_seg_get_len(hal); -#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE //invalidate here to let user access rx data in post_cb if possible - uint16_t alignment = host->internal_mem_align_size; - uint32_t buff_len = (host->rx_curr_trans.trans->len + alignment - 1) & (~(alignment - 1)); - esp_err_t ret = esp_cache_msync((void *)host->rx_curr_trans.aligned_buffer, buff_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C); - assert(ret == ESP_OK); - (void)ret; -#endif if (callback->cb_recv) { spi_slave_hd_event_t ev = { .event = SPI_EV_RECV, @@ -553,7 +559,7 @@ static SPI_SLAVE_ISR_ATTR void spi_slave_hd_append_tx_isr(void *arg) BaseType_t awoken = pdFALSE; BaseType_t ret __attribute__((unused)); - spi_slave_hd_trans_priv_t ret_priv_trans; + spi_slave_hd_trans_priv_t ret_priv_trans = {}; while (1) { bool trans_finish = false; trans_finish = spi_slave_hd_hal_get_tx_finished_trans(hal, (void 
**)&ret_priv_trans.trans, &ret_priv_trans.aligned_buffer); @@ -565,6 +571,7 @@ static SPI_SLAVE_ISR_ATTR void spi_slave_hd_append_tx_isr(void *arg) portEXIT_CRITICAL_ISR(&host->int_spinlock); bool ret_queue = true; + spi_slave_hd_tx_dma_error_check(host, &ret_priv_trans); if (callback->cb_sent) { spi_slave_hd_event_t ev = { .event = SPI_EV_SEND, @@ -596,7 +603,7 @@ static SPI_SLAVE_ISR_ATTR void spi_slave_hd_append_rx_isr(void *arg) BaseType_t awoken = pdFALSE; BaseType_t ret __attribute__((unused)); - spi_slave_hd_trans_priv_t ret_priv_trans; + spi_slave_hd_trans_priv_t ret_priv_trans = {}; size_t trans_len; while (1) { bool trans_finish = false; @@ -609,14 +616,8 @@ static SPI_SLAVE_ISR_ATTR void spi_slave_hd_append_rx_isr(void *arg) portEXIT_CRITICAL_ISR(&host->int_spinlock); ret_priv_trans.trans->trans_len = trans_len; -#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE //invalidate here to let user access rx data in post_cb if possible - uint16_t alignment = host->internal_mem_align_size; - uint32_t buff_len = (ret_priv_trans.trans->len + alignment - 1) & (~(alignment - 1)); - esp_err_t ret = esp_cache_msync((void *)ret_priv_trans.aligned_buffer, buff_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C); - assert(ret == ESP_OK); - (void)ret; -#endif bool ret_queue = true; + spi_slave_hd_rx_dma_error_check(host, &ret_priv_trans); if (callback->cb_recv) { spi_slave_hd_event_t ev = { .event = SPI_EV_RECV, @@ -685,7 +686,6 @@ static SPI_SLAVE_ISR_ATTR void s_spi_slave_hd_append_legacy_isr(void *arg) static void s_spi_slave_hd_destroy_priv_trans(spi_host_device_t host, spi_slave_hd_trans_priv_t *priv_trans, spi_slave_chan_t chan) { -#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE spi_slave_hd_data_t *orig_trans = priv_trans->trans; if (priv_trans->aligned_buffer != orig_trans->data) { if (chan == SPI_SLAVE_CHAN_RX) { @@ -693,39 +693,19 @@ static void s_spi_slave_hd_destroy_priv_trans(spi_host_device_t host, spi_slave_ } free(priv_trans->aligned_buffer); } -#endif //SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE } 
static esp_err_t s_spi_slave_hd_setup_priv_trans(spi_host_device_t host, spi_slave_hd_trans_priv_t *priv_trans, spi_slave_chan_t chan) { spi_slave_hd_data_t *orig_trans = priv_trans->trans; - priv_trans->aligned_buffer = orig_trans->data; - -#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE - uint16_t alignment = spihost[host]->internal_mem_align_size; - uint32_t byte_len = orig_trans->len; - - if (((uint32_t)orig_trans->data | byte_len) & (alignment - 1)) { - ESP_RETURN_ON_FALSE(orig_trans->flags & SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO, ESP_ERR_INVALID_ARG, TAG, "data buffer addr&len not align to %d byte, or not dma_capable", alignment); - byte_len = (byte_len + alignment - 1) & (~(alignment - 1)); // up align to alignment - ESP_LOGD(TAG, "Re-allocate %s buffer of len %" PRIu32 " for DMA", (chan == SPI_SLAVE_CHAN_TX) ? "TX" : "RX", byte_len); - priv_trans->aligned_buffer = heap_caps_aligned_alloc(64, byte_len, MALLOC_CAP_DMA); - if (priv_trans->aligned_buffer == NULL) { - return ESP_ERR_NO_MEM; - } + bool auto_malloc = (orig_trans->flags & SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO); + bool is_tx = (chan == SPI_SLAVE_CHAN_TX); + esp_err_t ret = spicommon_dma_setup_priv_buffer(host, (uint32_t *)orig_trans->data, orig_trans->len, is_tx, true, auto_malloc, (uint32_t **)&priv_trans->aligned_buffer); + if (ret != ESP_OK) { + s_spi_slave_hd_destroy_priv_trans(host, priv_trans, chan); + return ret; } - if (chan == SPI_SLAVE_CHAN_TX) { - ESP_COMPILER_DIAGNOSTIC_PUSH_IGNORE("-Wanalyzer-overlapping-buffers") // TODO IDF-11086 - memcpy(priv_trans->aligned_buffer, orig_trans->data, orig_trans->len); - ESP_COMPILER_DIAGNOSTIC_POP("-Wanalyzer-overlapping-buffers") - esp_err_t ret = esp_cache_msync((void *)priv_trans->aligned_buffer, byte_len, ESP_CACHE_MSYNC_FLAG_DIR_C2M); - ESP_RETURN_ON_FALSE(ESP_OK == ret, ESP_ERR_INVALID_STATE, TAG, "mem sync c2m(writeback) fail"); - } else { - esp_err_t ret = esp_cache_msync((void *)priv_trans->aligned_buffer, byte_len, 
ESP_CACHE_MSYNC_FLAG_DIR_M2C); - ESP_RETURN_ON_FALSE(ESP_OK == ret, ESP_ERR_INVALID_STATE, TAG, "mem sync m2c(invalid) fail"); - } -#endif //SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE return ESP_OK; } @@ -746,7 +726,7 @@ static esp_err_t get_ret_queue_result(spi_host_device_t host_id, spi_slave_chan_ s_spi_slave_hd_destroy_priv_trans(host_id, &hd_priv_trans, chan); *out_trans = hd_priv_trans.trans; - return ESP_OK; + return hd_priv_trans.dma_hw_error ? ESP_ERR_INVALID_STATE : ESP_OK; } esp_err_t s_spi_slave_hd_append_txdma(spi_slave_hd_slot_t *host, uint8_t *data, size_t len, void *arg) @@ -837,12 +817,11 @@ esp_err_t spi_slave_hd_queue_trans(spi_host_device_t host_id, spi_slave_chan_t c spi_slave_hd_slot_t *host = spihost[host_id]; SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE); - SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG); SPIHD_CHECK(trans->len <= host->bus_attr->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG); SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG); spi_slave_hd_trans_priv_t hd_priv_trans = {.trans = trans}; - SPIHD_CHECK(ESP_OK == s_spi_slave_hd_setup_priv_trans(host_id, &hd_priv_trans, chan), "No mem to allocate new cache buffer", ESP_ERR_NO_MEM); + ESP_RETURN_ON_ERROR(s_spi_slave_hd_setup_priv_trans(host_id, &hd_priv_trans, chan), TAG, "Setup dma buffer failed"); if (chan == SPI_SLAVE_CHAN_TX) { BaseType_t ret = xQueueSend(host->tx_trans_queue, &hd_priv_trans, timeout); @@ -890,12 +869,11 @@ esp_err_t spi_slave_hd_append_trans(spi_host_device_t host_id, spi_slave_chan_t SPIHD_CHECK(trans->len <= SPI_MAX_DMA_LEN, "Currently we only support transaction with data length within 4092 bytes", ESP_ERR_INVALID_ARG); SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE); - 
SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG); SPIHD_CHECK(trans->len <= host->bus_attr->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG); SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG); spi_slave_hd_trans_priv_t hd_priv_trans = {.trans = trans}; - SPIHD_CHECK(ESP_OK == s_spi_slave_hd_setup_priv_trans(host_id, &hd_priv_trans, chan), "No mem to allocate new cache buffer", ESP_ERR_NO_MEM); + ESP_RETURN_ON_ERROR(s_spi_slave_hd_setup_priv_trans(host_id, &hd_priv_trans, chan), TAG, "Setup dma buffer failed"); if (chan == SPI_SLAVE_CHAN_TX) { BaseType_t ret = xSemaphoreTake(host->tx_cnting_sem, timeout); diff --git a/components/esp_driver_spi/test_apps/param/main/test_spi_param.c b/components/esp_driver_spi/test_apps/param/main/test_spi_param.c index 160c711e10..b52f4c3887 100644 --- a/components/esp_driver_spi/test_apps/param/main/test_spi_param.c +++ b/components/esp_driver_spi/test_apps/param/main/test_spi_param.c @@ -1567,15 +1567,18 @@ static void test_slave_hd_dma(void) test_fill_random_to_buffers_dualboard(985 + mode + speed_level + i, slave_expect, slave_send, TEST_STEP_LEN); uint32_t test_trans_len = TEST_STEP_LEN; - spi_slave_hd_data_t *ret_trans, slave_trans = { + spi_slave_hd_data_t *ret_trans, slave_tx = { .data = slave_send, .len = test_trans_len, .flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO, + }, slave_rx = { + .data = slave_receive, + .len = test_trans_len, + .flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO, }; unity_send_signal("Slave ready"); - TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, &slave_trans, portMAX_DELAY)); - slave_trans.data = slave_receive; - TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, &slave_trans, portMAX_DELAY)); + TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, &slave_tx, portMAX_DELAY)); + 
TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, &slave_rx, portMAX_DELAY)); TEST_ESP_OK(spi_slave_hd_get_trans_res(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, &ret_trans, portMAX_DELAY)); TEST_ESP_OK(spi_slave_hd_get_trans_res(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY)); @@ -1677,16 +1680,19 @@ static void test_slave_hd_no_dma(void) test_fill_random_to_buffers_dualboard(911 + mode + speed_level + i, slave_expect, slave_send, SOC_SPI_MAXIMUM_BUFFER_SIZE); uint32_t test_trans_len = SOC_SPI_MAXIMUM_BUFFER_SIZE; - spi_slave_hd_data_t *ret_trans, slave_trans = { + spi_slave_hd_data_t *ret_trans, slave_tx = { .data = slave_send, .len = test_trans_len, .flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO, + }, slave_rx = { + .data = slave_receive, + .len = test_trans_len, + .flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO, }; - TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, &slave_trans, portMAX_DELAY)); + TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, &slave_tx, portMAX_DELAY)); unity_send_signal("Slave ready"); TEST_ESP_OK(spi_slave_hd_get_trans_res(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, &ret_trans, portMAX_DELAY)); - slave_trans.data = slave_receive; - TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, &slave_trans, portMAX_DELAY)); + TEST_ESP_OK(spi_slave_hd_queue_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, &slave_rx, portMAX_DELAY)); unity_send_signal("Slave ready"); TEST_ESP_OK(spi_slave_hd_get_trans_res(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY)); diff --git a/components/esp_driver_spi/test_apps/slave_hd/main/test_spi_slave_hd.c b/components/esp_driver_spi/test_apps/slave_hd/main/test_spi_slave_hd.c index 4ff0817d02..9bf1b7061e 100644 --- a/components/esp_driver_spi/test_apps/slave_hd/main/test_spi_slave_hd.c +++ b/components/esp_driver_spi/test_apps/slave_hd/main/test_spi_slave_hd.c @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: 2021-2025 Espressif Systems 
(Shanghai) CO LTD + * SPDX-FileCopyrightText: 2021-2026 Espressif Systems (Shanghai) CO LTD * * SPDX-License-Identifier: Apache-2.0 */ @@ -1085,3 +1085,66 @@ TEST_CASE("test_spi_slave_hd_append_sleep_retention", "[spi]") #endif } #endif //SOC_LIGHT_SLEEP_SUPPORTED + +#if CONFIG_SPIRAM && SOC_PSRAM_DMA_CAPABLE +// function pointers for segment and append mode +static esp_err_t (*hd_trans[2])(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, uint32_t timeout) = { + spi_slave_hd_queue_trans, spi_slave_hd_append_trans +}; +static esp_err_t (*hd_get_trans_res[2])(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, uint32_t timeout) = { + spi_slave_hd_get_trans_res, spi_slave_hd_get_append_trans_res +}; + +#define TEST_PSRAM_TRANS_LEN 1000 +TEST_CASE("test slave hd edma segment and append mode", "[spi]") +{ + uint8_t *mst_tx = heap_caps_malloc(TEST_PSRAM_TRANS_LEN, MALLOC_CAP_DEFAULT); + uint8_t *mst_rx = heap_caps_malloc(TEST_PSRAM_TRANS_LEN, MALLOC_CAP_DEFAULT); + uint8_t *slv_tx = heap_caps_malloc(TEST_PSRAM_TRANS_LEN, MALLOC_CAP_SPIRAM); + uint8_t *slv_rx = heap_caps_malloc(TEST_PSRAM_TRANS_LEN, MALLOC_CAP_SPIRAM); + spi_slave_hd_data_t *ret_trans, tx_data = { + .data = slv_tx, + .len = TEST_PSRAM_TRANS_LEN, + }, rx_data = { + .data = slv_rx, + .len = TEST_PSRAM_TRANS_LEN, + .flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO, + }; + + for (int i = 0; i < 2; i++) { + printf("\ntest slave hd edma %s mode\n", i ? "append" : "segment"); + test_fill_random_to_buffers_dualboard(i + 1, mst_tx, slv_tx, TEST_PSRAM_TRANS_LEN); + + spi_bus_config_t bus_cfg = SPI_BUS_TEST_DEFAULT_CONFIG(); + bus_cfg.max_transfer_sz = 4092 * 4; // append mode require at least 2 for tx and 2 for rx dma descs + bus_cfg.flags |= SPICOMMON_BUSFLAG_GPIO_PINS; + spi_slave_hd_slot_config_t slave_hd_cfg = SPI_SLOT_TEST_DEFAULT_CONFIG(); + slave_hd_cfg.flags |= i ? 
SPI_SLAVE_HD_APPEND_MODE : 0; + TEST_ESP_OK(spi_slave_hd_init(TEST_SLAVE_HOST, &bus_cfg, &slave_hd_cfg)); + same_pin_func_sel(0, TEST_SLAVE_HOST, bus_cfg, slave_hd_cfg.spics_io_num); + vTaskDelay(1); + + TEST_ESP_OK(hd_trans[i](TEST_SLAVE_HOST, SPI_SLAVE_CHAN_TX, &tx_data, portMAX_DELAY)); + TEST_ESP_OK(hd_trans[i](TEST_SLAVE_HOST, SPI_SLAVE_CHAN_RX, &rx_data, portMAX_DELAY)); + + // tx append transaction + printf("tx %d bytes\n", TEST_PSRAM_TRANS_LEN); + essl_sspi_hd_dma_trans_seg(bus_cfg, slave_hd_cfg.spics_io_num, 0, false, mst_tx, TEST_PSRAM_TRANS_LEN, -1); + TEST_ESP_OK(hd_get_trans_res[i](TEST_SLAVE_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY)); + + // rx append transaction + printf("rx %d bytes\n", TEST_PSRAM_TRANS_LEN); + essl_sspi_hd_dma_trans_seg(bus_cfg, slave_hd_cfg.spics_io_num, 0, true, mst_rx, TEST_PSRAM_TRANS_LEN, -1); + TEST_ESP_OK(hd_get_trans_res[i](TEST_SLAVE_HOST, SPI_SLAVE_CHAN_TX, &ret_trans, portMAX_DELAY)); + + spitest_cmp_or_dump(slv_rx, mst_tx, TEST_PSRAM_TRANS_LEN); + spitest_cmp_or_dump(mst_rx, slv_tx, TEST_PSRAM_TRANS_LEN); + spi_slave_hd_deinit(TEST_SLAVE_HOST); + printf("test done\n"); + } + free(mst_tx); + free(mst_rx); + free(slv_tx); + free(slv_rx); +} +#endif //CONFIG_SPIRAM && SOC_PSRAM_DMA_CAPABLE diff --git a/components/esp_hal_gpspi/include/hal/spi_slave_hd_hal.h b/components/esp_hal_gpspi/include/hal/spi_slave_hd_hal.h index b718a8be6d..225e2f283a 100644 --- a/components/esp_hal_gpspi/include/hal/spi_slave_hd_hal.h +++ b/components/esp_hal_gpspi/include/hal/spi_slave_hd_hal.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD + * SPDX-FileCopyrightText: 2015-2026 Espressif Systems (Shanghai) CO LTD * * SPDX-License-Identifier: Apache-2.0 */ @@ -138,6 +138,15 @@ void spi_slave_hd_hal_init(spi_slave_hd_hal_context_t *hal, const spi_slave_hd_h */ bool spi_slave_hd_hal_check_clear_event(spi_slave_hd_hal_context_t* hal, spi_event_t ev); +/** + * @brief Check and clear the interrupt 
by mask + * + * @param hal Context of the HAL layer + * @param mask Mask of the interrupt bits to check + * @return True if the masked interrupts are set, false otherwise + */ +bool spi_slave_hd_hal_check_clear_intr(spi_slave_hd_hal_context_t *hal, uint32_t mask); + /** * @brief Check and clear the interrupt of one event. * diff --git a/components/esp_hal_gpspi/spi_slave_hd_hal.c b/components/esp_hal_gpspi/spi_slave_hd_hal.c index 8d2816e197..d6278a0421 100644 --- a/components/esp_hal_gpspi/spi_slave_hd_hal.c +++ b/components/esp_hal_gpspi/spi_slave_hd_hal.c @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD + * SPDX-FileCopyrightText: 2015-2026 Espressif Systems (Shanghai) CO LTD * * SPDX-License-Identifier: Apache-2.0 */ @@ -154,16 +154,20 @@ static spi_ll_intr_t get_event_intr(spi_slave_hd_hal_context_t *hal, spi_event_t return intr; } -bool spi_slave_hd_hal_check_clear_event(spi_slave_hd_hal_context_t *hal, spi_event_t ev) +bool spi_slave_hd_hal_check_clear_intr(spi_slave_hd_hal_context_t *hal, uint32_t mask) { - spi_ll_intr_t intr = get_event_intr(hal, ev); - if (spi_ll_get_intr(hal->dev, intr)) { - spi_ll_clear_intr(hal->dev, intr); + if (spi_ll_get_intr(hal->dev, mask)) { + spi_ll_clear_intr(hal->dev, mask); return true; } return false; } +bool spi_slave_hd_hal_check_clear_event(spi_slave_hd_hal_context_t *hal, spi_event_t ev) +{ + return spi_slave_hd_hal_check_clear_intr(hal, get_event_intr(hal, ev)); +} + bool spi_slave_hd_hal_check_disable_event(spi_slave_hd_hal_context_t *hal, spi_event_t ev) { //The trans_done interrupt is used for the workaround when some interrupt is not writable diff --git a/docs/en/api-reference/peripherals/spi_slave_hd.rst b/docs/en/api-reference/peripherals/spi_slave_hd.rst index e849c700fe..9453a3c952 100644 --- a/docs/en/api-reference/peripherals/spi_slave_hd.rst +++ b/docs/en/api-reference/peripherals/spi_slave_hd.rst @@ -63,6 +63,12 @@ Send/Receive Data by DMA Channels To send data to 
the master through the sending DMA channel, the application should properly wrap the data in an :cpp:type:`spi_slave_hd_data_t` descriptor structure before calling :cpp:func:`spi_slave_hd_queue_trans` with the data descriptor and the channel argument of :cpp:enumerator:`SPI_SLAVE_CHAN_TX`. The pointers to descriptors are stored in the queue, and the data is sent to the master in the same order they are enqueued using :cpp:func:`spi_slave_hd_queue_trans`, upon receiving the master's ``Rd_DMA`` command. +.. only:: SOC_PSRAM_DMA_CAPABLE + + The driver supports using PSRAM for DMA transfer. Directly passing a PSRAM address as :cpp:member:`spi_slave_hd_data_t::data` is supported. For the DMA receive channel, its memory address and transfer length have alignment requirements, using :cpp:func:`heap_caps_malloc` to allocate memory can automatically handle the alignment requirements. For the buffers that you can not control, you can also use the :c:macro:`SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO` flag to enable driver to automatically align the buffer from PSRAM. + + Note that this feature shares the MSPI bus bandwidth (bus frequency * bus width), so the transmission bandwidth of the host to this device should be less than the PSRAM bandwidth, otherwise **data may be lost**, and then getting the transmission result will return the :c:macro:`ESP_ERR_INVALID_STATE` error. + The application should check the result of data sending by calling :cpp:func:`spi_slave_hd_get_trans_res` with the channel set as :cpp:enumerator:`SPI_SLAVE_CHAN_TX`. This function blocks until the transaction with the command ``Rd_DMA`` from the master successfully completes (or timeout). The ``out_trans`` argument of the function outputs the pointer of the data descriptor which is just finished, providing information about the sending. Receiving data from the master through the receiving DMA channel is quite similar. 
The application calls :cpp:func:`spi_slave_hd_queue_trans` with proper data descriptor and the channel argument of :cpp:enumerator:`SPI_SLAVE_CHAN_RX`. And the application calls the :cpp:func:`spi_slave_hd_get_trans_res` later to get the descriptor to the receiving buffer before it handles the data in the receiving buffer. diff --git a/docs/zh_CN/api-reference/peripherals/spi_slave_hd.rst b/docs/zh_CN/api-reference/peripherals/spi_slave_hd.rst index e1ce48cb33..bb7b79e18e 100644 --- a/docs/zh_CN/api-reference/peripherals/spi_slave_hd.rst +++ b/docs/zh_CN/api-reference/peripherals/spi_slave_hd.rst @@ -63,6 +63,12 @@ SPI 从机半双工模式 要通过 DMA 通道向主设备发送数据,应用程序需要先将数据正确地封装在 :cpp:type:`spi_slave_hd_data_t` 描述符结构体中,然后再将数据描述符和通道参数 :cpp:enumerator:`SPI_SLAVE_CHAN_TX` 传递给 :cpp:func:`spi_slave_hd_queue_trans`。数据描述符的指针存储在队列中,一旦接收到主设备的 Rd_DMA 命令,就会按照调用 :cpp:func:`spi_slave_hd_queue_trans` 时数据进入队列的顺序,依次将数据发送给主设备。 +.. only:: SOC_PSRAM_DMA_CAPABLE + + 驱动程序支持使用 PSRAM 进行传输。直接传入 PSRAM 地址作为 :cpp:member:`spi_slave_hd_data_t::data` 即可。对于 DMA 接收通道,其内存地址和传输长度有对齐要求,使用 :cpp:func:`heap_caps_malloc` 分配内存可以自动处理对齐要求。对于不能控制的内存,也可以使用 :c:macro:`SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO` 标志位,驱动会自动从 PSRAM 重新分配满足要求的内存。 + + 请注意该功能共享 MSPI 总线带宽(总线频率 * 总线位宽),因此主机对该设备的传输带宽应小于 PSRAM 带宽,否则 **可能会丢失传输数据**,此时获取传输结果会返回 :c:macro:`ESP_ERR_INVALID_STATE` 错误。 + 应用程序需要检查数据发送的结果。为此,应用程序可以调用 :cpp:func:`spi_slave_hd_get_trans_res`,并将通道参数设置为 :cpp:enumerator:`SPI_SLAVE_CHAN_TX`。该函数将阻塞程序,直到主设备发起的 Rd_DMA 命令事务成功完成或超时。函数中的参数 ``out_trans`` 将输出刚刚完成的数据描述符的指针,从而提供有关已完成的发送操作的信息。 通过 DMA 通道从主设备接收数据的操作与发送数据类似。应用程序需要使用正确的数据描述符调用 :cpp:func:`spi_slave_hd_queue_trans`,并将通道参数设置为 :cpp:enumerator:`SPI_SLAVE_CHAN_RX`。随后,应用程序调用 :cpp:func:`spi_slave_hd_get_trans_res` 获取接收 buffer 的描述符,然后处理接收 buffer 中的数据。