Merge branch 'feature/update_gdbstub_v5.5' into 'release/v5.5'

gdbstub: backport fixes (v5.5)

See merge request espressif/esp-idf!46201
This commit is contained in:
Jiang Jiang Jian
2026-03-31 14:15:09 +08:00
17 changed files with 1427 additions and 317 deletions
+2 -1
View File
@@ -18,7 +18,8 @@ if(CONFIG_IDF_TARGET_ARCH_XTENSA)
list(APPEND priv_includes "src/port/xtensa/include")
elseif(CONFIG_IDF_TARGET_ARCH_RISCV)
list(APPEND srcs "src/port/riscv/gdbstub_riscv.c"
"src/port/riscv/rv_decode.c")
"src/port/riscv/rv_decode.c"
"src/port/riscv/target_xml.c")
list(APPEND priv_includes "src/port/riscv/include")
endif()
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -91,6 +91,13 @@ void gdbstub_handle_uart_int(esp_gdbstub_frame_t *regs_frame);
* @param dst pointer to the GDB register file
*/
void esp_gdbstub_tcb_to_regfile(TaskHandle_t tcb, esp_gdbstub_gdb_regfile_t *dst);
/**
* Find the TCB that owns the given exception frame
* @param frame pointer to the exception frame
* @return pointer to the TCB, or NULL if not found
*/
const StaticTask_t *esp_gdbstub_find_tcb_by_frame(const esp_gdbstub_frame_t *frame);
#endif // CONFIG_ESP_GDBSTUB_SUPPORT_TASKS
@@ -133,6 +140,9 @@ void esp_gdbstub_send_char(char c);
/** Send a string as part of the packet */
void esp_gdbstub_send_str(const char *s);
/** Send a string of limited length as part of a packet */
void esp_gdbstub_send_str_n(const char *c, size_t len);
/** Send a hex value as part of the packet */
void esp_gdbstub_send_hex(int val, int bits);
@@ -159,10 +169,19 @@ void esp_gdbstub_clear_step(void);
void esp_gdbstub_do_step(esp_gdbstub_frame_t *regs_frame);
void esp_gdbstub_trigger_cpu(void);
#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
/**
* Check if a watchpoint triggered the current debug exception.
* @param[out] addr Address of the triggered watchpoint (only valid if return is true)
* @return true if a watchpoint triggered, false otherwise (breakpoint/step/other)
*/
bool esp_gdbstub_get_watchpoint_trigger_addr(uint32_t *addr);
#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
/**
* Write a value to register in frame
* @param frame gdbstub frame
* @param reg_index register index, depends on architecture
* @param value 32 bit data value
*/
void esp_gdbstub_set_register(esp_gdbstub_frame_t *frame, uint32_t reg_index, uint32_t value);
void esp_gdbstub_set_register(esp_gdbstub_frame_t *frame, uint32_t reg_index, uint32_t *value_ptr);
+148 -28
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -24,6 +24,16 @@
#include "freertos/task.h"
#include "sdkconfig.h"
#if GDBSTUB_QXFER_FEATURES_ENABLED
#define GDBSTUB_QXFER_SUPPORTED_STR ";qXfer:features:read+"
#else
#define GDBSTUB_QXFER_SUPPORTED_STR ""
#endif
#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
static void send_watchpoint_reason(void);
#endif
#ifdef CONFIG_ESP_GDBSTUB_SUPPORT_TASKS
static inline int gdb_tid_to_task_index(int tid);
static inline int task_index_to_gdb_tid(int tid);
@@ -105,6 +115,9 @@ static void send_reason(void)
esp_gdbstub_send_start();
esp_gdbstub_send_char('T');
esp_gdbstub_send_hex(s_scratch.signal, 8);
#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
send_watchpoint_reason();
#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
esp_gdbstub_send_end();
}
@@ -193,7 +206,9 @@ int getActiveTaskNum(void);
int __swrite(struct _reent *, void *, const char *, int);
int gdbstub__swrite(struct _reent *data1, void *data2, const char *buff, int len);
volatile esp_gdbstub_frame_t *temp_regs_frame;
volatile esp_gdbstub_frame_t *selected_task_frame; /* related to task that has been chosen via GDB */
volatile esp_gdbstub_frame_t *running_task_frame; /* related to task that was interrupted. GDBStub implements all-stop mode,
and this frame is needed to continue executing the task that was interrupted. */
#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
static int bp_count = 0;
@@ -208,6 +223,35 @@ static bool not_send_reason = false;
static bool process_gdb_kill = false;
static bool gdb_debug_int = false;
/**
 * Detect if a watchpoint triggered and append the corresponding
 * GDB RSP stop-reply field (watch/rwatch/awatch) to the current packet.
 *
 * Must be called while a 'T' stop-reply packet is being assembled, i.e.
 * between esp_gdbstub_send_start() and esp_gdbstub_send_end().
 */
static void send_watchpoint_reason(void)
{
    uint32_t wp_addr = 0;
    /* Not a watchpoint hit (breakpoint/step/other) — append nothing. */
    if (!esp_gdbstub_get_watchpoint_trigger_addr(&wp_addr)) {
        return;
    }
    /* Default to a write watchpoint; refine below if the address matches a
     * watchpoint we set ourselves and therefore know the access type of. */
    const char *type_str = "watch";
    for (size_t i = 0; i < SOC_CPU_WATCHPOINTS_NUM; i++) {
        if (wp_list[i] == wp_addr) {
            if (wp_access[i] == ESP_CPU_WATCHPOINT_LOAD) {
                type_str = "rwatch"; /* read watchpoint */
            } else if (wp_access[i] == ESP_CPU_WATCHPOINT_ACCESS) {
                type_str = "awatch"; /* access (read/write) watchpoint */
            }
            break;
        }
    }
    /* RSP stop-reply field format: "<type>:<hex address>;" */
    esp_gdbstub_send_str(type_str);
    esp_gdbstub_send_char(':');
    esp_gdbstub_send_hex(wp_addr, 32);
    esp_gdbstub_send_char(';');
}
/**
* @brief Handle UART interrupt
*
@@ -220,7 +264,7 @@ static bool gdb_debug_int = false;
*/
void gdbstub_handle_uart_int(esp_gdbstub_frame_t *regs_frame)
{
temp_regs_frame = regs_frame;
running_task_frame = selected_task_frame = regs_frame;
not_send_reason = step_in_progress;
if (step_in_progress == true) {
esp_gdbstub_send_str_packet("S05");
@@ -297,7 +341,7 @@ void gdbstub_handle_debug_int(esp_gdbstub_frame_t *regs_frame)
{
bp_count = 0;
wp_count = 0;
temp_regs_frame = regs_frame;
running_task_frame = selected_task_frame = regs_frame;
gdb_debug_int = true;
not_send_reason = step_in_progress;
if (step_in_progress == true) {
@@ -367,6 +411,9 @@ void gdbstub_handle_debug_int(esp_gdbstub_frame_t *regs_frame)
* */
/* One-time gdbstub initialization: installs the UART0 interrupt handler used
 * by the runtime gdbstub and performs port-specific setup. */
void esp_gdbstub_init(void)
{
#ifdef CONFIG_ESP_GDBSTUB_SUPPORT_TASKS
    /* No task has been marked as the panicked one yet. */
    s_scratch.paniced_task_index = GDBSTUB_CUR_TASK_INDEX_UNKNOWN;
#endif
    /* NOTE(review): esp_intr_alloc() return value is ignored — confirm that a
     * failure here (gdbstub never receiving UART interrupts) is acceptable. */
    esp_intr_alloc(ETS_UART0_INTR_SOURCE, 0, esp_gdbstub_int, NULL, NULL);
    esp_gdbstub_init_dports();
}
@@ -374,6 +421,36 @@ void esp_gdbstub_init(void)
#ifdef CONFIG_ESP_GDBSTUB_SUPPORT_TASKS
/**
 * Find the TCB that owns the given exception frame.
 *
 * @param frame pointer to the exception frame
 * @return the owning TCB, or the current task's TCB if no task stack
 *         contains the frame (never NULL while the scheduler is running)
 */
const StaticTask_t *esp_gdbstub_find_tcb_by_frame(const esp_gdbstub_frame_t *frame)
{
    /*
     * Determine which task owns the current frame.
     * Perform a search across all tasks, as GDBstub may not include task information
     * if configured with ESP_GDBSTUB_SUPPORT_TASKS disabled.
     */
    TaskIterator_t xTaskIter = {0}; /* Zero-init makes iteration start at the first task list */
    while (xTaskGetNext(&xTaskIter) != -1) {
        StaticTask_t *tcb = (StaticTask_t *)xTaskIter.pxTaskHandle;
        /*
         * For the currently running task, pxTopOfStack is not up-to-date — it is only
         * updated on the next context switch. Therefore we cannot rely on it to match
         * the frame to a task. Instead, check if the frame lies within the task's stack.
         */
        if ((uintptr_t)frame >= (uintptr_t)tcb->pxDummy6 /* pxStack */ &&
            (uintptr_t)frame <= (uintptr_t)tcb->pxDummy8 /* pxEndOfStack */) {
            return tcb;
        }
    }
    /*
     * If no task stack contains the frame, it is likely allocated on the interrupt/exception
     * stack (e.g. during a panic event). In that case, return the current task handle,
     * which is the task that was running on this core when the exception occurred.
     */
    return (const StaticTask_t *)xTaskGetCurrentTaskHandle();
}
/** Send string as a hex to uart */
static void esp_gdbstub_send_str_as_hex(const char *str)
{
@@ -654,7 +731,7 @@ static void handle_S_command(const unsigned char *cmd, int len)
static void handle_s_command(const unsigned char *cmd, int len)
{
step_in_progress = true;
esp_gdbstub_do_step((esp_gdbstub_frame_t *)temp_regs_frame);
esp_gdbstub_do_step((esp_gdbstub_frame_t *)running_task_frame);
}
/** Step ... */
@@ -667,31 +744,37 @@ static void handle_C_command(const unsigned char *cmd, int len)
/* Set Register ... */
static void handle_P_command(const unsigned char *cmd, int len)
{
uint32_t reg_index = 0;
if (cmd[1] == '=') {
reg_index = esp_gdbstub_gethex(&cmd, 4);
cmd++;
} else if (cmd[2] == '=') {
reg_index = esp_gdbstub_gethex(&cmd, 8);
cmd++;
cmd++;
} else {
esp_gdbstub_send_str_packet("E02");
uint32_t reg_index = esp_gdbstub_gethex(&cmd, -1);
if (*cmd != '=') {
esp_gdbstub_send_str_packet("E.unexpected P packet format");
return;
}
uint32_t addr = esp_gdbstub_gethex(&cmd, -1);
/* The address comes with inverted byte order.*/
uint8_t *addr_ptr = (uint8_t *)&addr;
uint32_t p_address = 0;
uint8_t *p_addr_ptr = (uint8_t *)&p_address;
p_addr_ptr[3] = addr_ptr[0];
p_addr_ptr[2] = addr_ptr[1];
p_addr_ptr[1] = addr_ptr[2];
p_addr_ptr[0] = addr_ptr[3];
cmd++; /* skip '=' */
esp_gdbstub_set_register((esp_gdbstub_frame_t *)temp_regs_frame, reg_index, p_address);
/* In general, we operate with 32-bit sized values here.
* However, some registers may be larger. For example, q registers are 128-bit sized. */
#if GDBSTUB_MAX_REGISTER_SIZE > 4
uint8_t value[GDBSTUB_MAX_REGISTER_SIZE * sizeof(uint32_t)] = {0};
uint32_t *value_ptr = (uint32_t *)value;
for(int i = 0; i < sizeof(value); i++) {
value[i] = (uint8_t) esp_gdbstub_gethex(&cmd, 8);
if (*cmd == 0)
break;
}
#else
uint32_t value;
uint32_t *value_ptr = &value;
value = gdbstub_hton(esp_gdbstub_gethex(&cmd, -1));
#endif
if (*cmd != 0) {
esp_gdbstub_send_str_packet("E.unexpected register size");
return;
}
esp_gdbstub_set_register((esp_gdbstub_frame_t *)selected_task_frame, reg_index, value_ptr);
/* Convert current register file to GDB*/
esp_gdbstub_frame_to_regfile((esp_gdbstub_frame_t *)temp_regs_frame, gdb_local_regfile);
esp_gdbstub_frame_to_regfile((esp_gdbstub_frame_t *)selected_task_frame, gdb_local_regfile);
/* Send OK response */
esp_gdbstub_send_str_packet("OK");
}
@@ -701,10 +784,42 @@ static void handle_P_command(const unsigned char *cmd, int len)
static void handle_qSupported_command(const unsigned char *cmd, int len)
{
esp_gdbstub_send_start();
esp_gdbstub_send_str("qSupported:multiprocess+;swbreak-;hwbreak+;qRelocInsn+;fork-events+;vfork-events+;exec-events+;vContSupported+;no-resumed+");
esp_gdbstub_send_str("qSupported:multiprocess+;swbreak-;hwbreak+;qRelocInsn+;fork-events+;vfork-events+;exec-events+;vContSupported+;no-resumed+" GDBSTUB_QXFER_SUPPORTED_STR);
esp_gdbstub_send_end();
}
#if GDBSTUB_QXFER_FEATURES_ENABLED
/**
 * Send one chunk of a qXfer object transfer.
 *
 * Replies with an "l" packet when @p offset is at/past the end of the object,
 * otherwise with an "m"-prefixed packet carrying at most @p length bytes of
 * @p ptr starting at @p offset (GDB keeps requesting until it sees "l").
 */
static void qXfer_data(const char *ptr, uint32_t size, uint32_t offset, uint32_t length)
{
    if (offset >= size) {
        /* Nothing left to transfer. */
        esp_gdbstub_send_str_packet("l");
        return;
    }
    const size_t chunk = MIN(length, size - offset);
    esp_gdbstub_send_start();
    esp_gdbstub_send_char('m');
    esp_gdbstub_send_str_n(ptr + offset, chunk);
    esp_gdbstub_send_end();
}
/**
 * Handle a "qXfer:..." request.
 *
 * Only "qXfer:features:read:target.xml:<offset>,<length>" is supported; any
 * other qXfer object gets an empty reply (RSP convention for "unsupported").
 *
 * Fix: the unsupported-request branch previously fell through after sending
 * the empty packet, advancing `cmd` past the annex prefix into unrelated
 * bytes, parsing bogus offset/length values and emitting a second, spurious
 * data packet. An early return is required after the empty reply.
 */
static void handle_qXfer_command(const unsigned char *cmd, int len)
{
    uint32_t offset;
    uint32_t length;
    const char *target_feature_str = "qXfer:features:read:target.xml:";
    const int target_feature_str_len = strlen(target_feature_str);
    if (!command_name_matches(target_feature_str, cmd, target_feature_str_len)) {
        /* Send empty packet for not supported requests. */
        esp_gdbstub_send_str_packet(NULL);
        return;
    }
    cmd += target_feature_str_len;
    offset = esp_gdbstub_gethex(&cmd, -1);
    cmd++; /* skip ',' */
    length = esp_gdbstub_gethex(&cmd, -1);
    qXfer_data(target_xml, strlen(target_xml), offset, length);
}
#endif // GDBSTUB_QXFER_FEATURES_ENABLED
#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
/** Handle a command received from gdb */
@@ -782,6 +897,10 @@ int esp_gdbstub_handle_command(unsigned char *cmd, int len)
return GDBSTUB_ST_CONT;
} else if (command_name_matches("qSupported", cmd, 10)) {
handle_qSupported_command(cmd, len);
#if GDBSTUB_QXFER_FEATURES_ENABLED
} else if (command_name_matches("qXfer", cmd, 5)) {
handle_qXfer_command(cmd, len);
#endif // GDBSTUB_QXFER_FEATURES_ENABLED
#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
#if CONFIG_ESP_GDBSTUB_SUPPORT_TASKS
} else if (s_scratch.state != GDBSTUB_TASK_SUPPORT_DISABLED) {
@@ -964,12 +1083,13 @@ static void set_active_task(size_t index)
esp_gdbstub_frame_to_regfile(&s_scratch.paniced_frame, &s_scratch.regfile);
} else {
/* Get the registers from TCB.
* FIXME: for the task currently running on the other CPU, extracting the registers from TCB
* TODO: IDF-12550. For the task currently running on the other CPU, extracting the registers from TCB
* isn't valid. Need to use some IPC mechanism to obtain the registers of the other CPU.
*/
TaskHandle_t handle = NULL;
get_task_handle(index, &handle);
if (handle != NULL) {
selected_task_frame = ((StaticTask_t *)handle)->pxDummy1 /* pxTopOfStack */;
esp_gdbstub_tcb_to_regfile(handle, &s_scratch.regfile);
}
}
+12 -2
View File
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -33,6 +33,16 @@ void esp_gdbstub_send_char(char c)
}
}
/** Send at most `len` characters of the NUL-terminated string `c` as part of
 *  a packet; stops early when the string terminator is reached. */
void esp_gdbstub_send_str_n(const char *c, size_t len)
{
    for (size_t i = 0; i < len && c[i] != '\0'; i++) {
        esp_gdbstub_send_char(c[i]);
    }
}
// Send a string as part of a packet
void esp_gdbstub_send_str(const char *c)
{
@@ -84,7 +94,7 @@ uint32_t esp_gdbstub_gethex(const unsigned char **ptr, int bits)
char c;
no = bits / 4;
if (bits == -1) {
no = 64;
no = 32 / 4;
}
for (i = 0; i < no; i++) {
c = **ptr;
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -7,12 +7,18 @@
#include <string.h>
#include "esp_gdbstub.h"
#include "esp_gdbstub_common.h"
#include "soc/soc_caps.h"
#include "esp_cpu.h"
#include "esp_ipc_isr.h"
#include "rv_decode.h"
#include "sdkconfig.h"
#include "esp_private/crosscore_int.h"
#include "esp_private/freertos_debug.h"
#include "freertos/portmacro.h"
#include "freertos/FreeRTOS.h"
#include "freertos/FreeRTOSConfig.h"
#define GDBSTUB_CSA_NOT_INITIALIZED ((void *) 0xFFFFFFFF)
extern volatile esp_gdbstub_frame_t *temp_regs_frame;
@@ -21,6 +27,162 @@ static inline void init_regfile(esp_gdbstub_gdb_regfile_t *dst)
memset(dst, 0, sizeof(*dst));
}
#if SOC_CPU_HAS_FPU
/**
 * Copy all 32 single-precision FPU registers plus fcsr from the hardware of
 * the current core into the GDB register file.
 *
 * @param dst destination GDB register file (only the `f` member is written)
 */
static void esp_gdbstub_fpu_regs_to_regfile (esp_gdbstub_gdb_regfile_t *dst)
{
    /*
     * NOTE: The GDB stub logic executes within an ISR, where coprocessors are disabled.
     * Therefore, we must enable the coprocessor before reading or writing its registers.
     */
    rv_utils_enable_fpu();
    /* Each "fsw" stores one FP register into its slot in dst; the slot is a
     * write-only memory output, hence the "=m" constraint. */
    __asm__ volatile ("fsw ft0, %0" : "=m" (dst->f.ft0));
    __asm__ volatile ("fsw ft1, %0" : "=m" (dst->f.ft1));
    __asm__ volatile ("fsw ft2, %0" : "=m" (dst->f.ft2));
    __asm__ volatile ("fsw ft3, %0" : "=m" (dst->f.ft3));
    __asm__ volatile ("fsw ft4, %0" : "=m" (dst->f.ft4));
    __asm__ volatile ("fsw ft5, %0" : "=m" (dst->f.ft5));
    __asm__ volatile ("fsw ft6, %0" : "=m" (dst->f.ft6));
    __asm__ volatile ("fsw ft7, %0" : "=m" (dst->f.ft7));
    __asm__ volatile ("fsw fs0, %0" : "=m" (dst->f.fs0));
    __asm__ volatile ("fsw fs1, %0" : "=m" (dst->f.fs1));
    __asm__ volatile ("fsw fa0, %0" : "=m" (dst->f.fa0));
    __asm__ volatile ("fsw fa1, %0" : "=m" (dst->f.fa1));
    __asm__ volatile ("fsw fa2, %0" : "=m" (dst->f.fa2));
    __asm__ volatile ("fsw fa3, %0" : "=m" (dst->f.fa3));
    __asm__ volatile ("fsw fa4, %0" : "=m" (dst->f.fa4));
    __asm__ volatile ("fsw fa5, %0" : "=m" (dst->f.fa5));
    __asm__ volatile ("fsw fa6, %0" : "=m" (dst->f.fa6));
    __asm__ volatile ("fsw fa7, %0" : "=m" (dst->f.fa7));
    __asm__ volatile ("fsw fs2, %0" : "=m" (dst->f.fs2));
    __asm__ volatile ("fsw fs3, %0" : "=m" (dst->f.fs3));
    __asm__ volatile ("fsw fs4, %0" : "=m" (dst->f.fs4));
    __asm__ volatile ("fsw fs5, %0" : "=m" (dst->f.fs5));
    __asm__ volatile ("fsw fs6, %0" : "=m" (dst->f.fs6));
    __asm__ volatile ("fsw fs7, %0" : "=m" (dst->f.fs7));
    __asm__ volatile ("fsw fs8, %0" : "=m" (dst->f.fs8));
    __asm__ volatile ("fsw fs9, %0" : "=m" (dst->f.fs9));
    __asm__ volatile ("fsw fs10, %0" : "=m" (dst->f.fs10));
    __asm__ volatile ("fsw fs11, %0" : "=m" (dst->f.fs11));
    __asm__ volatile ("fsw ft8, %0" : "=m" (dst->f.ft8));
    __asm__ volatile ("fsw ft9, %0" : "=m" (dst->f.ft9));
    __asm__ volatile ("fsw ft10, %0" : "=m" (dst->f.ft10));
    __asm__ volatile ("fsw ft11, %0" : "=m" (dst->f.ft11));
    /* fcsr is a CSR, not an FP register: read it via csrr into a GPR. */
    __asm__ volatile ("csrr %0, fcsr" : "=r" (dst->f.fcsr));
    rv_utils_disable_fpu();
}
#endif
#if SOC_CPU_HAS_PIE
/**
 * Copy the PIE (SIMD) coprocessor register state of the current core into
 * the GDB register file: q0..q7, the four QACC quarters, UA_STATE and XACC.
 *
 * @param dst destination GDB register file (only the `pie` member is written)
 */
static void esp_gdbstub_pie_regs_to_regfile (esp_gdbstub_gdb_regfile_t *dst)
{
    /*
     * NOTE: The GDB stub logic executes within an ISR, where coprocessors are disabled.
     * Therefore, we must enable the coprocessor before reading or writing its registers.
     */
    rv_utils_enable_pie();
    /* The store instructions take the base address in a5, so pin ptr there.
     * NOTE(review): the ",16" suffix on the .ip forms appears to post-increment
     * a5 past each 128-bit slot (the C-level ptr is never advanced) — confirm
     * against the PIE ISA description. */
    register void* ptr asm("a5") = &dst->pie;
    __asm__ volatile ("esp.vst.128.ip q0, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.vst.128.ip q1, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.vst.128.ip q2, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.vst.128.ip q3, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.vst.128.ip q4, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.vst.128.ip q5, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.vst.128.ip q6, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.vst.128.ip q7, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.st.qacc.l.l.128.ip %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.st.qacc.l.h.128.ip %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.st.qacc.h.l.128.ip %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.st.qacc.h.h.128.ip %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.st.ua.state.ip %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.st.s.xacc.ip %0, 0" :: "r" (ptr));
    rv_utils_disable_pie();
}
#endif
#if SOC_CPU_HAS_FPU || SOC_CPU_HAS_PIE
/**
 * Locate the memory that holds a task's saved coprocessor register state.
 *
 * @param tcb     task whose coprocessor context is being accessed
 * @param coproc  coprocessor index (e.g. FPU_COPROC_IDX, PIE_COPROC_IDX)
 * @param is_read true when the caller only wants to read the registers
 * @return NULL when `tcb` currently owns the coprocessor on this core (the
 *         caller must access the hardware registers directly);
 *         GDBSTUB_CSA_NOT_INITIALIZED when reading state that was never
 *         saved (caller should report zeroed registers);
 *         otherwise a pointer to the task's coprocessor save area.
 */
static void *esp_gdbstub_coproc_saved_area(const StaticTask_t *tcb, int coproc, bool is_read) {
    /*
     * Coprocessors have lazy register saving mechanism:
     * 1. Defer saving coprocessor registers until a task actually uses a coprocessor instruction.
     *    This triggers an exception when the instruction is executed.
     * 2. In the exception handler:
     *    - Enable the coprocessor and designate the task as the new coprocessor owner.
     *    - If another task previously owned the coprocessor, save its registers.
     *    - Restore the coprocessor registers for the new owner before execution resumes.
     *
     * To determine whether to read/write coprocessor registers directly or use stack memory:
     * - Check if the current task is the coprocessor owner:
     *   - Yes: Perform direct read/write operations.
     *   - No: Read/write from the stack.
     */
    RvCoprocSaveArea* pxPortGetCoprocArea(StaticTask_t* task, bool allocate, int coproc); /* FreeRTOS port-layer helper */
    RvCoprocSaveArea* csa;
    extern StaticTask_t* port_uxCoprocOwner[portNUM_PROCESSORS][SOC_CPU_COPROC_NUM];
    uint32_t core = rv_utils_get_core_id();
    uint32_t coproc_bit;
    /* Task is the live owner on this core: its registers are in hardware, not memory. */
    if (port_uxCoprocOwner[core][coproc] == tcb) {
        return NULL;
    }
    csa = pxPortGetCoprocArea((StaticTask_t*)tcb, false, coproc);
    if (csa->sa_coprocs[coproc]) {
        return csa->sa_coprocs[coproc];
    }
    if (is_read) {
        /* Don't allocate csa for coprocessor for read. (Just return zeroed registers) */
        return GDBSTUB_CSA_NOT_INITIALIZED;
    }
    /*
     * Ignore 'csa->sa_enable' flag to read/write from the frame if the coprocessor has not been used by this task yet.
     * This ensures that the registers are correctly restored when the task's coprocessor context is switched.
     * That's why true is passed to the allocate parameter of pxPortGetCoprocArea.
     */
    /* TODO: IDF-12550. Provide correct read/write access for coprocessor owned by another CPU.
     * Accessing registers in stack-frame is not correct in this case.
     */
    csa = pxPortGetCoprocArea((StaticTask_t*)tcb, true, coproc);
    coproc_bit = 1 << coproc;
    if (!(csa->sa_enable & coproc_bit)) {
        uint32_t sa_coproc_size = coproc == 0 ? RV_COPROC0_SIZE : (coproc == 1 ? RV_COPROC1_SIZE : RV_COPROC2_SIZE);
        /* coproc registers were not saved for this task yet. Initialize with zeroes. */
        memset(csa->sa_coprocs[coproc], 0, sa_coproc_size);
        csa->sa_enable |= coproc_bit;
    }
    return csa->sa_coprocs[coproc];
}
/**
 * Fill the coprocessor (FPU/PIE) portion of the GDB register file for the
 * task that owns `frame`: read hardware registers when that task is the live
 * coprocessor owner, otherwise copy its saved area (or zeroes if the task
 * never used the coprocessor).
 *
 * @param frame exception frame used to identify the owning task
 * @param dst   destination GDB register file
 */
static void esp_gdbstub_coproc_regs_to_regfile(const esp_gdbstub_frame_t *frame, esp_gdbstub_gdb_regfile_t *dst)
{
    const StaticTask_t *tcb = esp_gdbstub_find_tcb_by_frame(frame);
    void *csa;
#if SOC_CPU_HAS_FPU
    csa = esp_gdbstub_coproc_saved_area(tcb, FPU_COPROC_IDX, true);
    if (csa == NULL) {
        /* Task owns the FPU on this core: read the hardware registers. */
        esp_gdbstub_fpu_regs_to_regfile(dst);
    } else if (csa == GDBSTUB_CSA_NOT_INITIALIZED) {
        /* FPU never used by this task: report zeroed registers. */
        memset(&dst->f, 0, sizeof(dst->f));
    } else {
        memcpy(&dst->f, csa, sizeof(dst->f));
    }
#endif
#if SOC_CPU_HAS_PIE
    csa = esp_gdbstub_coproc_saved_area(tcb, PIE_COPROC_IDX, true);
    if (csa == NULL) {
        /* Task owns the PIE coprocessor: read the hardware registers. */
        esp_gdbstub_pie_regs_to_regfile(dst);
    } else if (csa == GDBSTUB_CSA_NOT_INITIALIZED) {
        /* PIE never used by this task: report zeroed registers. */
        memset(&dst->pie, 0, sizeof(dst->pie));
    } else {
        memcpy(&dst->pie, csa, sizeof(dst->pie));
    }
#endif
}
#endif /* SOC_CPU_HAS_FPU || SOC_CPU_HAS_PIE */
void esp_gdbstub_frame_to_regfile(const esp_gdbstub_frame_t *frame, esp_gdbstub_gdb_regfile_t *dst)
{
init_regfile(dst);
@@ -29,28 +191,21 @@ void esp_gdbstub_frame_to_regfile(const esp_gdbstub_frame_t *frame, esp_gdbstub_
// We omit register x0 here since it's the zero register and always hard-wired to 0.
// See The RISC-V Instruction Set Manual Volume I: Unprivileged ISA Document Version 20191213 for more details.
memcpy(&(dst->x[1]), &frame->ra, sizeof(uint32_t) * 31);
#if SOC_CPU_HAS_FPU || SOC_CPU_HAS_PIE
esp_gdbstub_coproc_regs_to_regfile(frame, dst);
#endif
}
#if CONFIG_ESP_GDBSTUB_SUPPORT_TASKS || CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
/* Represents FreeRTOS TCB structure */
typedef struct {
uint8_t *top_of_stack;
/* Other members aren't needed */
} dummy_tcb_t;
#if CONFIG_ESP_GDBSTUB_SUPPORT_TASKS
void esp_gdbstub_tcb_to_regfile(TaskHandle_t tcb, esp_gdbstub_gdb_regfile_t *dst)
{
const dummy_tcb_t *dummy_tcb = (const dummy_tcb_t *) tcb;
const RvExcFrame *frame = (RvExcFrame *) dummy_tcb->top_of_stack;
const RvExcFrame *frame = (RvExcFrame *) ((StaticTask_t *) tcb)->pxDummy1 /* top_of_stack */;
esp_gdbstub_frame_to_regfile(frame, dst);
}
#endif // CONFIG_ESP_GDBSTUB_SUPPORT_TASKS
#endif // CONFIG_ESP_GDBSTUB_SUPPORT_TASKS || CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
int esp_gdbstub_get_signal(const esp_gdbstub_frame_t *frame)
{
@@ -87,14 +242,30 @@ void esp_gdbstub_int(__attribute__((unused)) void *frame)
/* Pointer to saved frame is in pxCurrentTCB
* See rtos_int_enter function
*/
dummy_tcb_t *tcb = (dummy_tcb_t *)pvTaskGetCurrentTCBForCore(esp_cpu_get_core_id());
gdbstub_handle_uart_int((esp_gdbstub_frame_t *)tcb->top_of_stack);
StaticTask_t *tcb = (StaticTask_t *)pvTaskGetCurrentTCBForCore(esp_cpu_get_core_id());
gdbstub_handle_uart_int((esp_gdbstub_frame_t *)tcb->pxDummy1 /* top_of_stack */);
}
/* Intentionally empty on this port: no extra peripheral/DPORT setup is
 * required here. */
void esp_gdbstub_init_dports(void)
{
}
/**
 * Check if a watchpoint triggered the current debug exception.
 *
 * Scans the hardware trigger slots via the tselect/tdata1 CSRs for one that
 * is armed on load and/or store (but not execute) accesses.
 *
 * NOTE(review): this returns the address of the first *armed* data trigger,
 * which assumes it is the one that fired — confirm for multi-watchpoint use.
 * NOTE(review): the loop bound is SOC_CPU_BREAKPOINTS_NUM — presumably
 * breakpoints and watchpoints share trigger slots on these targets; confirm
 * vs SOC_CPU_WATCHPOINTS_NUM.
 *
 * @param[out] addr address of the triggered watchpoint (valid only if true)
 * @return true if a data watchpoint is armed, false otherwise
 */
bool esp_gdbstub_get_watchpoint_trigger_addr(uint32_t *addr)
{
    for (int i = 0; i < SOC_CPU_BREAKPOINTS_NUM; i++) {
        /* Select trigger slot i and inspect its configuration. */
        RV_WRITE_CSR(tselect, i);
        uint32_t tdata1 = RV_READ_CSR(tdata1);
        bool is_load = tdata1 & TDATA1_LOAD;
        bool is_store = tdata1 & TDATA1_STORE;
        bool is_exec = tdata1 & TDATA1_EXECUTE;
        /* A slot armed for data access but not execution is a watchpoint. */
        if (!is_exec && (is_load || is_store)) {
            /* tdata2 holds the address the trigger matches on. */
            *addr = RV_READ_CSR(tdata2);
            return true;
        }
    }
    return false;
}
#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
#if (!CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE) && CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
@@ -152,8 +323,168 @@ void esp_gdbstub_trigger_cpu(void)
#endif
}
void esp_gdbstub_set_register(esp_gdbstub_frame_t *frame, uint32_t reg_index, uint32_t value)
#if SOC_CPU_HAS_FPU
/**
 * Write one FPU register (or fcsr) on the current core.
 *
 * @param reg_index 0..31 select ft0..ft11/fs0..fs11/fa0..fa7 in RISC-V
 *                  numbering order; 32 selects fcsr. Other values are ignored.
 * @param value     32-bit value to load into the register
 *
 * NOTE: The GDB stub logic executes within an ISR, where coprocessors are
 * disabled, so the FPU is enabled around the access.
 *
 * Fix: "flw" *reads* its memory operand, so `value` must be an input
 * constraint ("m"), not a write-only output ("=m"). With "=m" the compiler
 * is entitled to treat the store that initializes `value` as dead, and the
 * asm would load an indeterminate value. (Contrast with the "fsw" store
 * path, where "=m" is correct.)
 */
void esp_gdbstub_set_fpu_register(uint32_t reg_index, uint32_t value)
{
    rv_utils_enable_fpu();
    if (reg_index == 0) {
        __asm__ volatile ("flw ft0, %0" :: "m" (value));
    } else if (reg_index == 1) {
        __asm__ volatile ("flw ft1, %0" :: "m" (value));
    } else if (reg_index == 2) {
        __asm__ volatile ("flw ft2, %0" :: "m" (value));
    } else if (reg_index == 3) {
        __asm__ volatile ("flw ft3, %0" :: "m" (value));
    } else if (reg_index == 4) {
        __asm__ volatile ("flw ft4, %0" :: "m" (value));
    } else if (reg_index == 5) {
        __asm__ volatile ("flw ft5, %0" :: "m" (value));
    } else if (reg_index == 6) {
        __asm__ volatile ("flw ft6, %0" :: "m" (value));
    } else if (reg_index == 7) {
        __asm__ volatile ("flw ft7, %0" :: "m" (value));
    } else if (reg_index == 8) {
        __asm__ volatile ("flw fs0, %0" :: "m" (value));
    } else if (reg_index == 9) {
        __asm__ volatile ("flw fs1, %0" :: "m" (value));
    } else if (reg_index == 10) {
        __asm__ volatile ("flw fa0, %0" :: "m" (value));
    } else if (reg_index == 11) {
        __asm__ volatile ("flw fa1, %0" :: "m" (value));
    } else if (reg_index == 12) {
        __asm__ volatile ("flw fa2, %0" :: "m" (value));
    } else if (reg_index == 13) {
        __asm__ volatile ("flw fa3, %0" :: "m" (value));
    } else if (reg_index == 14) {
        __asm__ volatile ("flw fa4, %0" :: "m" (value));
    } else if (reg_index == 15) {
        __asm__ volatile ("flw fa5, %0" :: "m" (value));
    } else if (reg_index == 16) {
        __asm__ volatile ("flw fa6, %0" :: "m" (value));
    } else if (reg_index == 17) {
        __asm__ volatile ("flw fa7, %0" :: "m" (value));
    } else if (reg_index == 18) {
        __asm__ volatile ("flw fs2, %0" :: "m" (value));
    } else if (reg_index == 19) {
        __asm__ volatile ("flw fs3, %0" :: "m" (value));
    } else if (reg_index == 20) {
        __asm__ volatile ("flw fs4, %0" :: "m" (value));
    } else if (reg_index == 21) {
        __asm__ volatile ("flw fs5, %0" :: "m" (value));
    } else if (reg_index == 22) {
        __asm__ volatile ("flw fs6, %0" :: "m" (value));
    } else if (reg_index == 23) {
        __asm__ volatile ("flw fs7, %0" :: "m" (value));
    } else if (reg_index == 24) {
        __asm__ volatile ("flw fs8, %0" :: "m" (value));
    } else if (reg_index == 25) {
        __asm__ volatile ("flw fs9, %0" :: "m" (value));
    } else if (reg_index == 26) {
        __asm__ volatile ("flw fs10, %0" :: "m" (value));
    } else if (reg_index == 27) {
        __asm__ volatile ("flw fs11, %0" :: "m" (value));
    } else if (reg_index == 28) {
        __asm__ volatile ("flw ft8, %0" :: "m" (value));
    } else if (reg_index == 29) {
        __asm__ volatile ("flw ft9, %0" :: "m" (value));
    } else if (reg_index == 30) {
        __asm__ volatile ("flw ft10, %0" :: "m" (value));
    } else if (reg_index == 31) {
        __asm__ volatile ("flw ft11, %0" :: "m" (value));
    } else if (reg_index == 32) {
        /* fcsr is a CSR: write it from a GPR via csrw. */
        __asm__ volatile ("csrw fcsr, %0" : : "r" (value));
    }
    rv_utils_disable_fpu();
}
#endif /* SOC_CPU_HAS_FPU */
#if SOC_CPU_HAS_PIE
/**
 * Write one PIE (SIMD) coprocessor register on the current core.
 *
 * @param reg_index 0..7 select q0..q7, 8..11 the QACC quarters, 12 UA_STATE,
 *                  13 XACC. Other values are ignored.
 * @param value_ptr pointer to the raw register data to load (128-bit for
 *                  indices 0..12 — see esp_gdbstub_set_coproc_register)
 */
void esp_gdbstub_set_pie_register(uint32_t reg_index, uint32_t *value_ptr)
{
    /*
     * NOTE: The GDB stub logic executes within an ISR, where coprocessors are disabled.
     * Therefore, we must enable the coprocessor before reading or writing its registers.
     */
    /* The load instructions take the base address in a5, so pin ptr there.
     * NOTE(review): the asm reads memory through ptr with only an "r" operand
     * and no "memory" clobber — presumably the caller's data is already
     * flushed to memory at this point; confirm. */
    register void *ptr asm("a5") = value_ptr;
    rv_utils_enable_pie();
    if (reg_index == 0) {
        __asm__ volatile ("esp.vld.128.ip q0, %0, 0" :: "r" (ptr));
    } else if (reg_index == 1) {
        __asm__ volatile ("esp.vld.128.ip q1, %0, 0" :: "r" (ptr));
    } else if (reg_index == 2) {
        __asm__ volatile ("esp.vld.128.ip q2, %0, 0" :: "r" (ptr));
    } else if (reg_index == 3) {
        __asm__ volatile ("esp.vld.128.ip q3, %0, 0" :: "r" (ptr));
    } else if (reg_index == 4) {
        __asm__ volatile ("esp.vld.128.ip q4, %0, 0" :: "r" (ptr));
    } else if (reg_index == 5) {
        __asm__ volatile ("esp.vld.128.ip q5, %0, 0" :: "r" (ptr));
    } else if (reg_index == 6) {
        __asm__ volatile ("esp.vld.128.ip q6, %0, 0" :: "r" (ptr));
    } else if (reg_index == 7) {
        __asm__ volatile ("esp.vld.128.ip q7, %0, 0" :: "r" (ptr));
    } else if (reg_index == 8) {
        __asm__ volatile ("esp.ld.qacc.l.l.128.ip %0, 0" :: "r" (ptr));
    } else if (reg_index == 9) {
        __asm__ volatile ("esp.ld.qacc.l.h.128.ip %0, 0" :: "r" (ptr));
    } else if (reg_index == 10) {
        __asm__ volatile ("esp.ld.qacc.h.l.128.ip %0, 0" :: "r" (ptr));
    } else if (reg_index == 11) {
        __asm__ volatile ("esp.ld.qacc.h.h.128.ip %0, 0" :: "r" (ptr));
    } else if (reg_index == 12) {
        __asm__ volatile ("esp.ld.ua.state.ip %0, 0" :: "r" (ptr));
    } else if (reg_index == 13) {
        __asm__ volatile ("esp.ld.xacc.ip %0, 0" :: "r" (ptr));
    }
    rv_utils_disable_pie();
}
#endif /* SOC_CPU_HAS_PIE */
#if SOC_CPU_HAS_FPU || SOC_CPU_HAS_PIE
/**
 * Route a GDB register write to the proper coprocessor (FPU or PIE): write
 * hardware registers when the frame's task is the live coprocessor owner,
 * otherwise update the task's saved-register area.
 *
 * @param frame     exception frame used to identify the owning task
 * @param reg_index GDB register number (33..68 FPU incl. fcsr regnum 68;
 *                  4211..4224 PIE — matches the target.xml description)
 * @param value_ptr register data; one uint32_t for FPU, up to 128 bits for PIE
 */
void esp_gdbstub_set_coproc_register(esp_gdbstub_frame_t *frame, uint32_t reg_index, uint32_t *value_ptr) {
    const StaticTask_t *tcb = esp_gdbstub_find_tcb_by_frame(frame);
    uint32_t *csa; /* points to coprocessor registers data. */
#if SOC_CPU_HAS_FPU
    if (reg_index >= 33 && reg_index <= 68) {
        reg_index -= 33; /* rebase: 0..31 are f-registers, 32 is fcsr */
        if (reg_index > 32 /* fcsr */) {
            /* NOTE(review): indices 65..67 also collapse onto fcsr here —
             * presumably unused by GDB given the target description; confirm. */
            reg_index = 32;
        }
        csa = esp_gdbstub_coproc_saved_area(tcb, FPU_COPROC_IDX, false);
        if (csa == NULL) {
            /* Task owns the FPU on this core: write the hardware register. */
            esp_gdbstub_set_fpu_register(reg_index, *value_ptr);
        } else {
            csa[reg_index] = *value_ptr;
        }
        return;
    }
#endif
#if SOC_CPU_HAS_PIE
    if (reg_index >= 4211 && reg_index <= 4224) {
        reg_index -= 4211; /* rebase: q0..q7, qacc quarters, ua_state, xacc */
        csa = esp_gdbstub_coproc_saved_area(tcb, PIE_COPROC_IDX, false);
        if (csa == NULL) {
            /* Task owns the PIE coprocessor: write the hardware register. */
            esp_gdbstub_set_pie_register(reg_index, value_ptr);
        } else {
            if (reg_index < 13) { /* 128-bit registers: q, qacc, ua_state */
                memcpy(&csa[reg_index * 4], value_ptr, sizeof(uint32_t) * 4);
            } else { /* 40-bit register: xacc */
                memcpy(&csa[4 * 13], value_ptr, sizeof(uint8_t) * 5);
            }
        }
    }
#endif
}
#endif /* SOC_CPU_HAS_FPU || SOC_CPU_HAS_PIE */
void esp_gdbstub_set_register(esp_gdbstub_frame_t *frame, uint32_t reg_index, uint32_t *value_ptr)
{
uint32_t value = *value_ptr;
/* RISC-V base ISA has registers x0-x31 */
if (reg_index == 0) { /* skip zero-wired register */
return;
@@ -162,4 +493,7 @@ void esp_gdbstub_set_register(esp_gdbstub_frame_t *frame, uint32_t reg_index, ui
} else if (reg_index == 32) { /* register 32 is PC */
frame->mepc = value;
}
#if SOC_CPU_HAS_FPU || SOC_CPU_HAS_PIE
esp_gdbstub_set_coproc_register(frame, reg_index, value_ptr);
#endif
}
@@ -1,12 +1,22 @@
/*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <assert.h>
#include <stdint.h>
#include "soc/soc_caps.h"
#include "riscv/rvruntime-frames.h"
#include "sdkconfig.h"
#if SOC_CPU_HAS_FPU || SOC_CPU_HAS_PIE
#define GDBSTUB_QXFER_FEATURES_ENABLED 1
#if SOC_CPU_HAS_PIE
#define GDBSTUB_MAX_REGISTER_SIZE 16
#endif
#endif
#ifdef __cplusplus
extern "C" {
@@ -14,12 +24,31 @@ extern "C" {
typedef RvExcFrame esp_gdbstub_frame_t;
#if SOC_CPU_HAS_FPU
static_assert(sizeof(RvFPUSaveArea) == (32 + 1) * sizeof(uint32_t),
"Expected 32 float registers + fcsr. Please update gdbstub internals.");
#endif
#if SOC_CPU_HAS_PIE
static_assert(sizeof(RvPIESaveArea) == ((8 + 4 + 1) * (4 * sizeof(uint32_t))) + (2 * sizeof(uint32_t)),
"Expected 8 Q regs, QACC, UA_STATE, XACC. Please update gdbstub internals.");
#endif
/* GDB regfile structure, configuration dependent */
typedef struct {
uint32_t x[32];
uint32_t pc;
#if SOC_CPU_HAS_FPU
RvFPUSaveArea f;
#endif
#if SOC_CPU_HAS_PIE
RvPIESaveArea pie;
#endif
} esp_gdbstub_gdb_regfile_t;
#if GDBSTUB_QXFER_FEATURES_ENABLED
extern const char target_xml[];
#endif
#ifdef __cplusplus
}
#endif
@@ -5,6 +5,7 @@
*/
#include "rv_decode.h"
#include "riscv/csr.h"
#include "riscv/csr_hwlp.h"
#include "soc/soc_caps.h"
@@ -0,0 +1,116 @@
/*
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "soc/soc_caps.h"
#include "sdkconfig.h"
#if SOC_CPU_HAS_FPU || SOC_CPU_HAS_PIE
/*
 * GDB target description, served to the debugger through the
 * "qXfer:features:read" remote-protocol query. It describes the layout of
 * the 'g'/'G' register packets: the base RV32 integer registers first,
 * then (when the chip has them) the FPU and Espressif PIE vector features.
 */
const char target_xml[] =
    "<?xml version=\"1.0\"?>"
    "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
    "<target>"
    "<architecture>riscv</architecture>"
    /* Base RV32 integer register set (x0..x31 plus pc). */
    "<feature name=\"org.gnu.gdb.riscv.cpu\">"
    "<reg name=\"zero\" bitsize=\"32\"/>"
    "<reg name=\"ra\" bitsize=\"32\" type=\"code_ptr\"/>"
    "<reg name=\"sp\" bitsize=\"32\" type=\"data_ptr\"/>"
    "<reg name=\"gp\" bitsize=\"32\" type=\"data_ptr\"/>"
    "<reg name=\"tp\" bitsize=\"32\" type=\"data_ptr\"/>"
    "<reg name=\"t0\" bitsize=\"32\"/>"
    "<reg name=\"t1\" bitsize=\"32\"/>"
    "<reg name=\"t2\" bitsize=\"32\"/>"
    "<reg name=\"fp\" bitsize=\"32\" type=\"data_ptr\"/>"
    "<reg name=\"s1\" bitsize=\"32\"/>"
    "<reg name=\"a0\" bitsize=\"32\"/>"
    "<reg name=\"a1\" bitsize=\"32\"/>"
    "<reg name=\"a2\" bitsize=\"32\"/>"
    "<reg name=\"a3\" bitsize=\"32\"/>"
    "<reg name=\"a4\" bitsize=\"32\"/>"
    "<reg name=\"a5\" bitsize=\"32\"/>"
    "<reg name=\"a6\" bitsize=\"32\"/>"
    "<reg name=\"a7\" bitsize=\"32\"/>"
    "<reg name=\"s2\" bitsize=\"32\"/>"
    "<reg name=\"s3\" bitsize=\"32\"/>"
    "<reg name=\"s4\" bitsize=\"32\"/>"
    "<reg name=\"s5\" bitsize=\"32\"/>"
    "<reg name=\"s6\" bitsize=\"32\"/>"
    "<reg name=\"s7\" bitsize=\"32\"/>"
    "<reg name=\"s8\" bitsize=\"32\"/>"
    "<reg name=\"s9\" bitsize=\"32\"/>"
    "<reg name=\"s10\" bitsize=\"32\"/>"
    "<reg name=\"s11\" bitsize=\"32\"/>"
    "<reg name=\"t3\" bitsize=\"32\"/>"
    "<reg name=\"t4\" bitsize=\"32\"/>"
    "<reg name=\"t5\" bitsize=\"32\"/>"
    "<reg name=\"t6\" bitsize=\"32\"/>"
    "<reg name=\"pc\" bitsize=\"32\" type=\"code_ptr\"/>"
    "</feature>"
#if SOC_CPU_HAS_FPU
    /* Single-precision FPU registers; fcsr is pinned to GDB regnum 68. */
    "<feature name=\"org.gnu.gdb.riscv.fpu\">"
    "<reg name=\"ft0\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"ft1\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"ft2\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"ft3\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"ft4\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"ft5\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"ft6\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"ft7\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fs0\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fs1\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fa0\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fa1\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fa2\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fa3\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fa4\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fa5\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fa6\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fa7\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fs2\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fs3\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fs4\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fs5\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fs6\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fs7\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fs8\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fs9\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fs10\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fs11\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"ft8\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"ft9\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"ft10\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"ft11\" bitsize=\"32\" type=\"float\"/>"
    "<reg name=\"fcsr\" bitsize=\"32\" regnum=\"68\"/>"
    "</feature>"
#endif /* SOC_CPU_HAS_FPU */
#if SOC_CPU_HAS_PIE
    /*
     * Espressif PIE (vector) registers. q0 is pinned to regnum 4211; per
     * GDB target-description rules, the following registers number
     * consecutively from there.
     */
    "<feature name=\"org.gnu.gdb.riscv.xesppie\">"
    /* Helper vector/union types used by the PIE register views below. */
    "<vector id=\"v5i8\" type=\"int8\" count=\"5\"/>"
    "<vector id=\"v3i8\" type=\"int8\" count=\"3\"/>"
    "<vector id=\"v4i32\" type=\"int32\" count=\"4\"/>"
    "<vector id=\"v2i64\" type=\"int64\" count=\"2\"/>"
    "<union id=\"vec128\">"
    "<field name=\"v4_int32\" type=\"v4i32\"/>"
    "<field name=\"v2_int64\" type=\"v2i64\"/>"
    "<field name=\"uint128\" type=\"uint128\"/>"
    "</union>"
    "<reg name=\"q0\" bitsize=\"128\" type=\"vec128\" group=\"vector\" regnum=\"4211\"/>"
    "<reg name=\"q1\" bitsize=\"128\" type=\"vec128\" group=\"vector\"/>"
    "<reg name=\"q2\" bitsize=\"128\" type=\"vec128\" group=\"vector\"/>"
    "<reg name=\"q3\" bitsize=\"128\" type=\"vec128\" group=\"vector\"/>"
    "<reg name=\"q4\" bitsize=\"128\" type=\"vec128\" group=\"vector\"/>"
    "<reg name=\"q5\" bitsize=\"128\" type=\"vec128\" group=\"vector\"/>"
    "<reg name=\"q6\" bitsize=\"128\" type=\"vec128\" group=\"vector\"/>"
    "<reg name=\"q7\" bitsize=\"128\" type=\"vec128\" group=\"vector\"/>"
    "<reg name=\"qacc_l_l\" bitsize=\"128\" type=\"vec128\" group=\"vector\"/>"
    "<reg name=\"qacc_l_h\" bitsize=\"128\" type=\"vec128\" group=\"vector\"/>"
    "<reg name=\"qacc_h_l\" bitsize=\"128\" type=\"vec128\" group=\"vector\"/>"
    "<reg name=\"qacc_h_h\" bitsize=\"128\" type=\"vec128\" group=\"vector\"/>"
    "<reg name=\"ua_state\" bitsize=\"128\" type=\"vec128\" group=\"vector\"/>"
    "<reg name=\"xacc\" bitsize=\"40\" type=\"v5i8\" group=\"vector\"/>"
    "<reg name=\"misc\" bitsize=\"24\" type=\"v3i8\" group=\"vector\"/>" /* make GDB happy about "Truncated register 4207 in remote 'g' packet" */
    "</feature>"
#endif /* SOC_CPU_HAS_PIE */
    "</target>";
#endif
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -42,43 +42,135 @@ static void update_regfile_common(esp_gdbstub_gdb_regfile_t *dst)
#if XCHAL_HAVE_FP
/** @brief Read FPU registers to memory
*/
static void gdbstub_read_fpu_regs(void *data)
static void gdbstub_read_fpu_regs(xtensa_fpu_regs_t *fpu)
{
float *ptr0;
void *ptr1;
uint32_t tmp;
asm volatile ("mov %0, %1" : "=a" (ptr0) : "a" (data));
/* Read FPU registers from memory */
asm volatile ("ssi f0, %0, 0" :: "a" (&fpu->f[0]));
asm volatile ("ssi f1, %0, 0" :: "a" (&fpu->f[1]));
asm volatile ("ssi f2, %0, 0" :: "a" (&fpu->f[2]));
asm volatile ("ssi f3, %0, 0" :: "a" (&fpu->f[3]));
asm volatile ("ssi f4, %0, 0" :: "a" (&fpu->f[4]));
asm volatile ("ssi f5, %0, 0" :: "a" (&fpu->f[5]));
asm volatile ("ssi f6, %0, 0" :: "a" (&fpu->f[6]));
asm volatile ("ssi f7, %0, 0" :: "a" (&fpu->f[7]));
asm volatile ("ssi f8, %0, 0" :: "a" (&fpu->f[8]));
asm volatile ("ssi f9, %0, 0" :: "a" (&fpu->f[9]));
asm volatile ("ssi f10, %0, 0" :: "a" (&fpu->f[10]));
asm volatile ("ssi f11, %0, 0" :: "a" (&fpu->f[11]));
asm volatile ("ssi f12, %0, 0" :: "a" (&fpu->f[12]));
asm volatile ("ssi f13, %0, 0" :: "a" (&fpu->f[13]));
asm volatile ("ssi f14, %0, 0" :: "a" (&fpu->f[14]));
asm volatile ("ssi f15, %0, 0" :: "a" (&fpu->f[15]));
asm volatile ("rur.FCR %0" : "=a" (ptr1));
asm volatile ("s32i %0, %1, 64" : "=a" (ptr1) : "a" (ptr0));
asm volatile ("rur.FSR %0" : "=a" (ptr1));
asm volatile ("s32i %0, %1, 68" : "=a" (ptr1) : "a" (ptr0));
/* Read FCR and FSR from CPU registers */
asm volatile ("rur.FCR %0" : "=a" (tmp));
asm volatile ("s32i %0, %1, 0" : "=a" (tmp) : "a" (&fpu->fcr));
asm volatile ("rur.FSR %0" : "=a" (tmp));
asm volatile ("s32i %0, %1, 0" : "=a" (tmp) : "a" (&fpu->fsr));
}
asm volatile ("ssi f0, %0, 0" :: "a" (ptr0)); //*(ptr0 + 0) = f0;
asm volatile ("ssi f1, %0, 4" :: "a" (ptr0)); //*(ptr0 + 4) = f1;
asm volatile ("ssi f2, %0, 8" :: "a" (ptr0)); //...
asm volatile ("ssi f3, %0, 12" :: "a" (ptr0));
asm volatile ("ssi f4, %0, 16" :: "a" (ptr0));
asm volatile ("ssi f5, %0, 20" :: "a" (ptr0));
asm volatile ("ssi f6, %0, 24" :: "a" (ptr0));
asm volatile ("ssi f7, %0, 28" :: "a" (ptr0));
asm volatile ("ssi f8, %0, 32" :: "a" (ptr0));
asm volatile ("ssi f9, %0, 36" :: "a" (ptr0));
asm volatile ("ssi f10, %0, 40" :: "a" (ptr0));
asm volatile ("ssi f11, %0, 44" :: "a" (ptr0));
asm volatile ("ssi f12, %0, 48" :: "a" (ptr0));
asm volatile ("ssi f13, %0, 52" :: "a" (ptr0));
asm volatile ("ssi f14, %0, 56" :: "a" (ptr0));
asm volatile ("ssi f15, %0, 60" :: "a" (ptr0));
/**
 * Locate the coprocessor save area (CPSA) for a given task and coprocessor.
 *
 * @param tcb     Task control block to inspect.
 * @param coproc  Coprocessor id (e.g. XCHAL_CP_ID_FPU).
 * @return Pointer to the saved register area, or NULL when the registers
 *         were never stored there (i.e. the caller must read them from the
 *         live CPU registers instead).
 */
static void *esp_gdbstub_coproc_saved_area(void *tcb, int coproc)
{
    /**
     * Offset to start of the CPSA area on the stack. See uxInitialiseStackCPSA().
     */
    extern const uint32_t offset_cpsa;
    extern const uint32_t offset_pxEndOfStack;
    extern uintptr_t _xt_coproc_owner_sa[portNUM_PROCESSORS][XCHAL_CP_MAX];

    uint32_t core = esp_cpu_get_core_id();
    uint16_t coproc_bit = 1 << coproc;

    /*
     * Calculate CP save area header pointer (same as get_cpsa_from_tcb macro):
     * 1. Get pxEndOfStack from TCB
     * 2. Subtract offset_cpsa
     * 3. Align down to 16 bytes
     *
     * For more details refer to comments in uxInitialiseStackCPSA() in port.c.
     */
    void *cpsa_header_ptr = *(void **)((char *)tcb + offset_pxEndOfStack);
    cpsa_header_ptr = (char *)cpsa_header_ptr - offset_cpsa;
    cpsa_header_ptr = (void *)((uintptr_t)cpsa_header_ptr & ~0xF);

    /* For more details about fields, refer to comments in xtensa_context.h */
    typedef struct {
        uint16_t xt_cpenable;
        uint16_t xt_cpstored;
        uint16_t xt_cp_cs_st;
        uint16_t dummy;
        void *xt_cp_asa;
    } cpsa_header_t;
    cpsa_header_t *cpsa_header = (cpsa_header_t *)cpsa_header_ptr;

    /* TODO: IDF-12550. Provide correct read access for coprocessor owned by another CPU.
     * Accessing registers in stack-frame is not correct in this case.
     */
    for (uint32_t i = 0; i < portNUM_PROCESSORS; i++) {
        if (i == core) {
            continue;
        }
        /* Another core's owner-SA entry points at this task's CPSA; return
         * the save area (see the TODO above regarding correctness). */
        if (_xt_coproc_owner_sa[i][coproc] == (uintptr_t)cpsa_header) {
            return cpsa_header->xt_cp_asa;
        }
    }

    /* TODO IDF-15054:
     * - Handle case when coprocessor instructions have not been called yet for this task
     */
    /* The context was flushed to the save area either on a context switch
     * (xt_cpstored) or a coprocessor-state save (xt_cp_cs_st). */
    if ((cpsa_header->xt_cpstored & coproc_bit) ||
        (cpsa_header->xt_cp_cs_st & coproc_bit)) {
        return cpsa_header->xt_cp_asa;
    }
    return NULL;
}
/**
 * Make sure the given coprocessor is enabled in CPENABLE.
 *
 * @param coproc Coprocessor id to enable.
 * @return The previous CPENABLE value, so the caller can restore it later.
 */
static uint32_t enable_coproc(int coproc)
{
    uint32_t prev_cpenable;
    RSR(CPENABLE, prev_cpenable);
    const uint32_t mask = 1u << coproc;
    if ((prev_cpenable & mask) == 0) {
        /* Coprocessor currently disabled: turn its bit on. */
        uint32_t enabled = prev_cpenable | mask;
        WSR(CPENABLE, enabled);
    }
    return prev_cpenable;
}
/**
 * Copy the FPU registers of the given task into the GDB register file.
 *
 * With lazy coprocessor saving, the registers may live either in the task's
 * coprocessor save area (already flushed to memory) or still in the FPU
 * hardware (task is the current owner).
 *
 * @param tcb Task whose FPU state is requested.
 * @param dst GDB register file to fill.
 */
static void write_fpu_regs_to_regfile(void *tcb, esp_gdbstub_gdb_regfile_t *dst)
{
    xtensa_fpu_regs_t *saved = esp_gdbstub_coproc_saved_area(tcb, XCHAL_CP_ID_FPU);
    if (saved != NULL) {
        /* Context was already flushed to the task's save area: copy it. */
        memcpy(&dst->fpu, saved, sizeof(dst->fpu));
        return;
    }
    /*
     * The task still owns the FPU, so the registers live in hardware.
     * NOTE: the coprocessor must be enabled before touching its registers,
     * otherwise the reads would raise an exception.
     */
    uint32_t prev_cpenable = enable_coproc(XCHAL_CP_ID_FPU);
    gdbstub_read_fpu_regs(&dst->fpu);
    /* Restore the original CPENABLE state. */
    WSR(CPENABLE, prev_cpenable);
}
#endif // XCHAL_HAVE_FP
extern const uint32_t offset_pxEndOfStack;
extern const uint32_t offset_cpsa; /* Offset to start of the CPSA area on the stack. See uxInitialiseStackCPSA(). */
extern uint32_t _xt_coproc_owner_sa[2];
void esp_gdbstub_frame_to_regfile(const esp_gdbstub_frame_t *frame, esp_gdbstub_gdb_regfile_t *dst)
{
init_regfile(dst);
@@ -102,34 +194,8 @@ void esp_gdbstub_frame_to_regfile(const esp_gdbstub_frame_t *frame, esp_gdbstub_
}
#if XCHAL_HAVE_FP
extern void *pxCurrentTCBs[2];
void *current_tcb_ptr = pxCurrentTCBs[0];
uint32_t *current_fpu_ptr = NULL;
#if !CONFIG_FREERTOS_UNICORE
current_tcb_ptr = pxCurrentTCBs[esp_cpu_get_core_id()];
#endif
uint32_t cp_enabled;
RSR(CPENABLE, cp_enabled);
// Check if the co-processor is enabled
if (cp_enabled) {
gdbstub_read_fpu_regs(dst->f);
} else {
current_tcb_ptr += offset_pxEndOfStack;
current_tcb_ptr = *(void **)current_tcb_ptr;
current_tcb_ptr -= offset_cpsa;
// Operation (&~0xf) required in .macro get_cpsa_from_tcb reg_A reg_B
current_tcb_ptr = (void*)((uint32_t)current_tcb_ptr&~0xf);
current_fpu_ptr = *(uint32_t **)(current_tcb_ptr + XT_CP_ASA);
dst->fcr = current_fpu_ptr[0];
dst->fsr = current_fpu_ptr[1];
for (int i = 0; i < 16; i++) {
dst->f[i] = current_fpu_ptr[i + 2];
}
}
extern void *pxCurrentTCBs[portNUM_PROCESSORS];
write_fpu_regs_to_regfile(pxCurrentTCBs[esp_cpu_get_core_id()], dst);
#endif //XCHAL_HAVE_FP
#if XCHAL_HAVE_LOOPS
dst->lbeg = frame->lbeg;
@@ -175,40 +241,7 @@ void esp_gdbstub_tcb_frame_to_regfile(dummy_tcb_t *tcb, esp_gdbstub_gdb_regfile_
}
#if XCHAL_HAVE_FP
uint32_t *current_xt_coproc_owner_sa = (uint32_t *)_xt_coproc_owner_sa[0];
#if !CONFIG_FREERTOS_UNICORE
current_xt_coproc_owner_sa = (uint32_t *)_xt_coproc_owner_sa[esp_cpu_get_core_id()];
#endif
uint32_t cp_enabled;
RSR(CPENABLE, cp_enabled);
void *current_tcb_ptr = tcb;
uint32_t *current_fpu_ptr = NULL;
{
current_tcb_ptr += offset_pxEndOfStack;
current_tcb_ptr = *(void **)current_tcb_ptr;
current_tcb_ptr -= offset_cpsa;
// Operation (&~0xf) required in .macro get_cpsa_from_tcb reg_A reg_B
current_tcb_ptr = (void*)((uint32_t)current_tcb_ptr&~0xf);
current_fpu_ptr = *(uint32_t **)(current_tcb_ptr + XT_CP_ASA);
bool use_fpu_regs = ((false == cp_enabled) && (current_xt_coproc_owner_sa[0] == 1) && (current_fpu_ptr == (uint32_t*)current_xt_coproc_owner_sa[2]));
dst->fcr = current_fpu_ptr[0];
dst->fsr = current_fpu_ptr[1];
for (int i = 0; i < 16; i++) {
dst->f[i] = current_fpu_ptr[i + 2];
}
/* We have situation when FPU is in use, but the context not stored
to the memory, and we have to read from CPU registers.
*/
if (use_fpu_regs) {
gdbstub_read_fpu_regs(dst->f);
}
}
write_fpu_regs_to_regfile(tcb, dst);
#endif // XCHAL_HAVE_FP
#if XCHAL_HAVE_LOOPS
@@ -274,6 +307,23 @@ void esp_gdbstub_init_dports(void)
{
}
#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
/**
 * Check whether the current debug exception was raised by a data
 * watchpoint (DBREAK) and, if so, report the address that triggered it.
 *
 * @param[out] addr Filled with the watchpoint compare address when the
 *                  function returns true.
 * @return true if a DBREAK watchpoint fired, false otherwise.
 */
bool esp_gdbstub_get_watchpoint_trigger_addr(uint32_t *addr)
{
    uint32_t debugcause;
    RSR(DEBUGCAUSE, debugcause);
    if (debugcause & XCHAL_DEBUGCAUSE_DBREAK_MASK) {
        /* Bit 8 presumably encodes the DEBUGCAUSE DBNUM field selecting
         * which of the two DBREAK units fired — TODO confirm against the
         * Xtensa ISA DEBUGCAUSE register layout. */
        if (debugcause & (1 << 8)) {
            RSR(DBREAKA_1, *addr);
        } else {
            RSR(DBREAKA_0, *addr);
        }
        return true;
    }
    return false;
}
#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
#if CONFIG_IDF_TARGET_ARCH_XTENSA && (!CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE) && CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
static bool stall_started = false;
#endif
@@ -344,17 +394,87 @@ void esp_gdbstub_trigger_cpu(void)
#endif
}
#if XCHAL_HAVE_FP
/**
 * Load one live FPU register from memory.
 *
 * @param fpu_reg_index 0..15 select f0..f15, 16 selects FCR, 17 selects FSR.
 *                      Any other index is ignored.
 * @param value_ptr     Points at the value to load into the register.
 */
static void gdbstub_set_fpu_register(uint32_t fpu_reg_index, float *value_ptr)
{
    /* The register name is encoded in the opcode, so each index needs its
     * own asm statement; a switch keeps the mapping in one place. */
    switch (fpu_reg_index) {
    case 0:
        asm volatile ("lsi f0, %0, 0" :: "a" (value_ptr));
        break;
    case 1:
        asm volatile ("lsi f1, %0, 0" :: "a" (value_ptr));
        break;
    case 2:
        asm volatile ("lsi f2, %0, 0" :: "a" (value_ptr));
        break;
    case 3:
        asm volatile ("lsi f3, %0, 0" :: "a" (value_ptr));
        break;
    case 4:
        asm volatile ("lsi f4, %0, 0" :: "a" (value_ptr));
        break;
    case 5:
        asm volatile ("lsi f5, %0, 0" :: "a" (value_ptr));
        break;
    case 6:
        asm volatile ("lsi f6, %0, 0" :: "a" (value_ptr));
        break;
    case 7:
        asm volatile ("lsi f7, %0, 0" :: "a" (value_ptr));
        break;
    case 8:
        asm volatile ("lsi f8, %0, 0" :: "a" (value_ptr));
        break;
    case 9:
        asm volatile ("lsi f9, %0, 0" :: "a" (value_ptr));
        break;
    case 10:
        asm volatile ("lsi f10, %0, 0" :: "a" (value_ptr));
        break;
    case 11:
        asm volatile ("lsi f11, %0, 0" :: "a" (value_ptr));
        break;
    case 12:
        asm volatile ("lsi f12, %0, 0" :: "a" (value_ptr));
        break;
    case 13:
        asm volatile ("lsi f13, %0, 0" :: "a" (value_ptr));
        break;
    case 14:
        asm volatile ("lsi f14, %0, 0" :: "a" (value_ptr));
        break;
    case 15:
        asm volatile ("lsi f15, %0, 0" :: "a" (value_ptr));
        break;
    case 16:
        /* FCR/FSR are user registers written via wur from an AR. */
        asm volatile ("wur.FCR %0" :: "a" (*value_ptr));
        break;
    case 17:
        asm volatile ("wur.FSR %0" :: "a" (*value_ptr));
        break;
    default:
        /* Out-of-range index: nothing to do. */
        break;
    }
}
/**
 * Write one FPU register (f0..f15, FCR, FSR) for the task owning `frame`.
 *
 * The GDB register numbering places the FPU registers after the core
 * registers at a target-specific offset; indices 0..15 within the FPU
 * range map to f0..f15, followed by FCR (16) and FSR (17).
 *
 * @param frame     Exception frame identifying the stopped task.
 * @param reg_index GDB register number being written.
 * @param value_ptr New register value.
 */
static void gdbstub_write_fpu_regs(esp_gdbstub_frame_t *frame, uint32_t reg_index, uint32_t *value_ptr)
{
#if CONFIG_IDF_TARGET_ESP32
    const uint32_t fpu_start_register = 87;
#elif CONFIG_IDF_TARGET_ESP32S3
    const uint32_t fpu_start_register = 84;
#else
#error "Unknown Xtensa chip"
#endif
    const StaticTask_t *tcb;
    uint32_t *fpu_save_area;
    /* Unsigned subtraction: when reg_index < fpu_start_register the result
     * wraps to a huge value and is rejected by the bound check below. */
    uint32_t fpu_reg_index = reg_index - fpu_start_register;
    if (fpu_reg_index >= (16 + 2)) {  /* 16 f-registers + FCR + FSR */
        return;
    }
    tcb = esp_gdbstub_find_tcb_by_frame(frame);
    fpu_save_area = esp_gdbstub_coproc_saved_area((void *)tcb, XCHAL_CP_ID_FPU);
    if (fpu_save_area == NULL) {
        /* Task still owns the FPU: write the live register. The coprocessor
         * must be enabled first to avoid a coprocessor exception. */
        uint32_t cp_enabled = enable_coproc(XCHAL_CP_ID_FPU);
        gdbstub_set_fpu_register(fpu_reg_index, (float *)value_ptr);
        WSR(CPENABLE, cp_enabled);
    } else {
        /* Context was lazily saved to memory: patch the save area instead. */
        fpu_save_area[fpu_reg_index] = *value_ptr;
    }
}
#endif // XCHAL_HAVE_FP
/** @brief GDB set register in frame
* Set register in frame with address to value
*
* */
void esp_gdbstub_set_register(esp_gdbstub_frame_t *frame, uint32_t reg_index, uint32_t value)
void esp_gdbstub_set_register(esp_gdbstub_frame_t *frame, uint32_t reg_index, uint32_t *value_ptr)
{
uint32_t temp_fpu_value = value;
float *ptr0;
asm volatile ("mov %0, %1" : "=a" (ptr0) : "a" (&temp_fpu_value));
uint32_t value = *value_ptr;
if (reg_index == 0) {
frame->pc = value;
@@ -362,66 +482,6 @@ void esp_gdbstub_set_register(esp_gdbstub_frame_t *frame, uint32_t reg_index, ui
(&frame->a0)[reg_index - 1] = value;
}
#if XCHAL_HAVE_FP
void *ptr1;
uint32_t cp_enabled;
RSR(CPENABLE, cp_enabled);
if (cp_enabled != 0) {
if (reg_index == 87) {
asm volatile ("lsi f0, %0, 0" :: "a" (ptr0));
}
if (reg_index == 88) {
asm volatile ("lsi f1, %0, 0" :: "a" (ptr0));
}
if (reg_index == 89) {
asm volatile ("lsi f2, %0, 0" :: "a" (ptr0));
}
if (reg_index == 90) {
asm volatile ("lsi f3, %0, 0" :: "a" (ptr0));
}
if (reg_index == 91) {
asm volatile ("lsi f4, %0, 0" :: "a" (ptr0));
}
if (reg_index == 92) {
asm volatile ("lsi f5, %0, 0" :: "a" (ptr0));
}
if (reg_index == 93) {
asm volatile ("lsi f6, %0, 0" :: "a" (ptr0));
}
if (reg_index == 94) {
asm volatile ("lsi f7, %0, 0" :: "a" (ptr0));
}
if (reg_index == 95) {
asm volatile ("lsi f8, %0, 0" :: "a" (ptr0));
}
if (reg_index == 96) {
asm volatile ("lsi f9, %0, 0" :: "a" (ptr0));
}
if (reg_index == 97) {
asm volatile ("lsi f10, %0, 0" :: "a" (ptr0));
}
if (reg_index == 98) {
asm volatile ("lsi f11, %0, 0" :: "a" (ptr0));
}
if (reg_index == 99) {
asm volatile ("lsi f12, %0, 0" :: "a" (ptr0));
}
if (reg_index == 100) {
asm volatile ("lsi f13, %0, 0" :: "a" (ptr0));
}
if (reg_index == 101) {
asm volatile ("lsi f14, %0, 0" :: "a" (ptr0));
}
if (reg_index == 102) {
asm volatile ("lsi f15, %0, 0" :: "a" (ptr0));
}
if (reg_index == 103) {
asm volatile ("l32i %0, %1, 0" : "=a" (ptr1) : "a" (ptr0));
asm volatile ("wur.FCR %0" : "=a" (ptr1));
}
if (reg_index == 104) {
asm volatile ("l32i %0, %1, 0" : "=a" (ptr1) : "a" (ptr0));
asm volatile ("wur.FSR %0" : "=a" (ptr1));
}
}
gdbstub_write_fpu_regs(frame, reg_index, value_ptr);
#endif // XCHAL_HAVE_FP
}
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -9,20 +9,20 @@
#include "xtensa_context.h"
#include "sdkconfig.h"
#if CONFIG_IDF_TARGET_ESP32
#define GDBSTUB_EXTRA_TIE_SIZE 0
#elif defined(CONFIG_IDF_TARGET_ESP32S2) || defined(CONFIG_IDF_TARGET_ESP32S3)
#define GDBSTUB_EXTRA_TIE_SIZE 1
#else
#error "Unknown Xtensa chip"
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef XtExcFrame esp_gdbstub_frame_t;
#if XCHAL_HAVE_FP
typedef struct {
uint32_t f[16];
uint32_t fcr;
uint32_t fsr;
} xtensa_fpu_regs_t;
#endif
/* GDB regfile structure, configuration dependent */
typedef struct {
uint32_t pc;
@@ -73,14 +73,12 @@ typedef struct {
uint32_t f64s;
#endif
#if XCHAL_HAVE_FP
uint32_t f[16];
uint32_t fcr;
uint32_t fsr;
#if CONFIG_IDF_TARGET_ESP32S2 || CONFIG_IDF_TARGET_ESP32S3
uint32_t gpio_out;
#endif
#if GDBSTUB_EXTRA_TIE_SIZE > 0
uint32_t tie[GDBSTUB_EXTRA_TIE_SIZE];
#if XCHAL_HAVE_FP
xtensa_fpu_regs_t fpu;
#endif
} esp_gdbstub_gdb_regfile_t;
@@ -5,4 +5,4 @@
*/
#pragma once
#define CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
#define CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME 1
@@ -231,26 +231,26 @@
// Custom caller-saved registers not used by default by the compiler:
.ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 948, 4, 4
ssi f0, \ptr, .Lxchal_ofs_+0
ssi f1, \ptr, .Lxchal_ofs_+4
ssi f2, \ptr, .Lxchal_ofs_+8
ssi f3, \ptr, .Lxchal_ofs_+12
ssi f4, \ptr, .Lxchal_ofs_+16
ssi f5, \ptr, .Lxchal_ofs_+20
ssi f6, \ptr, .Lxchal_ofs_+24
ssi f7, \ptr, .Lxchal_ofs_+28
ssi f8, \ptr, .Lxchal_ofs_+32
ssi f9, \ptr, .Lxchal_ofs_+36
ssi f10, \ptr, .Lxchal_ofs_+40
ssi f11, \ptr, .Lxchal_ofs_+44
ssi f12, \ptr, .Lxchal_ofs_+48
ssi f13, \ptr, .Lxchal_ofs_+52
ssi f14, \ptr, .Lxchal_ofs_+56
ssi f15, \ptr, .Lxchal_ofs_+60
rur.FCR \at1 // ureg 232
s32i \at1, \ptr, .Lxchal_ofs_+0
s32i \at1, \ptr, .Lxchal_ofs_+64
rur.FSR \at1 // ureg 233
s32i \at1, \ptr, .Lxchal_ofs_+4
ssi f0, \ptr, .Lxchal_ofs_+8
ssi f1, \ptr, .Lxchal_ofs_+12
ssi f2, \ptr, .Lxchal_ofs_+16
ssi f3, \ptr, .Lxchal_ofs_+20
ssi f4, \ptr, .Lxchal_ofs_+24
ssi f5, \ptr, .Lxchal_ofs_+28
ssi f6, \ptr, .Lxchal_ofs_+32
ssi f7, \ptr, .Lxchal_ofs_+36
ssi f8, \ptr, .Lxchal_ofs_+40
ssi f9, \ptr, .Lxchal_ofs_+44
ssi f10, \ptr, .Lxchal_ofs_+48
ssi f11, \ptr, .Lxchal_ofs_+52
ssi f12, \ptr, .Lxchal_ofs_+56
ssi f13, \ptr, .Lxchal_ofs_+60
ssi f14, \ptr, .Lxchal_ofs_+64
ssi f15, \ptr, .Lxchal_ofs_+68
s32i \at1, \ptr, .Lxchal_ofs_+68
.set .Lxchal_ofs_, .Lxchal_ofs_ + 72
.elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 948, 4, 4
@@ -273,26 +273,26 @@
// Custom caller-saved registers not used by default by the compiler:
.ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 948, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
lsi f0, \ptr, .Lxchal_ofs_+0
lsi f1, \ptr, .Lxchal_ofs_+4
lsi f2, \ptr, .Lxchal_ofs_+8
lsi f3, \ptr, .Lxchal_ofs_+12
lsi f4, \ptr, .Lxchal_ofs_+16
lsi f5, \ptr, .Lxchal_ofs_+20
lsi f6, \ptr, .Lxchal_ofs_+24
lsi f7, \ptr, .Lxchal_ofs_+28
lsi f8, \ptr, .Lxchal_ofs_+32
lsi f9, \ptr, .Lxchal_ofs_+36
lsi f10, \ptr, .Lxchal_ofs_+40
lsi f11, \ptr, .Lxchal_ofs_+44
lsi f12, \ptr, .Lxchal_ofs_+48
lsi f13, \ptr, .Lxchal_ofs_+52
lsi f14, \ptr, .Lxchal_ofs_+56
lsi f15, \ptr, .Lxchal_ofs_+60
l32i \at1, \ptr, .Lxchal_ofs_+64
wur.FCR \at1 // ureg 232
l32i \at1, \ptr, .Lxchal_ofs_+4
l32i \at1, \ptr, .Lxchal_ofs_+68
wur.FSR \at1 // ureg 233
lsi f0, \ptr, .Lxchal_ofs_+8
lsi f1, \ptr, .Lxchal_ofs_+12
lsi f2, \ptr, .Lxchal_ofs_+16
lsi f3, \ptr, .Lxchal_ofs_+20
lsi f4, \ptr, .Lxchal_ofs_+24
lsi f5, \ptr, .Lxchal_ofs_+28
lsi f6, \ptr, .Lxchal_ofs_+32
lsi f7, \ptr, .Lxchal_ofs_+36
lsi f8, \ptr, .Lxchal_ofs_+40
lsi f9, \ptr, .Lxchal_ofs_+44
lsi f10, \ptr, .Lxchal_ofs_+48
lsi f11, \ptr, .Lxchal_ofs_+52
lsi f12, \ptr, .Lxchal_ofs_+56
lsi f13, \ptr, .Lxchal_ofs_+60
lsi f14, \ptr, .Lxchal_ofs_+64
lsi f15, \ptr, .Lxchal_ofs_+68
.set .Lxchal_ofs_, .Lxchal_ofs_ + 72
.elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 948, 4, 4
@@ -203,26 +203,26 @@
// Custom caller-saved registers not used by default by the compiler:
.ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 948, 4, 4
ssi f0, \ptr, .Lxchal_ofs_+0
ssi f1, \ptr, .Lxchal_ofs_+4
ssi f2, \ptr, .Lxchal_ofs_+8
ssi f3, \ptr, .Lxchal_ofs_+12
ssi f4, \ptr, .Lxchal_ofs_+16
ssi f5, \ptr, .Lxchal_ofs_+20
ssi f6, \ptr, .Lxchal_ofs_+24
ssi f7, \ptr, .Lxchal_ofs_+28
ssi f8, \ptr, .Lxchal_ofs_+32
ssi f9, \ptr, .Lxchal_ofs_+36
ssi f10, \ptr, .Lxchal_ofs_+40
ssi f11, \ptr, .Lxchal_ofs_+44
ssi f12, \ptr, .Lxchal_ofs_+48
ssi f13, \ptr, .Lxchal_ofs_+52
ssi f14, \ptr, .Lxchal_ofs_+56
ssi f15, \ptr, .Lxchal_ofs_+60
rur.FCR \at1 // ureg 232
s32i \at1, \ptr, .Lxchal_ofs_+0
s32i \at1, \ptr, .Lxchal_ofs_+64
rur.FSR \at1 // ureg 233
s32i \at1, \ptr, .Lxchal_ofs_+4
ssi f0, \ptr, .Lxchal_ofs_+8
ssi f1, \ptr, .Lxchal_ofs_+12
ssi f2, \ptr, .Lxchal_ofs_+16
ssi f3, \ptr, .Lxchal_ofs_+20
ssi f4, \ptr, .Lxchal_ofs_+24
ssi f5, \ptr, .Lxchal_ofs_+28
ssi f6, \ptr, .Lxchal_ofs_+32
ssi f7, \ptr, .Lxchal_ofs_+36
ssi f8, \ptr, .Lxchal_ofs_+40
ssi f9, \ptr, .Lxchal_ofs_+44
ssi f10, \ptr, .Lxchal_ofs_+48
ssi f11, \ptr, .Lxchal_ofs_+52
ssi f12, \ptr, .Lxchal_ofs_+56
ssi f13, \ptr, .Lxchal_ofs_+60
ssi f14, \ptr, .Lxchal_ofs_+64
ssi f15, \ptr, .Lxchal_ofs_+68
s32i \at1, \ptr, .Lxchal_ofs_+68
.set .Lxchal_ofs_, .Lxchal_ofs_ + 72
.elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 948, 4, 4
@@ -245,26 +245,26 @@
// Custom caller-saved registers not used by default by the compiler:
.ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 948, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
lsi f0, \ptr, .Lxchal_ofs_+0
lsi f1, \ptr, .Lxchal_ofs_+4
lsi f2, \ptr, .Lxchal_ofs_+8
lsi f3, \ptr, .Lxchal_ofs_+12
lsi f4, \ptr, .Lxchal_ofs_+16
lsi f5, \ptr, .Lxchal_ofs_+20
lsi f6, \ptr, .Lxchal_ofs_+24
lsi f7, \ptr, .Lxchal_ofs_+28
lsi f8, \ptr, .Lxchal_ofs_+32
lsi f9, \ptr, .Lxchal_ofs_+36
lsi f10, \ptr, .Lxchal_ofs_+40
lsi f11, \ptr, .Lxchal_ofs_+44
lsi f12, \ptr, .Lxchal_ofs_+48
lsi f13, \ptr, .Lxchal_ofs_+52
lsi f14, \ptr, .Lxchal_ofs_+56
lsi f15, \ptr, .Lxchal_ofs_+60
l32i \at1, \ptr, .Lxchal_ofs_+64
wur.FCR \at1 // ureg 232
l32i \at1, \ptr, .Lxchal_ofs_+4
l32i \at1, \ptr, .Lxchal_ofs_+68
wur.FSR \at1 // ureg 233
lsi f0, \ptr, .Lxchal_ofs_+8
lsi f1, \ptr, .Lxchal_ofs_+12
lsi f2, \ptr, .Lxchal_ofs_+16
lsi f3, \ptr, .Lxchal_ofs_+20
lsi f4, \ptr, .Lxchal_ofs_+24
lsi f5, \ptr, .Lxchal_ofs_+28
lsi f6, \ptr, .Lxchal_ofs_+32
lsi f7, \ptr, .Lxchal_ofs_+36
lsi f8, \ptr, .Lxchal_ofs_+40
lsi f9, \ptr, .Lxchal_ofs_+44
lsi f10, \ptr, .Lxchal_ofs_+48
lsi f11, \ptr, .Lxchal_ofs_+52
lsi f12, \ptr, .Lxchal_ofs_+56
lsi f13, \ptr, .Lxchal_ofs_+60
lsi f14, \ptr, .Lxchal_ofs_+64
lsi f15, \ptr, .Lxchal_ofs_+68
.set .Lxchal_ofs_, .Lxchal_ofs_ + 72
.elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 948, 4, 4
@@ -1,10 +1,16 @@
set(srcs "test_app_main.c")
if(CONFIG_IDF_TARGET_ARCH_RISCV AND CONFIG_SOC_CPU_HAS_HWLOOP)
if(CONFIG_SOC_CPU_HAS_HWLOOP)
list(APPEND srcs "xesppie_loops.S")
endif()
if(CONFIG_SOC_CPU_HAS_FPU OR CONFIG_SOC_CPU_HAS_PIE)
list(APPEND srcs "coproc_regs.c")
endif()
if(CONFIG_IDF_TARGET_ARCH_RISCV)
set(ext_comp "riscv")
endif()
idf_component_register(SRCS ${srcs}
INCLUDE_DIRS ""
REQUIRES esp_gdbstub)
REQUIRES esp_gdbstub ${ext_comp})
target_compile_options(${COMPONENT_LIB} PRIVATE "-Wno-unused-label")
@@ -0,0 +1,199 @@
/*
* SPDX-FileCopyrightText: 2025-2026 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/
#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#if CONFIG_IDF_TARGET_ARCH_RISCV
#include "riscv/rvruntime-frames.h"
#endif
#include "soc/soc_caps.h"
static SemaphoreHandle_t sem = NULL;
#if SOC_CPU_HAS_PIE
static volatile bool test_pie_ready = false;
#endif
#if SOC_CPU_HAS_FPU
static volatile bool test_fpu_ready = false;
#endif
#if SOC_CPU_HAS_PIE
/**
 * Task body verifying GDB access to the PIE (vector) coprocessor registers.
 *
 * The task fills pie_regs_sample with per-task byte patterns, then parks at
 * the pie_start label — presumably a GDB breakpoint where the host-side
 * test script loads the sample values into the live PIE registers; verify
 * against the pytest script. Once test_pie_ready is set (via GDB), the task
 * dumps the live PIE registers into pie_regs and reports success through
 * the counting semaphore if they match the sample.
 *
 * @param arg Small integer (cast to void*) used to make each task's
 *            patterns unique.
 */
static void test_pie(void * arg)
{
    RvPIESaveArea pie_regs_sample, pie_regs;
    /* The store instructions below post-increment the address register, so
     * pin the pointer to a specific register (a5). */
    register void* ptr asm("a5");
    memset(pie_regs_sample.q0, 0x10 + (uint32_t) arg, sizeof(pie_regs_sample.q0));
    memset(pie_regs_sample.q1, 0x11 + (uint32_t) arg, sizeof(pie_regs_sample.q1));
    memset(pie_regs_sample.q2, 0x12 + (uint32_t) arg, sizeof(pie_regs_sample.q2));
    memset(pie_regs_sample.q3, 0x13 + (uint32_t) arg, sizeof(pie_regs_sample.q3));
    memset(pie_regs_sample.q4, 0x14 + (uint32_t) arg, sizeof(pie_regs_sample.q4));
    memset(pie_regs_sample.q5, 0x15 + (uint32_t) arg, sizeof(pie_regs_sample.q5));
    memset(pie_regs_sample.q6, 0x16 + (uint32_t) arg, sizeof(pie_regs_sample.q6));
    memset(pie_regs_sample.q7, 0x17 + (uint32_t) arg, sizeof(pie_regs_sample.q7));
    memset(pie_regs_sample.qacc_l_l, 0x18 + (uint32_t) arg, sizeof(pie_regs_sample.qacc_l_l));
    memset(pie_regs_sample.qacc_l_h, 0x19 + (uint32_t) arg, sizeof(pie_regs_sample.qacc_l_h));
    memset(pie_regs_sample.qacc_h_l, 0x1a + (uint32_t) arg, sizeof(pie_regs_sample.qacc_h_l));
    memset(pie_regs_sample.qacc_h_h, 0x1b + (uint32_t) arg, sizeof(pie_regs_sample.qacc_h_h));
    memset(pie_regs_sample.ua_state, 0x1c + (uint32_t) arg, sizeof(pie_regs_sample.ua_state));
    /* NOTE(review): only the 40-bit xacc pattern is initialized here; any
     * trailing bytes of the structs compared below (e.g. misc) are
     * presumably identical garbage or zero — confirm RvPIESaveArea layout. */
    memset(&pie_regs_sample.xacc, 0x1d + (uint32_t) arg, sizeof(uint8_t) * 5 /* 40-bits */);
pie_start:
    asm volatile ("nop");
    /* Wait for the host (GDB) to flip test_pie_ready. */
    while (!test_pie_ready) {
        vTaskDelay(50 / portTICK_PERIOD_MS);
    }
    /* Dump the live PIE register bank into pie_regs; each store bumps ptr
     * by 16 bytes. */
    __asm__ volatile ("mv %0, %1" : "=r"(ptr) : "r" (&pie_regs)); /* ptr = &pie_regs; */
    __asm__ volatile ("esp.vst.128.ip q0, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.vst.128.ip q1, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.vst.128.ip q2, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.vst.128.ip q3, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.vst.128.ip q4, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.vst.128.ip q5, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.vst.128.ip q6, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.vst.128.ip q7, %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.st.qacc.l.l.128.ip %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.st.qacc.l.h.128.ip %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.st.qacc.h.l.128.ip %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.st.qacc.h.h.128.ip %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.st.ua.state.ip %0, 16" :: "r" (ptr));
    __asm__ volatile ("esp.st.s.xacc.ip %0, 0" :: "r" (ptr));
    /* Report success only when the read-back matches the sample. */
    if (!memcmp(&pie_regs_sample, &pie_regs, sizeof(pie_regs))) {
        xSemaphoreGive((SemaphoreHandle_t) sem);
    }
    vTaskDelete(NULL);
}
#endif
#if SOC_CPU_HAS_FPU
/**
 * Task body verifying GDB access to the FPU registers on both Xtensa and
 * RISC-V targets.
 *
 * The task fills fpu_regs_sample with a per-task word pattern, touches one
 * FPU instruction to become the coprocessor owner, then parks at the
 * fpu_start label — presumably a GDB breakpoint where the host-side test
 * script loads the sample values into the live FPU registers; verify
 * against the pytest script. Once test_fpu_ready is set (via GDB), the
 * live registers are dumped to fpu_regs and compared with the sample.
 *
 * @param arg Small integer (cast to void*) that makes each task's pattern
 *            unique.
 */
static void test_fpu(void * arg)
{
#if CONFIG_IDF_TARGET_ARCH_XTENSA
    struct {
        float f[16];
    } fpu_regs_sample, fpu_regs;
#endif
#if CONFIG_IDF_TARGET_ARCH_RISCV
    RvFPUSaveArea fpu_regs_sample, fpu_regs;
#endif
    /* Fill the sample with a deterministic per-task word pattern. */
    uint32_t *ptr = (uint32_t *)&fpu_regs_sample;
    for (int i = 0; i < sizeof(fpu_regs_sample)/sizeof(uint32_t); i++) {
        ptr[i] = i + (int) arg;
    }
    /* Set FPU owner to current task by calling an instruction */
#if CONFIG_IDF_TARGET_ARCH_XTENSA
    __asm__ volatile ("ssi f0, %0, 0" :: "a" (&fpu_regs));
#endif
#if CONFIG_IDF_TARGET_ARCH_RISCV
    __asm__ volatile ("fsw ft0, %0" : "=m" (fpu_regs.ft0));
#endif
fpu_start:
    asm volatile ("nop");
    /* Wait for the host (GDB) to flip test_fpu_ready. */
    while (!test_fpu_ready) {
        vTaskDelay(50 / portTICK_PERIOD_MS);
    }
    /* Dump the live FPU registers into fpu_regs. */
#if CONFIG_IDF_TARGET_ARCH_XTENSA
    __asm__ volatile ("ssi f0, %0, 0" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f1, %0, 4" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f2, %0, 8" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f3, %0, 12" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f4, %0, 16" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f5, %0, 20" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f6, %0, 24" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f7, %0, 28" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f8, %0, 32" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f9, %0, 36" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f10, %0, 40" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f11, %0, 44" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f12, %0, 48" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f13, %0, 52" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f14, %0, 56" :: "a" (&fpu_regs));
    __asm__ volatile ("ssi f15, %0, 60" :: "a" (&fpu_regs));
#endif
#if CONFIG_IDF_TARGET_ARCH_RISCV
    __asm__ volatile ("fsw ft0, %0" : "=m" (fpu_regs.ft0));
    __asm__ volatile ("fsw ft1, %0" : "=m" (fpu_regs.ft1));
    __asm__ volatile ("fsw ft2, %0" : "=m" (fpu_regs.ft2));
    __asm__ volatile ("fsw ft3, %0" : "=m" (fpu_regs.ft3));
    __asm__ volatile ("fsw ft4, %0" : "=m" (fpu_regs.ft4));
    __asm__ volatile ("fsw ft5, %0" : "=m" (fpu_regs.ft5));
    __asm__ volatile ("fsw ft6, %0" : "=m" (fpu_regs.ft6));
    __asm__ volatile ("fsw ft7, %0" : "=m" (fpu_regs.ft7));
    __asm__ volatile ("fsw fs0, %0" : "=m" (fpu_regs.fs0));
    __asm__ volatile ("fsw fs1, %0" : "=m" (fpu_regs.fs1));
    __asm__ volatile ("fsw fa0, %0" : "=m" (fpu_regs.fa0));
    __asm__ volatile ("fsw fa1, %0" : "=m" (fpu_regs.fa1));
    __asm__ volatile ("fsw fa2, %0" : "=m" (fpu_regs.fa2));
    __asm__ volatile ("fsw fa3, %0" : "=m" (fpu_regs.fa3));
    __asm__ volatile ("fsw fa4, %0" : "=m" (fpu_regs.fa4));
    __asm__ volatile ("fsw fa5, %0" : "=m" (fpu_regs.fa5));
    __asm__ volatile ("fsw fa6, %0" : "=m" (fpu_regs.fa6));
    __asm__ volatile ("fsw fa7, %0" : "=m" (fpu_regs.fa7));
    __asm__ volatile ("fsw fs2, %0" : "=m" (fpu_regs.fs2));
    __asm__ volatile ("fsw fs3, %0" : "=m" (fpu_regs.fs3));
    __asm__ volatile ("fsw fs4, %0" : "=m" (fpu_regs.fs4));
    __asm__ volatile ("fsw fs5, %0" : "=m" (fpu_regs.fs5));
    __asm__ volatile ("fsw fs6, %0" : "=m" (fpu_regs.fs6));
    __asm__ volatile ("fsw fs7, %0" : "=m" (fpu_regs.fs7));
    __asm__ volatile ("fsw fs8, %0" : "=m" (fpu_regs.fs8));
    __asm__ volatile ("fsw fs9, %0" : "=m" (fpu_regs.fs9));
    __asm__ volatile ("fsw fs10, %0" : "=m" (fpu_regs.fs10));
    __asm__ volatile ("fsw fs11, %0" : "=m" (fpu_regs.fs11));
    __asm__ volatile ("fsw ft8, %0" : "=m" (fpu_regs.ft8));
    __asm__ volatile ("fsw ft9, %0" : "=m" (fpu_regs.ft9));
    __asm__ volatile ("fsw ft10, %0" : "=m" (fpu_regs.ft10));
    __asm__ volatile ("fsw ft11, %0" : "=m" (fpu_regs.ft11));
    __asm__ volatile ("csrr %0, fcsr" : "=r" (fpu_regs.fcsr));
#endif
    /* Report success only when the read-back matches the sample. */
    if (!memcmp(&fpu_regs_sample, &fpu_regs, sizeof(fpu_regs))) {
        xSemaphoreGive((SemaphoreHandle_t) sem);
    }
    vTaskDelete(NULL);
}
#endif
/* TODO: IDF-12550. Extend test for both CPU0 and CPU1 */
/**
 * Entry point for the coprocessor-register GDB tests: spawns two FPU tasks
 * and/or two PIE tasks on core 0 and waits until each pair reports success
 * through the counting semaphore.
 *
 * NOTE(review): the wait loops retry forever if a task never signals —
 * presumably the host-side test script enforces the overall timeout.
 * The fpu_succeed / pie_succeed labels look like GDB breakpoint anchors
 * (the component is built with -Wno-unused-label); confirm against the
 * pytest script.
 */
void test_coproc_regs(void) {
    sem = xSemaphoreCreateCounting(2, 0);
#if SOC_CPU_HAS_FPU
    xTaskCreatePinnedToCore(test_fpu, "test_fpu_1", 4096, (void *)1, 10, NULL, 0);
    xTaskCreatePinnedToCore(test_fpu, "test_fpu_2", 4096, (void *)2, 10, NULL, 0);
    /* Wait for both FPU tasks to verify their register contents. */
    for (int i = 0; i < 2;) {
        if (xSemaphoreTake(sem, 500 / portTICK_PERIOD_MS)) {
            i++;
        }
    }
    vTaskDelay(10); // Allow tasks to clean up
fpu_succeed:
#endif
#if SOC_CPU_HAS_PIE
    xTaskCreatePinnedToCore(test_pie, "test_pie_1", 4096, (void *)1, 10, NULL, 0);
    xTaskCreatePinnedToCore(test_pie, "test_pie_2", 4096, (void *)2, 10, NULL, 0);
    /* Wait for both PIE tasks to verify their register contents. */
    for (int i = 0; i < 2;) {
        if (xSemaphoreTake(sem, 500 / portTICK_PERIOD_MS)) {
            i++;
        }
    }
    vTaskDelay(10); // Allow tasks to clean up
pie_succeed:
#endif
    vSemaphoreDelete(sem);
}
@@ -1,18 +1,28 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdio.h>
#include "soc/soc_caps.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "sdkconfig.h"
#define TEST_COPROCESSOR_REGISTERS (SOC_CPU_HAS_FPU || SOC_CPU_HAS_PIE)
#define TEST_HWLOOP_INSTRUCTIONS (__riscv && SOC_CPU_HAS_HWLOOP)
int var_1;
int var_2;
int do_panic;
int start_testing;
#if TEST_COPROCESSOR_REGISTERS
int do_test_coproc_regs;
#endif
#if TEST_HWLOOP_INSTRUCTIONS
int do_test_xesppie_loops;
#endif
void foo(void)
{
@@ -21,14 +31,27 @@ void foo(void)
}
void test_xesppie_loops(void);
void test_coproc_regs(void);
void app_main(void)
{
printf("tested app is running.\n");
printf("waiting start_testing variable to be changed.\n");
while (!start_testing) { /* set via GDB */
vTaskDelay(100 / portTICK_PERIOD_MS);
}
vTaskDelay(5000 / portTICK_PERIOD_MS);
#if TEST_HWLOOP_INSTRUCTIONS
if (do_test_xesppie_loops) { /* set via GDB */
test_xesppie_loops();
return;
}
#endif
#if SOC_CPU_HAS_HWLOOP
test_xesppie_loops();
#if TEST_COPROCESSOR_REGISTERS
if (do_test_coproc_regs) { /* set via GDB */
test_coproc_regs();
return;
}
#endif
while(1) {
@@ -1,10 +1,11 @@
# SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
# SPDX-FileCopyrightText: 2023-2026 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: CC0-1.0
import os
import os.path as path
import sys
from collections.abc import Callable
from typing import Any
from typing import Dict
from typing import Optional
import pytest
from pytest_embedded_idf.utils import idf_parametrize
@@ -23,12 +24,12 @@ def get_line_number(lookup: str, offset: int = 0) -> int:
def start_gdb(dut: PanicTestDut) -> None:
    """Wait for the test app to boot, interrupt it, and attach GDB to the gdbstub."""
    dut.expect_exact('tested app is running.')
    dut.expect_exact('waiting start_testing variable to be changed.')
    dut.write(b'\x03')  # send Ctrl-C
    dut.start_gdb_for_gdbstub()
def run_and_break(dut: PanicTestDut, cmd: str) -> Dict[Any, Any]:
def run_and_break(dut: PanicTestDut, cmd: str) -> dict[Any, Any]:
responses = dut.gdb_write(cmd)
assert dut.find_gdb_response('running', 'result', responses) is not None
if not dut.find_gdb_response('stopped', 'notify', responses): # have not stopped on breakpoint yet
@@ -39,12 +40,38 @@ def run_and_break(dut: PanicTestDut, cmd: str) -> Dict[Any, Any]:
return payload
def dut_set_variable(dut: PanicTestDut, var_name: str, value: int) -> None:
    """Assign ``value`` to the firmware variable ``var_name`` via a GDB/MI command."""
    responses = dut.gdb_write(f'-gdb-set {var_name}={value}')
    assert dut.find_gdb_response('done', 'result', responses) is not None
def dut_enable_test(dut: PanicTestDut, testcase: Optional[str] = None) -> None:
    """Unblock ``app_main`` on the target and optionally select a specific testcase."""
    dut_set_variable(dut, 'start_testing', 1)
    if testcase:
        # Enable a specific testcase; otherwise the default testcase runs.
        dut_set_variable(dut, f'do_test_{testcase}', 1)
def dut_get_threads(dut: PanicTestDut) -> Any:
    """Return the thread list from ``-thread-info``, retrying once on an empty reply."""
    responses = dut.gdb_write('-thread-info')
    if not responses[0]['message']:
        # The first reply can come back empty; ask one more time.
        responses = dut.gdb_write('-thread-info')
    assert responses is not None
    return responses[0]['payload']['threads']
@pytest.mark.generic
@idf_parametrize('target', ['esp32p4'], indirect=['target'])
@pytest.mark.temp_skip_ci(targets=['esp32p4'], reason='p4 rev3 migration, IDF-13142')
def test_hwloop_jump(dut: PanicTestDut) -> None:
start_gdb(dut)
# enable coprocessors registers testing
dut_enable_test(dut, 'xesppie_loops')
cmd = '-break-insert --source xesppie_loops.S --function test_loop_start'
response = dut.find_gdb_response('done', 'result', dut.gdb_write(cmd))
assert response is not None
@@ -99,12 +126,182 @@ def test_hwloop_jump(dut: PanicTestDut) -> None:
assert payload['stopped-threads'] == 'all'
def check_registers_numbers(dut: PanicTestDut) -> None:
    """Verify the register set reported by gdbstub: count, numbering, and q0 width."""
    responses = dut.gdb_write('-data-list-register-values d')
    assert dut.find_gdb_response('done', 'result', responses) is not None
    registers = responses[0]['payload']['register-values']
    assert len(registers) == 83  # 80 registers supported + xesppie misc + pseudo frm, fflags
    # Expected numbering: 0..64 contiguous, then 68 (fcsr), then 4211.. (q0 onward).
    expected_ids = list(range(65)) + [68] + list(range(4211, 4211 + len(registers) - 66))
    for reg, expected in zip(registers, expected_ids):
        assert int(reg['number']) == expected
        if expected == 4211:
            # q0 must be reported as a 128-bit value.
            assert 'uint128' in reg['value']
def set_riscv_float_registers(dut: PanicTestDut, t_id: int, addition: int) -> None:
    """Write distinct test values into f0-f31 and fcsr of thread ``t_id``.

    RISC-V register numbers: f0-f31 are 33..64; fcsr sits apart at number 68.
    """
    responses = dut.gdb_write(f'-thread-select {t_id}')
    assert dut.find_gdb_response('done', 'result', responses) is not None
    # Note that it's a gap between the last floating register number and fcsr register number.
    reg_writes = [(33 + i, i + addition) for i in range(32)] + [(68, 32 + addition)]
    for reg_num, value in reg_writes:
        responses = dut.gdb_write(f'-data-write-register-values d {reg_num} {value}')
        assert dut.find_gdb_response('done', 'result', responses) is not None
def set_xtensa_float_registers(dut: PanicTestDut, t_id: int, addition: int) -> None:
    """Set Xtensa FPU registers (f0-f15, fcr, fsr) of thread ``t_id`` via GDB.

    The number of the first FPU register is target specific: 87 on esp32,
    84 on esp32s3; fcr and fsr follow directly after f15.
    """
    responses = dut.gdb_write(f'-thread-select {t_id}')
    assert dut.find_gdb_response('done', 'result', responses) is not None
    first_fpu_register = {'esp32': 87, 'esp32s3': 84}
    if dut.target not in first_fpu_register:
        raise ValueError(f'Unsupported target: {dut.target}')
    base = first_fpu_register[dut.target]
    for offset in range(18):  # 16 f* registers + fcr + fsr
        responses = dut.gdb_write(f'-data-write-register-values d {base + offset} {offset + addition}')
        assert dut.find_gdb_response('done', 'result', responses) is not None
def set_riscv_pie_registers(dut: PanicTestDut, t_id: int, addition: int) -> None:
    """Write recognizable byte patterns into the PIE registers of thread ``t_id``.

    Each 128-bit register (q0..q7, qacc_*, ua_state) is filled with a single
    byte repeated across both 64-bit halves; xacc is set as an array of five
    identical byte values.
    """
    cmd = f'-thread-select {t_id}'
    responses = dut.gdb_write(cmd)
    assert dut.find_gdb_response('done', 'result', responses) is not None

    def set_gdb_128_bit_register(reg: str, byte: int) -> None:
        # Build one 64-bit half as the byte repeated 8 times. Use zero-padded
        # '{byte:02x}' so the pattern stays 64 bits wide even for byte < 0x10
        # (hex(byte)[2:] would silently produce only a 32-bit value there).
        val64 = '0x' + f'{byte:02x}' * 8
        value = f'{{{val64}, {val64}}}'
        cmd = f'-interpreter-exec console "set ${reg}.v2_int64={value}"'
        responses = dut.gdb_write(cmd)
        assert dut.find_gdb_response('done', 'result', responses) is not None

    for i in range(8):
        set_gdb_128_bit_register(f'q{i}', 0x10 + i + addition)
    set_gdb_128_bit_register('qacc_l_l', 0x18 + addition)
    set_gdb_128_bit_register('qacc_l_h', 0x19 + addition)
    set_gdb_128_bit_register('qacc_h_l', 0x1A + addition)
    set_gdb_128_bit_register('qacc_h_h', 0x1B + addition)
    set_gdb_128_bit_register('ua_state', 0x1C + addition)
    # xacc is written element-wise as five identical byte values.
    xacc_val = ','.join([hex(0x1D + addition)] * 5)
    cmd = f'-interpreter-exec console "set $xacc={{{xacc_val}}}"'
    responses = dut.gdb_write(cmd)
    assert dut.find_gdb_response('done', 'result', responses) is not None
def coproc_registers_test(dut: PanicTestDut, regs_type: str, set_registers: Callable) -> None:
    """Drive the coprocessor register test for ``regs_type`` ('fpu' or 'pie').

    Stops both test tasks at the ``{regs_type}_start`` label, overwrites their
    coprocessor registers from GDB, then verifies that both tasks validated
    the values and deleted themselves.
    """
    # Break where each test task waits with its coprocessor state loaded.
    cmd = f'-break-insert --source coproc_regs.c --function test_{regs_type} --label {regs_type}_start'
    assert dut.find_gdb_response('done', 'result', dut.gdb_write(cmd)) is not None

    # Continue until both test tasks have reached the breakpoint.
    for _ in range(2):
        payload = run_and_break(dut, '-exec-continue')
        assert payload['reason'] == 'breakpoint-hit'
        assert payload['frame']['func'] == f'test_{regs_type}'
        assert payload['stopped-threads'] == 'all'

    # Set expected values in both testing tasks. This exercises register
    # writes for the coproc owner task (direct register write) as well as
    # for other tasks (write into the task's saved stack frame).
    found_count = 0
    for thread in dut_get_threads(dut):
        for task_num in (1, 2):
            if f'test_{regs_type}_{task_num}' in thread['details']:
                set_registers(dut, thread['id'], task_num)
                found_count += 1
    assert found_count == 2, f'Expected 2 coproc tasks, found {found_count}'

    dut_set_variable(dut, f'test_{regs_type}_ready', 1)

    responses = dut.gdb_write('-break-delete')
    assert dut.find_gdb_response('done', 'result', responses) is not None

    # Break after both tasks reported success and cleaned up.
    cmd = f'-break-insert --source coproc_regs.c --function test_coproc_regs --label {regs_type}_succeed'
    assert dut.find_gdb_response('done', 'result', dut.gdb_write(cmd)) is not None

    payload = run_and_break(dut, '-exec-continue')
    assert payload['reason'] == 'breakpoint-hit'
    assert payload['frame']['func'] == 'test_coproc_regs'
    assert payload['stopped-threads'] == 'all'

    # Both test tasks must be gone by now.
    found_count = 0
    for thread in dut_get_threads(dut):
        for task_num in (1, 2):
            if f'test_{regs_type}_{task_num}' in thread['details']:
                found_count += 1
    assert found_count == 0, f'Expected 0 coproc tasks, found {found_count}'
@pytest.mark.generic
@idf_parametrize('target', ['esp32', 'esp32s3', 'esp32p4'], indirect=['target'])
@pytest.mark.temp_skip_ci(targets=['esp32p4'], reason='p4 rev3 migration, IDF-13142')
def test_coproc_registers(dut: PanicTestDut) -> None:
    """Check reading and writing coprocessor registers through gdbstub."""
    start_gdb(dut)
    # enable coprocessors registers testing
    dut_enable_test(dut, 'coproc_regs')
    if dut.is_xtensa:
        coproc_registers_test(dut, 'fpu', set_xtensa_float_registers)
        return
    # RISC-V targets also validate the reported register numbering first.
    check_registers_numbers(dut)
    coproc_registers_test(dut, 'fpu', set_riscv_float_registers)
    if dut.target == 'esp32p4':
        # Only esp32p4 has the PIE coprocessor.
        coproc_registers_test(dut, 'pie', set_riscv_pie_registers)
@pytest.mark.generic
@idf_parametrize('target', ['supported_targets'], indirect=['target'])
@pytest.mark.temp_skip_ci(targets=['esp32p4'], reason='p4 rev3 migration, IDF-13142')
def test_gdbstub_runtime(dut: PanicTestDut) -> None:
start_gdb(dut)
dut_enable_test(dut)
# Test breakpoint
cmd = '-break-insert --source test_app_main.c --function app_main --label label_1'
response = dut.find_gdb_response('done', 'result', dut.gdb_write(cmd))
@@ -175,23 +372,18 @@ def test_gdbstub_runtime(dut: PanicTestDut) -> None:
assert dut.find_gdb_response('done', 'result', responses) is not None
cmd = '-exec-continue'
payload = run_and_break(dut, cmd)
assert payload['reason'] == 'signal-received'
assert payload['reason'] == 'watchpoint-trigger'
assert int(payload['value']['new']) == int(payload['value']['old']) + 2
assert payload['frame']['func'] == 'foo'
assert payload['frame']['line'] == str(get_line_number('var_2--;'))
assert payload['stopped-threads'] == 'all'
# Uncomment this when implement send reason to gdb: GCC-313
#
# assert payload['reason'] == 'watchpoint-trigger'
# assert int(payload['value']['new']) == int(payload['value']['old']) + 1
# assert payload['frame']['line'] == '14'
cmd = '-break-delete 2'
responses = dut.gdb_write(cmd)
assert dut.find_gdb_response('done', 'result', responses) is not None
# test set variable
cmd = '-gdb-set do_panic=1'
responses = dut.gdb_write(cmd)
assert dut.find_gdb_response('done', 'result', responses) is not None
dut_set_variable(dut, 'do_panic', 1)
# test panic handling
cmd = '-exec-continue'
@@ -211,6 +403,8 @@ def test_gdbstub_runtime(dut: PanicTestDut) -> None:
def test_gdbstub_runtime_xtensa_stepping_bug(dut: PanicTestDut) -> None:
start_gdb(dut)
dut_enable_test(dut)
# Test breakpoint
cmd = '-break-insert --source test_app_main.c --function app_main --label label_1'
response = dut.find_gdb_response('done', 'result', dut.gdb_write(cmd))