diff --git a/components/esp_hw_support/include/esp_private/sleep_cpu.h b/components/esp_hw_support/include/esp_private/sleep_cpu.h index 49ed6d5a45..5ace9e067f 100644 --- a/components/esp_hw_support/include/esp_private/sleep_cpu.h +++ b/components/esp_hw_support/include/esp_private/sleep_cpu.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD + * SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD * * SPDX-License-Identifier: Apache-2.0 */ @@ -7,6 +7,9 @@ #pragma once #include <stdint.h> #include "sdkconfig.h" +#include "stdbool.h" +#include "esp_err.h" +#include "soc/soc_caps.h" #ifdef __cplusplus extern "C" { @@ -18,8 +21,7 @@ extern "C" { * This file contains declarations of cpu retention related functions in light sleep mode. */ -#if SOC_PM_SUPPORT_CPU_PD - +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP || SOC_PM_SUPPORT_CPU_PD /** * @brief Whether to allow the cpu power domain to be powered off. * * @@ -27,7 +29,9 @@ extern "C" { * for cpu retention, the cpu power domain can be powered off. */ bool cpu_domain_pd_allowed(void); +#endif +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP /** * @brief Configure the parameters of the CPU domain during the sleep process * * @@ -38,10 +42,7 @@ bool cpu_domain_pd_allowed(void); */ esp_err_t sleep_cpu_configure(bool light_sleep_enable); -#endif - -#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL - +#if SOC_PM_CPU_RETENTION_BY_RTCCNTL /** * @brief Enable cpu retention of some modules. * * @@ -57,16 +58,32 @@ void sleep_enable_cpu_retention(void); * retention of modules such as CPU and I/D-cache tag memory. */ void sleep_disable_cpu_retention(void); +#endif // SOC_PM_CPU_RETENTION_BY_RTCCNTL -#endif // SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL - - -#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW - +#if SOC_PM_CPU_RETENTION_BY_SW esp_err_t esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uint32_t, uint32_t, bool), uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp); +#endif // SOC_PM_CPU_RETENTION_BY_SW +#endif // CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP -#endif // SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW +#if !CONFIG_FREERTOS_UNICORE && CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP +/** + * Do sleep preparation for the other SMP cores + */ +void sleep_smp_cpu_sleep_prepare(void); + +/** + * Do wakeup preparation for the other SMP cores + */ +void sleep_smp_cpu_wakeup_prepare(void); + +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP +/** + * Notify the other core that this sleep does not require retention. + */ +void esp_sleep_cpu_skip_retention(void); +#endif // CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP +#endif // !CONFIG_FREERTOS_UNICORE && CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP #ifdef __cplusplus } diff --git a/components/esp_hw_support/include/esp_sleep.h b/components/esp_hw_support/include/esp_sleep.h index 29c2af96d8..018820e364 100644 --- a/components/esp_hw_support/include/esp_sleep.h +++ b/components/esp_hw_support/include/esp_sleep.h @@ -713,7 +713,7 @@ void esp_default_wake_deep_sleep(void); */ void esp_deep_sleep_disable_rom_logging(void); -#ifdef SOC_PM_SUPPORT_CPU_PD +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP #if SOC_PM_CPU_RETENTION_BY_RTCCNTL /** * @@ -752,7 +752,7 @@ esp_err_t esp_sleep_cpu_retention_init(void); * Release system retention memory.
*/ esp_err_t esp_sleep_cpu_retention_deinit(void); -#endif +#endif // CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP /** * @brief Configure to isolate all GPIO pins in sleep state diff --git a/components/esp_hw_support/lowpower/CMakeLists.txt b/components/esp_hw_support/lowpower/CMakeLists.txt index bbcfdc7753..c951006ad2 100644 --- a/components/esp_hw_support/lowpower/CMakeLists.txt +++ b/components/esp_hw_support/lowpower/CMakeLists.txt @@ -4,7 +4,7 @@ endif() set(srcs) -if(CONFIG_SOC_PM_SUPPORT_CPU_PD) +if(CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP) list(APPEND srcs "cpu_retention/port/${target}/sleep_cpu.c") if(CONFIG_SOC_PM_CPU_RETENTION_BY_SW) list(APPEND srcs "cpu_retention/port/${target}/sleep_cpu_asm.S") diff --git a/components/esp_hw_support/lowpower/cpu_retention/port/esp32p4/rvsleep-frames.h b/components/esp_hw_support/lowpower/cpu_retention/port/esp32p4/rvsleep-frames.h new file mode 100644 index 0000000000..2371cd8eb6 --- /dev/null +++ b/components/esp_hw_support/lowpower/cpu_retention/port/esp32p4/rvsleep-frames.h @@ -0,0 +1,180 @@ +/* + * SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef __RVSLEEP_FRAMES_H__ +#define __RVSLEEP_FRAMES_H__ + +#include "sdkconfig.h" + +/* Align a value up to nearest n-byte boundary, where n is a power of 2. */ +#define ALIGNUP(n, val) (((val) + (n) - 1) & -(n)) + +#ifdef STRUCT_BEGIN +#undef STRUCT_BEGIN +#undef STRUCT_FIELD +#undef STRUCT_AFIELD +#undef STRUCT_END +#endif + +#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__) +#ifdef __clang__ +#define STRUCT_BEGIN .set RV_STRUCT_OFFSET, 0 +#define STRUCT_FIELD(ctype,size,asname,name) .set asname, RV_STRUCT_OFFSET; .set RV_STRUCT_OFFSET, asname + size +#define STRUCT_AFIELD(ctype,size,asname,name,n) .set asname, RV_STRUCT_OFFSET;\ + .set RV_STRUCT_OFFSET, asname + (size)*(n); +#define STRUCT_END(sname) .set sname##Size, RV_STRUCT_OFFSET; +#else // __clang__ +#define STRUCT_BEGIN .pushsection .text; .struct 0 +#define STRUCT_FIELD(ctype,size,asname,name) asname: .space size +#define STRUCT_AFIELD(ctype,size,asname,name,n) asname: .space (size)*(n) +#define STRUCT_END(sname) sname##Size:; .popsection +#endif // __clang__ +#else +#define STRUCT_BEGIN typedef struct { +#define STRUCT_FIELD(ctype,size,asname,name) ctype name; +#define STRUCT_AFIELD(ctype,size,asname,name,n) ctype name[n]; +#define STRUCT_END(sname) } sname; +#endif + +/* + * ------------------------------------------------------------------------------- + * RISC-V CORE CRITICAL REGISTER CONTEXT LAYOUT FOR SLEEP + * ------------------------------------------------------------------------------- + */ +STRUCT_BEGIN + STRUCT_FIELD (long, 4, RV_SLP_CTX_MEPC, mepc) /* Machine Exception Program Counter */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_RA, ra) /* Return address */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_SP, sp) /* Stack pointer */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_GP, gp) /* Global pointer */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_TP, tp) /* Thread pointer */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_T0, t0) /* Temporary/alternate link register */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_T1, t1) /* t1-2: Temporaries */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_T2, t2) + STRUCT_FIELD (long, 4, RV_SLP_CTX_S0, s0) /* Saved register/frame pointer */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_S1, s1) /* Saved register */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_A0, a0) /* a0-1: Function arguments/return address */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_A1, a1) + STRUCT_FIELD (long, 4, RV_SLP_CTX_A2, a2) 
/* a2-7: Function arguments */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_A3, a3) + STRUCT_FIELD (long, 4, RV_SLP_CTX_A4, a4) + STRUCT_FIELD (long, 4, RV_SLP_CTX_A5, a5) + STRUCT_FIELD (long, 4, RV_SLP_CTX_A6, a6) + STRUCT_FIELD (long, 4, RV_SLP_CTX_A7, a7) + STRUCT_FIELD (long, 4, RV_SLP_CTX_S2, s2) /* s2-11: Saved registers */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_S3, s3) + STRUCT_FIELD (long, 4, RV_SLP_CTX_S4, s4) + STRUCT_FIELD (long, 4, RV_SLP_CTX_S5, s5) + STRUCT_FIELD (long, 4, RV_SLP_CTX_S6, s6) + STRUCT_FIELD (long, 4, RV_SLP_CTX_S7, s7) + STRUCT_FIELD (long, 4, RV_SLP_CTX_S8, s8) + STRUCT_FIELD (long, 4, RV_SLP_CTX_S9, s9) + STRUCT_FIELD (long, 4, RV_SLP_CTX_S10, s10) + STRUCT_FIELD (long, 4, RV_SLP_CTX_S11, s11) + STRUCT_FIELD (long, 4, RV_SLP_CTX_T3, t3) /* t3-6: Temporaries */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_T4, t4) + STRUCT_FIELD (long, 4, RV_SLP_CTX_T5, t5) + STRUCT_FIELD (long, 4, RV_SLP_CTX_T6, t6) + STRUCT_FIELD (long, 4, RV_SLP_CTX_MSTATUS, mstatus) /* Machine Status */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_MTVEC, mtvec) /* Machine Trap-Vector Base Address */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_MTVT, mtvt) + STRUCT_FIELD (long, 4, RV_SLP_CTX_MCAUSE, mcause) /* Machine Trap Cause */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_MTVAL, mtval) /* Machine Trap Value */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_MIE, mie) /* Machine intr enable */ + STRUCT_FIELD (long, 4, RV_SLP_CTX_MIP, mip) /* Machine intr pending */ + + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMUFUNC, pmufunc) /* A field is used to identify whether it is going + * to sleep or has just been awakened. We use the + * lowest 2 bits as indication information, 3 means + * being awakened, 1 means going to sleep */ +#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME + STRUCT_FIELD (long, 4, RV_SLP_CSF_CTX_CRC, frame_crc) /* Used to check RvCoreCriticalSleepFrame integrity */ +#endif +STRUCT_END(RvCoreCriticalSleepFrame) + +#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__) +#define RV_SLEEP_CTX_SZ1 RvCoreCriticalSleepFrameSize +#else +#define RV_SLEEP_CTX_SZ1 sizeof(RvCoreCriticalSleepFrame) +#endif + +/* + * Sleep stack frame size, after align up to 16 bytes boundary + */ +#define RV_SLEEP_CTX_FRMSZ (ALIGNUP(0x10, RV_SLEEP_CTX_SZ1)) + +/* + * ------------------------------------------------------------------------------- + * RISC-V CORE NON-CRITICAL REGISTER CONTEXT LAYOUT FOR SLEEP + * ------------------------------------------------------------------------------- + */ +STRUCT_BEGIN + STRUCT_FIELD (long, 4, RV_SLP_CTX_MSCRATCH, mscratch) + STRUCT_FIELD (long, 4, RV_SLP_CTX_MISA, misa) + STRUCT_FIELD (long, 4, RV_SLP_CTX_TSELECT, tselect) + STRUCT_FIELD (long, 4, RV_SLP_CTX_TDATA1, tdata1) + STRUCT_FIELD (long, 4, RV_SLP_CTX_TDATA2, tdata2) + STRUCT_FIELD (long, 4, RV_SLP_CTX_TCONTROL, tcontrol) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR0, pmpaddr0) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR1, pmpaddr1) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR2, pmpaddr2) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR3, pmpaddr3) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR4, pmpaddr4) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR5, pmpaddr5) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR6, pmpaddr6) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR7, pmpaddr7) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR8, pmpaddr8) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR9, pmpaddr9) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR10, pmpaddr10) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR11, pmpaddr11) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR12, pmpaddr12) + STRUCT_FIELD (long, 4,
RV_SLP_CTX_PMPADDR13, pmpaddr13) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR14, pmpaddr14) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR15, pmpaddr15) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPCFG0, pmpcfg0) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPCFG1, pmpcfg1) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPCFG2, pmpcfg2) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPCFG3, pmpcfg3) + + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR0, pmaaddr0) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR1, pmaaddr1) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR2, pmaaddr2) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR3, pmaaddr3) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR4, pmaaddr4) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR5, pmaaddr5) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR6, pmaaddr6) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR7, pmaaddr7) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR8, pmaaddr8) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR9, pmaaddr9) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR10, pmaaddr10) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR11, pmaaddr11) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR12, pmaaddr12) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR13, pmaaddr13) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR14, pmaaddr14) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR15, pmaaddr15) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG0, pmacfg0) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG1, pmacfg1) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG2, pmacfg2) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG3, pmacfg3) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG4, pmacfg4) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG5, pmacfg5) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG6, pmacfg6) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG7, pmacfg7) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG8, pmacfg8) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG9, pmacfg9) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG10, pmacfg10) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG11, pmacfg11) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG12, pmacfg12) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG13, pmacfg13) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG14, pmacfg14) + STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG15, pmacfg15) + + STRUCT_FIELD (long, 4, RV_SLP_CTX_MCYCLE, mcycle) +#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME + STRUCT_FIELD (long, 4, RV_SLP_NCSF_CTX_CRC, frame_crc) /* Used to check RvCoreNonCriticalSleepFrame integrity */ +#endif +STRUCT_END(RvCoreNonCriticalSleepFrame) + +#endif /* #ifndef __RVSLEEP_FRAMES_H__ */ diff --git a/components/esp_hw_support/lowpower/cpu_retention/port/esp32p4/sleep_cpu.c b/components/esp_hw_support/lowpower/cpu_retention/port/esp32p4/sleep_cpu.c new file mode 100644 index 0000000000..3b18daad91 --- /dev/null +++ b/components/esp_hw_support/lowpower/cpu_retention/port/esp32p4/sleep_cpu.c @@ -0,0 +1,607 @@ +/* + * SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include + +#include "esp_attr.h" +#include "esp_check.h" +#include "esp_ipc_isr.h" +#include "esp_sleep.h" +#include "esp_log.h" +#include "esp_crc.h" +#include "freertos/FreeRTOS.h" +#include "freertos/task.h" +#include "esp_heap_caps.h" +#include "riscv/csr.h" +#include "soc/cache_reg.h" +#include "soc/clic_reg.h" +#include "soc/rtc_periph.h" +#include "soc/soc_caps.h" +#include "soc/hp_sys_clkrst_reg.h" +#include "esp_private/sleep_cpu.h" +#include "esp_private/sleep_event.h" +#include "sdkconfig.h" +#include "esp_private/esp_pmu.h" + +#include "esp32p4/rom/ets_sys.h" +#include 
"esp32p4/rom/rtc.h" +#include "esp32p4/rom/cache.h" +#include "rvsleep-frames.h" + +#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME +#include "esp_private/system_internal.h" +#include "hal/clk_gate_ll.h" +#include "hal/uart_hal.h" +#endif + + +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && !CONFIG_FREERTOS_UNICORE +#include +#include "soc/hp_system_reg.h" +typedef enum { + SMP_IDLE, + SMP_BACKUP_START, + SMP_BACKUP_DONE, + SMP_RESTORE_START, + SMP_RESTORE_DONE, + SMP_SKIP_RETENTION, +} smp_retention_state_t; + +static TCM_DRAM_ATTR smp_retention_state_t s_smp_retention_state[portNUM_PROCESSORS]; +#endif + +static __attribute__((unused)) const char *TAG = "sleep"; + +typedef struct { + uint32_t start; + uint32_t end; +} cpu_domain_dev_regs_region_t; + +typedef struct { + cpu_domain_dev_regs_region_t *region; + int region_num; + uint32_t *regs_frame; +} cpu_domain_dev_sleep_frame_t; + +/** + * Internal structure which holds all requested light sleep cpu retention parameters + */ +typedef struct { + struct { + RvCoreCriticalSleepFrame *critical_frame[portNUM_PROCESSORS]; + RvCoreNonCriticalSleepFrame *non_critical_frame[portNUM_PROCESSORS]; + cpu_domain_dev_sleep_frame_t *cache_config_frame; + cpu_domain_dev_sleep_frame_t *clic_frame[portNUM_PROCESSORS]; + } retent; +} sleep_cpu_retention_t; + +static DRAM_ATTR __attribute__((unused)) sleep_cpu_retention_t s_cpu_retention; + +extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame[portNUM_PROCESSORS]; + +static void * cpu_domain_dev_sleep_frame_alloc_and_init(const cpu_domain_dev_regs_region_t *regions, const int region_num) +{ + const int region_sz = sizeof(cpu_domain_dev_regs_region_t) * region_num; + int regs_frame_sz = 0; + for (int num = 0; num < region_num; num++) { + regs_frame_sz += regions[num].end - regions[num].start; + } + void *frame = heap_caps_malloc(sizeof(cpu_domain_dev_sleep_frame_t) + region_sz + regs_frame_sz, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL); + if (frame) { + cpu_domain_dev_regs_region_t *region = (cpu_domain_dev_regs_region_t *)(frame + sizeof(cpu_domain_dev_sleep_frame_t)); + memcpy(region, regions, region_num * sizeof(cpu_domain_dev_regs_region_t)); + void *regs_frame = frame + sizeof(cpu_domain_dev_sleep_frame_t) + region_sz; + memset(regs_frame, 0, regs_frame_sz); + *(cpu_domain_dev_sleep_frame_t *)frame = (cpu_domain_dev_sleep_frame_t) { + .region = region, + .region_num = region_num, + .regs_frame = (uint32_t *)regs_frame + }; + } + return frame; +} + +static inline void * cpu_domain_cache_config_sleep_frame_alloc_and_init(void) +{ + const static cpu_domain_dev_regs_region_t regions[] = { + { .start = CACHE_L1_ICACHE_CTRL_REG, .end = CACHE_L1_BYPASS_CACHE_CONF_REG + 4 }, + { .start = CACHE_L2_CACHE_CTRL_REG, .end = CACHE_L2_CACHE_BLOCKSIZE_CONF_REG + 4 } + }; + return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0])); +} + +static inline void * cpu_domain_clic_sleep_frame_alloc_and_init(uint8_t core_id) +{ + const static cpu_domain_dev_regs_region_t regions[portNUM_PROCESSORS][2] = { + [0 ... 
portNUM_PROCESSORS - 1] = { + { .start = CLIC_INT_CONFIG_REG, .end = CLIC_INT_THRESH_REG + 4 }, + { .start = CLIC_INT_CTRL_REG(0), .end = CLIC_INT_CTRL_REG(47) + 4 }, + } + }; + return cpu_domain_dev_sleep_frame_alloc_and_init(regions[core_id], sizeof(regions[core_id]) / sizeof(cpu_domain_dev_regs_region_t)); +} + +static esp_err_t esp_sleep_cpu_retention_init_impl(void) +{ + for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) { + if (s_cpu_retention.retent.critical_frame[core_id] == NULL) { + void *frame = heap_caps_calloc(1, RV_SLEEP_CTX_FRMSZ, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL); + if (frame == NULL) { + goto err; + } + s_cpu_retention.retent.critical_frame[core_id] = (RvCoreCriticalSleepFrame *)frame; + rv_core_critical_regs_frame[core_id] = (RvCoreCriticalSleepFrame *)frame; + } + if (s_cpu_retention.retent.non_critical_frame[core_id] == NULL) { + void *frame = heap_caps_calloc(1, sizeof(RvCoreNonCriticalSleepFrame), MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL); + if (frame == NULL) { + goto err; + } + s_cpu_retention.retent.non_critical_frame[core_id] = (RvCoreNonCriticalSleepFrame *)frame; + } + } + if (s_cpu_retention.retent.cache_config_frame == NULL) { + void *frame = cpu_domain_cache_config_sleep_frame_alloc_and_init(); + if (frame == NULL) { + goto err; + } + s_cpu_retention.retent.cache_config_frame = (cpu_domain_dev_sleep_frame_t *)frame; + } + for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) { + if (s_cpu_retention.retent.clic_frame[core_id] == NULL) { + void *frame = cpu_domain_clic_sleep_frame_alloc_and_init(core_id); + if (frame == NULL) { + goto err; + } + s_cpu_retention.retent.clic_frame[core_id] = (cpu_domain_dev_sleep_frame_t *)frame; + } + } +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && !CONFIG_FREERTOS_UNICORE + for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) { + atomic_init(&s_smp_retention_state[core_id], SMP_IDLE); + } +#endif + return ESP_OK; +err: + esp_sleep_cpu_retention_deinit(); + return ESP_ERR_NO_MEM; +} + +static esp_err_t esp_sleep_cpu_retention_deinit_impl(void) +{ + for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) { + if (s_cpu_retention.retent.critical_frame[core_id]) { + heap_caps_free((void *)s_cpu_retention.retent.critical_frame[core_id]); + s_cpu_retention.retent.critical_frame[core_id] = NULL; + rv_core_critical_regs_frame[core_id] = NULL; + } + if (s_cpu_retention.retent.non_critical_frame[core_id]) { + heap_caps_free((void *)s_cpu_retention.retent.non_critical_frame[core_id]); + s_cpu_retention.retent.non_critical_frame[core_id] = NULL; + } + } + if (s_cpu_retention.retent.cache_config_frame) { + heap_caps_free((void *)s_cpu_retention.retent.cache_config_frame); + s_cpu_retention.retent.cache_config_frame = NULL; + } + for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) { + if (s_cpu_retention.retent.clic_frame[core_id]) { + heap_caps_free((void *)s_cpu_retention.retent.clic_frame[core_id]); + s_cpu_retention.retent.clic_frame[core_id] = NULL; + } + } + return ESP_OK; +} + +FORCE_INLINE_ATTR uint32_t save_mstatus_and_disable_global_int(void) +{ + return RV_READ_MSTATUS_AND_DISABLE_INTR(); +} + +FORCE_INLINE_ATTR void restore_mstatus(uint32_t mstatus_val) +{ + RV_WRITE_CSR(mstatus, mstatus_val); +} + +static IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save(void) +{ + RvCoreNonCriticalSleepFrame *frame = s_cpu_retention.retent.non_critical_frame[esp_cpu_get_core_id()]; + + frame->mscratch = RV_READ_CSR(mscratch); + frame->misa = 
RV_READ_CSR(misa); + frame->tselect = RV_READ_CSR(tselect); + frame->tdata1 = RV_READ_CSR(tdata1); + frame->tdata2 = RV_READ_CSR(tdata2); + frame->tcontrol = RV_READ_CSR(tcontrol); + + frame->pmpaddr0 = RV_READ_CSR(pmpaddr0); + frame->pmpaddr1 = RV_READ_CSR(pmpaddr1); + frame->pmpaddr2 = RV_READ_CSR(pmpaddr2); + frame->pmpaddr3 = RV_READ_CSR(pmpaddr3); + frame->pmpaddr4 = RV_READ_CSR(pmpaddr4); + frame->pmpaddr5 = RV_READ_CSR(pmpaddr5); + frame->pmpaddr6 = RV_READ_CSR(pmpaddr6); + frame->pmpaddr7 = RV_READ_CSR(pmpaddr7); + frame->pmpaddr8 = RV_READ_CSR(pmpaddr8); + frame->pmpaddr9 = RV_READ_CSR(pmpaddr9); + frame->pmpaddr10 = RV_READ_CSR(pmpaddr10); + frame->pmpaddr11 = RV_READ_CSR(pmpaddr11); + frame->pmpaddr12 = RV_READ_CSR(pmpaddr12); + frame->pmpaddr13 = RV_READ_CSR(pmpaddr13); + frame->pmpaddr14 = RV_READ_CSR(pmpaddr14); + frame->pmpaddr15 = RV_READ_CSR(pmpaddr15); + frame->pmpcfg0 = RV_READ_CSR(pmpcfg0); + frame->pmpcfg1 = RV_READ_CSR(pmpcfg1); + frame->pmpcfg2 = RV_READ_CSR(pmpcfg2); + frame->pmpcfg3 = RV_READ_CSR(pmpcfg3); + + frame->pmaaddr0 = RV_READ_CSR(CSR_PMAADDR(0)); + frame->pmaaddr1 = RV_READ_CSR(CSR_PMAADDR(1)); + frame->pmaaddr2 = RV_READ_CSR(CSR_PMAADDR(2)); + frame->pmaaddr3 = RV_READ_CSR(CSR_PMAADDR(3)); + frame->pmaaddr4 = RV_READ_CSR(CSR_PMAADDR(4)); + frame->pmaaddr5 = RV_READ_CSR(CSR_PMAADDR(5)); + frame->pmaaddr6 = RV_READ_CSR(CSR_PMAADDR(6)); + frame->pmaaddr7 = RV_READ_CSR(CSR_PMAADDR(7)); + frame->pmaaddr8 = RV_READ_CSR(CSR_PMAADDR(8)); + frame->pmaaddr9 = RV_READ_CSR(CSR_PMAADDR(9)); + frame->pmaaddr10 = RV_READ_CSR(CSR_PMAADDR(10)); + frame->pmaaddr11 = RV_READ_CSR(CSR_PMAADDR(11)); + frame->pmaaddr12 = RV_READ_CSR(CSR_PMAADDR(12)); + frame->pmaaddr13 = RV_READ_CSR(CSR_PMAADDR(13)); + frame->pmaaddr14 = RV_READ_CSR(CSR_PMAADDR(14)); + frame->pmaaddr15 = RV_READ_CSR(CSR_PMAADDR(15)); + frame->pmacfg0 = RV_READ_CSR(CSR_PMACFG(0)); + frame->pmacfg1 = RV_READ_CSR(CSR_PMACFG(1)); + frame->pmacfg2 = RV_READ_CSR(CSR_PMACFG(2)); + frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(3)); + frame->pmacfg4 = RV_READ_CSR(CSR_PMACFG(4)); + frame->pmacfg5 = RV_READ_CSR(CSR_PMACFG(5)); + frame->pmacfg6 = RV_READ_CSR(CSR_PMACFG(6)); + frame->pmacfg7 = RV_READ_CSR(CSR_PMACFG(7)); + frame->pmacfg8 = RV_READ_CSR(CSR_PMACFG(8)); + frame->pmacfg9 = RV_READ_CSR(CSR_PMACFG(9)); + frame->pmacfg10 = RV_READ_CSR(CSR_PMACFG(10)); + frame->pmacfg11 = RV_READ_CSR(CSR_PMACFG(11)); + frame->pmacfg12 = RV_READ_CSR(CSR_PMACFG(12)); + frame->pmacfg13 = RV_READ_CSR(CSR_PMACFG(13)); + frame->pmacfg14 = RV_READ_CSR(CSR_PMACFG(14)); + frame->pmacfg15 = RV_READ_CSR(CSR_PMACFG(15)); + frame->mcycle = RV_READ_CSR(mcycle); + return frame; +} + +static IRAM_ATTR void rv_core_noncritical_regs_restore(void) +{ + RvCoreNonCriticalSleepFrame *frame = s_cpu_retention.retent.non_critical_frame[esp_cpu_get_core_id()]; + + RV_WRITE_CSR(mscratch, frame->mscratch); + RV_WRITE_CSR(misa, frame->misa); + RV_WRITE_CSR(tselect, frame->tselect); + RV_WRITE_CSR(tdata1, frame->tdata1); + RV_WRITE_CSR(tdata2, frame->tdata2); + RV_WRITE_CSR(tcontrol, frame->tcontrol); + RV_WRITE_CSR(pmpaddr0, frame->pmpaddr0); + RV_WRITE_CSR(pmpaddr1, frame->pmpaddr1); + RV_WRITE_CSR(pmpaddr2, frame->pmpaddr2); + RV_WRITE_CSR(pmpaddr3, frame->pmpaddr3); + RV_WRITE_CSR(pmpaddr4, frame->pmpaddr4); + RV_WRITE_CSR(pmpaddr5, frame->pmpaddr5); + RV_WRITE_CSR(pmpaddr6, frame->pmpaddr6); + RV_WRITE_CSR(pmpaddr7, frame->pmpaddr7); + RV_WRITE_CSR(pmpaddr8, frame->pmpaddr8); + RV_WRITE_CSR(pmpaddr9, frame->pmpaddr9); + RV_WRITE_CSR(pmpaddr10,frame->pmpaddr10); 
+ RV_WRITE_CSR(pmpaddr11,frame->pmpaddr11); + RV_WRITE_CSR(pmpaddr12,frame->pmpaddr12); + RV_WRITE_CSR(pmpaddr13,frame->pmpaddr13); + RV_WRITE_CSR(pmpaddr14,frame->pmpaddr14); + RV_WRITE_CSR(pmpaddr15,frame->pmpaddr15); + RV_WRITE_CSR(pmpcfg0, frame->pmpcfg0); + RV_WRITE_CSR(pmpcfg1, frame->pmpcfg1); + RV_WRITE_CSR(pmpcfg2, frame->pmpcfg2); + RV_WRITE_CSR(pmpcfg3, frame->pmpcfg3); + + RV_WRITE_CSR(CSR_PMAADDR(0), frame->pmaaddr0); + RV_WRITE_CSR(CSR_PMAADDR(1), frame->pmaaddr1); + RV_WRITE_CSR(CSR_PMAADDR(2), frame->pmaaddr2); + RV_WRITE_CSR(CSR_PMAADDR(3), frame->pmaaddr3); + RV_WRITE_CSR(CSR_PMAADDR(4), frame->pmaaddr4); + RV_WRITE_CSR(CSR_PMAADDR(5), frame->pmaaddr5); + RV_WRITE_CSR(CSR_PMAADDR(6), frame->pmaaddr6); + RV_WRITE_CSR(CSR_PMAADDR(7), frame->pmaaddr7); + RV_WRITE_CSR(CSR_PMAADDR(8), frame->pmaaddr8); + RV_WRITE_CSR(CSR_PMAADDR(9), frame->pmaaddr9); + RV_WRITE_CSR(CSR_PMAADDR(10),frame->pmaaddr10); + RV_WRITE_CSR(CSR_PMAADDR(11),frame->pmaaddr11); + RV_WRITE_CSR(CSR_PMAADDR(12),frame->pmaaddr12); + RV_WRITE_CSR(CSR_PMAADDR(13),frame->pmaaddr13); + RV_WRITE_CSR(CSR_PMAADDR(14),frame->pmaaddr14); + RV_WRITE_CSR(CSR_PMAADDR(15),frame->pmaaddr15); + RV_WRITE_CSR(CSR_PMACFG(0), frame->pmacfg0); + RV_WRITE_CSR(CSR_PMACFG(1), frame->pmacfg1); + RV_WRITE_CSR(CSR_PMACFG(2), frame->pmacfg2); + RV_WRITE_CSR(CSR_PMACFG(3), frame->pmacfg3); + RV_WRITE_CSR(CSR_PMACFG(4), frame->pmacfg4); + RV_WRITE_CSR(CSR_PMACFG(5), frame->pmacfg5); + RV_WRITE_CSR(CSR_PMACFG(6), frame->pmacfg6); + RV_WRITE_CSR(CSR_PMACFG(7), frame->pmacfg7); + RV_WRITE_CSR(CSR_PMACFG(8), frame->pmacfg8); + RV_WRITE_CSR(CSR_PMACFG(9), frame->pmacfg9); + RV_WRITE_CSR(CSR_PMACFG(10), frame->pmacfg10); + RV_WRITE_CSR(CSR_PMACFG(11), frame->pmacfg11); + RV_WRITE_CSR(CSR_PMACFG(12), frame->pmacfg12); + RV_WRITE_CSR(CSR_PMACFG(13), frame->pmacfg13); + RV_WRITE_CSR(CSR_PMACFG(14), frame->pmacfg14); + RV_WRITE_CSR(CSR_PMACFG(15), frame->pmacfg15); + RV_WRITE_CSR(mcycle, frame->mcycle); +} + +static IRAM_ATTR void cpu_domain_dev_regs_save(cpu_domain_dev_sleep_frame_t *frame) +{ + assert(frame); + cpu_domain_dev_regs_region_t *region = frame->region; + uint32_t *regs_frame = frame->regs_frame; + + int offset = 0; + for (int i = 0; i < frame->region_num; i++) { + for (uint32_t addr = region[i].start; addr < region[i].end; addr+=4) { + regs_frame[offset++] = *(uint32_t *)addr; + } + } +} + +static IRAM_ATTR void cpu_domain_dev_regs_restore(cpu_domain_dev_sleep_frame_t *frame) +{ + assert(frame); + cpu_domain_dev_regs_region_t *region = frame->region; + uint32_t *regs_frame = frame->regs_frame; + + int offset = 0; + for (int i = 0; i < frame->region_num; i++) { + for (uint32_t addr = region[i].start; addr < region[i].end; addr+=4) { + *(uint32_t *)addr = regs_frame[offset++]; + } + } +} + +#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME +static IRAM_ATTR void update_retention_frame_crc(uint32_t *frame_ptr, uint32_t frame_check_size, uint32_t *frame_crc_ptr) +{ + *(frame_crc_ptr) = esp_crc32_le(0, (void *)frame_ptr, frame_check_size); +} + +static IRAM_ATTR void validate_retention_frame_crc(uint32_t *frame_ptr, uint32_t frame_check_size, uint32_t *frame_crc_ptr) +{ + if(*(frame_crc_ptr) != esp_crc32_le(0, (void *)(frame_ptr), frame_check_size)){ + // resume uarts + for (int i = 0; i < SOC_UART_NUM; ++i) { + if (!uart_ll_is_enabled(i)) { + continue; + } + uart_ll_force_xon(i); + } + + /* Since it is still in the critical now, use ESP_EARLY_LOG */ + ESP_EARLY_LOGE(TAG, "Sleep retention frame is corrupted"); + esp_restart_noos(); + } +} +#endif 
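
[Reviewer note, illustrative only -- not part of the original patch] The retention flow in this file hinges on rv_core_critical_regs_save() (declared just below) effectively returning twice: once on the way into sleep, and again when execution resumes through the wake stub rv_core_critical_regs_restore(). The low two bits of the pmufunc word in RvCoreCriticalSleepFrame distinguish the two paths (0x1 = going to sleep, 0x3 = just woken), which is what do_cpu_retention() keys on. A minimal C sketch of that control flow, under those assumptions:

    RvCoreCriticalSleepFrame *frame = rv_core_critical_regs_save();
    if ((frame->pmufunc & 0x3) == 0x1) {
        /* First return: context saved, register the wake stub and enter sleep */
    } else {
        /* Second return: resumed via the wake stub after light sleep, finish the
         * PMU sleep sequence and restore the non-critical CPU context */
    }
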
+ +extern RvCoreCriticalSleepFrame * rv_core_critical_regs_save(void); +extern RvCoreCriticalSleepFrame * rv_core_critical_regs_restore(void); +typedef uint32_t (* sleep_cpu_entry_cb_t)(uint32_t, uint32_t, uint32_t, bool); + +static IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep, + uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp) +{ + uint8_t core_id = esp_cpu_get_core_id(); + rv_core_critical_regs_save(); + + RvCoreCriticalSleepFrame * frame = s_cpu_retention.retent.critical_frame[core_id]; + if ((frame->pmufunc & 0x3) == 0x1) { + esp_sleep_execute_event_callbacks(SLEEP_EVENT_SW_CPU_TO_MEM_END, (void *)0); +#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME + /* Minus 2 * sizeof(long) is for bypass `pmufunc` and `frame_crc` field */ + update_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_FRMSZ - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc)); +#endif + REG_WRITE(LIGHT_SLEEP_WAKE_STUB_ADDR_REG, (uint32_t)rv_core_critical_regs_restore); + +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && !CONFIG_FREERTOS_UNICORE + atomic_store(&s_smp_retention_state[core_id], SMP_BACKUP_DONE); + while (atomic_load(&s_smp_retention_state[!core_id]) != SMP_BACKUP_DONE) { + ; + } +#endif + + return (*goto_sleep)(wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp); + } +#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME + else { + validate_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_FRMSZ - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc)); + } +#endif + + return pmu_sleep_finish(); +} + +esp_err_t IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uint32_t, uint32_t, bool), + uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp) +{ + esp_sleep_execute_event_callbacks(SLEEP_EVENT_SW_CPU_TO_MEM_START, (void *)0); + uint32_t mstatus = save_mstatus_and_disable_global_int(); + uint8_t core_id = esp_cpu_get_core_id(); +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && !CONFIG_FREERTOS_UNICORE + atomic_store(&s_smp_retention_state[core_id], SMP_BACKUP_START); +#endif + cpu_domain_dev_regs_save(s_cpu_retention.retent.clic_frame[core_id]); + cpu_domain_dev_regs_save(s_cpu_retention.retent.cache_config_frame); + rv_core_noncritical_regs_save(); + +#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME + RvCoreNonCriticalSleepFrame *frame = s_cpu_retention.retent.non_critical_frame[core_id]; + /* Minus sizeof(long) is for bypass `frame_crc` field */ + update_retention_frame_crc((uint32_t*)frame, sizeof(RvCoreNonCriticalSleepFrame) - sizeof(long), (uint32_t *)(&frame->frame_crc)); +#endif + + esp_err_t err = do_cpu_retention(goto_sleep, wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp); + +#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME + validate_retention_frame_crc((uint32_t*)frame, sizeof(RvCoreNonCriticalSleepFrame) - sizeof(long), (uint32_t *)(&frame->frame_crc)); +#endif + +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && !CONFIG_FREERTOS_UNICORE + // Start core1 + if (core_id == 0) { + REG_SET_BIT(HP_SYS_CLKRST_SOC_CLK_CTRL0_REG, HP_SYS_CLKRST_REG_CORE1_CPU_CLK_EN); + REG_CLR_BIT(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_CORE1_GLOBAL); + } + + atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_START); +#endif + + rv_core_noncritical_regs_restore(); + cpu_domain_dev_regs_restore(s_cpu_retention.retent.cache_config_frame); + cpu_domain_dev_regs_restore(s_cpu_retention.retent.clic_frame[core_id]); + restore_mstatus(mstatus); + +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && !CONFIG_FREERTOS_UNICORE + 
atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_DONE); +#endif + return err; +} + +esp_err_t esp_sleep_cpu_retention_init(void) +{ + return esp_sleep_cpu_retention_init_impl(); +} + +esp_err_t esp_sleep_cpu_retention_deinit(void) +{ + return esp_sleep_cpu_retention_deinit_impl(); +} + +bool cpu_domain_pd_allowed(void) +{ + bool allowed = true; + for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) { + allowed &= (s_cpu_retention.retent.critical_frame[core_id] != NULL); + allowed &= (s_cpu_retention.retent.non_critical_frame[core_id] != NULL); + } + allowed &= (s_cpu_retention.retent.cache_config_frame != NULL); + for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) { + allowed &= (s_cpu_retention.retent.clic_frame[core_id] != NULL); + } + return allowed; +} + +esp_err_t sleep_cpu_configure(bool light_sleep_enable) +{ +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP + if (light_sleep_enable) { + ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_init(), TAG, "Failed to enable CPU power down during light sleep."); + } else { + ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_deinit(), TAG, "Failed to release CPU retention memory"); + } +#endif + return ESP_OK; +} + +#if !CONFIG_FREERTOS_UNICORE +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP +static TCM_IRAM_ATTR void smp_core_do_retention(void) +{ + uint8_t core_id = esp_cpu_get_core_id(); + + if (core_id == 0) { + WRITE_PERI_REG(HP_SYSTEM_CPU_INT_FROM_CPU_2_REG, 0); + } else { + WRITE_PERI_REG(HP_SYSTEM_CPU_INT_FROM_CPU_3_REG, 0); + } + + // Wait for the other core to start retention + bool smp_skip_retention = false; + while (1) { + smp_retention_state_t another_core_state = atomic_load(&s_smp_retention_state[!core_id]); + if (another_core_state == SMP_SKIP_RETENTION) { + // If the other core skips retention, the current core must skip it as well.
+ smp_skip_retention = true; + break; + } else if (another_core_state == SMP_BACKUP_START) { + break; + } + } + + if (!smp_skip_retention) { + atomic_store(&s_smp_retention_state[core_id], SMP_BACKUP_START); + rv_core_noncritical_regs_save(); + cpu_domain_dev_regs_save(s_cpu_retention.retent.clic_frame[core_id]); + rv_core_critical_regs_save(); + RvCoreCriticalSleepFrame *frame_critical = s_cpu_retention.retent.critical_frame[core_id]; + if ((frame_critical->pmufunc & 0x3) == 0x1) { + atomic_store(&s_smp_retention_state[core_id], SMP_BACKUP_DONE); + // wait for the other core to trigger sleep and wakeup + esp_cpu_wait_for_intr(); + while (1) { + ; + } + } else { + // Start core1 + if (core_id == 0) { + REG_SET_BIT(HP_SYS_CLKRST_SOC_CLK_CTRL0_REG, HP_SYS_CLKRST_REG_CORE1_CPU_CLK_EN); + REG_CLR_BIT(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_CORE1_GLOBAL); + } + atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_START); + cpu_domain_dev_regs_restore(s_cpu_retention.retent.clic_frame[core_id]); + rv_core_noncritical_regs_restore(); + atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_DONE); + } + } + // wait for the other core to come out of sleep + while (atomic_load(&s_smp_retention_state[!core_id]) != SMP_IDLE) { + ; + } + atomic_store(&s_smp_retention_state[core_id], SMP_IDLE); +} + + +IRAM_ATTR void esp_sleep_cpu_skip_retention(void) { + atomic_store(&s_smp_retention_state[esp_cpu_get_core_id()], SMP_SKIP_RETENTION); +} +#endif + +void sleep_smp_cpu_sleep_prepare(void) +{ +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP + while (atomic_load(&s_smp_retention_state[!esp_cpu_get_core_id()]) != SMP_IDLE) { + ; + } + esp_ipc_isr_call((esp_ipc_isr_func_t)smp_core_do_retention, NULL); +#else + esp_ipc_isr_stall_other_cpu(); +#endif +} + +void sleep_smp_cpu_wakeup_prepare(void) +{ +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP + uint8_t core_id = esp_cpu_get_core_id(); + if (atomic_load(&s_smp_retention_state[core_id]) == SMP_RESTORE_DONE) { + while (atomic_load(&s_smp_retention_state[!core_id]) != SMP_RESTORE_DONE) { + ; + } + } + atomic_store(&s_smp_retention_state[core_id], SMP_IDLE); +#else + esp_ipc_isr_release_other_cpu(); +#endif +} +#endif //!CONFIG_FREERTOS_UNICORE diff --git a/components/esp_hw_support/lowpower/cpu_retention/port/esp32p4/sleep_cpu_asm.S b/components/esp_hw_support/lowpower/cpu_retention/port/esp32p4/sleep_cpu_asm.S new file mode 100644 index 0000000000..ed46453f31 --- /dev/null +++ b/components/esp_hw_support/lowpower/cpu_retention/port/esp32p4/sleep_cpu_asm.S @@ -0,0 +1,268 @@ +/* + * SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "soc/soc.h" +#include "rvsleep-frames.h" +#include "freertos/FreeRTOSConfig.h" +#include "sdkconfig.h" + +#include "soc/cache_reg.h" +#define MTVT (0x307) + + .section .tcm.data,"aw" + .global rv_core_critical_regs_frame + .type rv_core_critical_regs_frame,@object + .align 4 +rv_core_critical_regs_frame: + .rept (portNUM_PROCESSORS) + .word 0 + .endr + +/* +-------------------------------------------------------------------------------- + This assembly subroutine is used to save the critical registers of the CPU + core to the internal RAM before sleep, and modify the PMU control flag to + indicate that the system needs to sleep. The subroutine returns a pointer + to the memory that holds the saved CPU critical register context.
+-------------------------------------------------------------------------------- +*/ + + .section .tcm.text,"ax" + .global rv_core_critical_regs_save + .type rv_core_critical_regs_save,@function + .align 4 + +rv_core_critical_regs_save: + + /* arrived here in critical section. we need: + save riscv core critical registers to RvCoreCriticalSleepFrame + */ + csrw mscratch, t0 /* use mscratch as temp storage */ + la a0, rv_core_critical_regs_frame + csrr t1, mhartid + slli t1, t1, 2 + add a0, a0, t1 + lw t0, 0(a0) /* t0 pointer to RvCoreCriticalSleepFrame object */ + + sw ra, RV_SLP_CTX_RA(t0) + sw sp, RV_SLP_CTX_SP(t0) + sw gp, RV_SLP_CTX_GP(t0) + sw tp, RV_SLP_CTX_TP(t0) + sw t1, RV_SLP_CTX_T1(t0) + sw t2, RV_SLP_CTX_T2(t0) + sw s0, RV_SLP_CTX_S0(t0) + sw s1, RV_SLP_CTX_S1(t0) + + /* a0 is caller saved, so it does not need to be saved, but it should be the + pointer value of RvCoreCriticalSleepFrame for return. + */ + mv a0, t0 + sw a0, RV_SLP_CTX_A0(t0) + sw a1, RV_SLP_CTX_A1(t0) + sw a2, RV_SLP_CTX_A2(t0) + sw a3, RV_SLP_CTX_A3(t0) + sw a4, RV_SLP_CTX_A4(t0) + sw a5, RV_SLP_CTX_A5(t0) + sw a6, RV_SLP_CTX_A6(t0) + sw a7, RV_SLP_CTX_A7(t0) + sw s2, RV_SLP_CTX_S2(t0) + sw s3, RV_SLP_CTX_S3(t0) + sw s4, RV_SLP_CTX_S4(t0) + sw s5, RV_SLP_CTX_S5(t0) + sw s6, RV_SLP_CTX_S6(t0) + sw s7, RV_SLP_CTX_S7(t0) + sw s8, RV_SLP_CTX_S8(t0) + sw s9, RV_SLP_CTX_S9(t0) + sw s10, RV_SLP_CTX_S10(t0) + sw s11, RV_SLP_CTX_S11(t0) + sw t3, RV_SLP_CTX_T3(t0) + sw t4, RV_SLP_CTX_T4(t0) + sw t5, RV_SLP_CTX_T5(t0) + sw t6, RV_SLP_CTX_T6(t0) + + csrr t1, mstatus + sw t1, RV_SLP_CTX_MSTATUS(t0) + csrr t2, mtvec + sw t2, RV_SLP_CTX_MTVEC(t0) + csrr t3, mcause + sw t3, RV_SLP_CTX_MCAUSE(t0) + csrr t4, MTVT + sw t4, RV_SLP_CTX_MTVT(t0) + csrr t1, mtval + sw t1, RV_SLP_CTX_MTVAL(t0) + csrr t2, mie + sw t2, RV_SLP_CTX_MIE(t0) + csrr t3, mip + sw t3, RV_SLP_CTX_MIP(t0) + csrr t1, mepc + sw t1, RV_SLP_CTX_MEPC(t0) + + /* + !!! Let idf knows it's going to sleep !!! + + RV_SLP_STK_PMUFUNC field is used to identify whether it is going to sleep or + has just been awakened. We use the lowest 2 bits as indication information, + 3 means being awakened, 1 means going to sleep. + */ + li t1, ~0x3 + lw t2, RV_SLP_CTX_PMUFUNC(t0) + and t2, t1, t2 + ori t2, t2, 0x1 + sw t2, RV_SLP_CTX_PMUFUNC(t0) + + mv t3, t0 + csrr t0, mscratch + sw t0, RV_SLP_CTX_T0(t3) + + /* writeback dcache is required here!!! */ + la t0, CACHE_SYNC_MAP_REG + li t1, 0x10 /* map l1 dcache */ + sw t1, 0x0(t0) /* set EXTMEM_CACHE_SYNC_MAP_REG bit 4 */ + la t2, CACHE_SYNC_ADDR_REG + sw zero, 0x0(t2) /* clear EXTMEM_CACHE_SYNC_ADDR_REG */ + la t0, CACHE_SYNC_SIZE_REG + sw zero, 0x0(t0) /* clear EXTMEM_CACHE_SYNC_SIZE_REG */ + + la t1, CACHE_SYNC_CTRL_REG + lw t2, 0x0(t1) + ori t2, t2, 0x4 + sw t2, 0x0(t1) + + li t0, 0x10 /* SYNC_DONE bit */ +wait_sync_done: + lw t2, 0x0(t1) + and t2, t0, t2 + beqz t2, wait_sync_done + + lw t0, RV_SLP_CTX_T0(t3) + lw t1, RV_SLP_CTX_T1(t3) + lw t2, RV_SLP_CTX_T2(t3) + lw t3, RV_SLP_CTX_T3(t3) + + ret + + .size rv_core_critical_regs_save, . - rv_core_critical_regs_save + +/* +-------------------------------------------------------------------------------- + This assembly subroutine is used to restore the CPU core critical register + context before sleep after system wakes up, modify the PMU control + information, and return the critical register context memory object pointer. + After the subroutine returns, continue to restore other modules of the + system. 
+-------------------------------------------------------------------------------- +*/ + + .section .iram1,"ax" + .global rv_core_critical_regs_restore + .weak rv_core_critical_regs_restore + .type rv_core_critical_regs_restore,@function + .global _rv_core_critical_regs_restore + .type _rv_core_critical_regs_restore,@function + .align 4 + +_rv_core_critical_regs_restore: /* export a strong symbol to jump to here, used + * for a static callback */ + nop + +rv_core_critical_regs_restore: + /* Invalidate L1 Cache by Core 0*/ + csrr t0, mhartid + bnez t0, start_restore +/* Core 0 is wakeup core, Invalidate L1 Cache here */ + /* Invalidate L1 cache is required here!!! */ + la t0, CACHE_SYNC_MAP_REG + li t1, 0x7 /* map l1 i/dcache */ + sw t1, 0x0(t0) /* set EXTMEM_CACHE_SYNC_MAP_REG bit 4 */ + la t2, CACHE_SYNC_ADDR_REG + sw zero, 0x0(t2) /* clear EXTMEM_CACHE_SYNC_ADDR_REG */ + la t0, CACHE_SYNC_SIZE_REG + sw zero, 0x0(t0) /* clear EXTMEM_CACHE_SYNC_SIZE_REG */ + + la t1, CACHE_SYNC_CTRL_REG + lw t2, 0x0(t1) + ori t2, t2, 0x1 + sw t2, 0x0(t1) + + li t0, 0x10 /* SYNC_DONE bit */ +wait_cache_sync_done1: + lw t2, 0x0(t1) + and t2, t0, t2 + beqz t2, wait_cache_sync_done1 + +start_restore: + la t0, rv_core_critical_regs_frame + csrr t1, mhartid + slli t1, t1, 2 + add t0, t0, t1 + lw t0, 0(t0) /* t0 pointer to RvCoreCriticalSleepFrame object */ + beqz t0, .skip_restore /* make sure we do not jump to zero address */ + + /* + !!! Let idf knows it's sleep awake. !!! + + RV_SLP_STK_PMUFUNC field is used to identify whether it is going to sleep or + has just been awakened. We use the lowest 2 bits as indication information, + 3 means being awakened, 1 means going to sleep. + */ + lw t1, RV_SLP_CTX_PMUFUNC(t0) + ori t1, t1, 0x3 + sw t1, RV_SLP_CTX_PMUFUNC(t0) + + lw t2, RV_SLP_CTX_MEPC(t0) + csrw mepc, t2 + lw t3, RV_SLP_CTX_MIP(t0) + csrw mip, t3 + lw t1, RV_SLP_CTX_MIE(t0) + csrw mie, t1 + lw t2, RV_SLP_CTX_MSTATUS(t0) + csrw mstatus, t2 + lw t4, RV_SLP_CTX_MTVT(t0) + csrw MTVT, t4 + lw t3, RV_SLP_CTX_MTVEC(t0) + csrw mtvec, t3 + lw t1, RV_SLP_CTX_MCAUSE(t0) + csrw mcause, t1 + lw t2, RV_SLP_CTX_MTVAL(t0) + csrw mtval, t2 + + lw t6, RV_SLP_CTX_T6(t0) + lw t5, RV_SLP_CTX_T5(t0) + lw t4, RV_SLP_CTX_T4(t0) + lw t3, RV_SLP_CTX_T3(t0) + lw s11, RV_SLP_CTX_S11(t0) + lw s10, RV_SLP_CTX_S10(t0) + lw s9, RV_SLP_CTX_S9(t0) + lw s8, RV_SLP_CTX_S8(t0) + lw s7, RV_SLP_CTX_S7(t0) + lw s6, RV_SLP_CTX_S6(t0) + lw s5, RV_SLP_CTX_S5(t0) + lw s4, RV_SLP_CTX_S4(t0) + lw s3, RV_SLP_CTX_S3(t0) + lw s2, RV_SLP_CTX_S2(t0) + lw a7, RV_SLP_CTX_A7(t0) + lw a6, RV_SLP_CTX_A6(t0) + lw a5, RV_SLP_CTX_A5(t0) + lw a4, RV_SLP_CTX_A4(t0) + lw a3, RV_SLP_CTX_A3(t0) + lw a2, RV_SLP_CTX_A2(t0) + lw a1, RV_SLP_CTX_A1(t0) + lw a0, RV_SLP_CTX_A0(t0) + lw s1, RV_SLP_CTX_S1(t0) + lw s0, RV_SLP_CTX_S0(t0) + lw t2, RV_SLP_CTX_T2(t0) + lw t1, RV_SLP_CTX_T1(t0) + lw tp, RV_SLP_CTX_TP(t0) + lw gp, RV_SLP_CTX_GP(t0) + lw sp, RV_SLP_CTX_SP(t0) + lw ra, RV_SLP_CTX_RA(t0) + lw t0, RV_SLP_CTX_T0(t0) + +.skip_restore: + ret + + .size rv_core_critical_regs_restore, . 
- rv_core_critical_regs_restore diff --git a/components/esp_hw_support/sleep_modes.c b/components/esp_hw_support/sleep_modes.c index 38a9f34633..17f3beb032 100644 --- a/components/esp_hw_support/sleep_modes.c +++ b/components/esp_hw_support/sleep_modes.c @@ -572,7 +572,7 @@ FORCE_INLINE_ATTR void misc_modules_sleep_prepare(bool deep_sleep) #if CONFIG_GPIO_ESP32_SUPPORT_SWITCH_SLP_PULL gpio_sleep_mode_config_apply(); #endif -#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && SOC_PM_CPU_RETENTION_BY_RTCCNTL sleep_enable_cpu_retention(); #endif #if REGI2C_ANA_CALI_PD_WORKAROUND @@ -601,7 +601,7 @@ FORCE_INLINE_ATTR void misc_modules_wake_prepare(void) sar_periph_ctrl_power_enable(); #endif -#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && SOC_PM_CPU_RETENTION_BY_RTCCNTL sleep_disable_cpu_retention(); #endif #if CONFIG_GPIO_ESP32_SUPPORT_SWITCH_SLP_PULL @@ -828,6 +828,9 @@ static esp_err_t IRAM_ATTR esp_sleep_start(uint32_t pd_flags, esp_sleep_mode_t m if (should_skip_sleep) { result = ESP_ERR_SLEEP_REJECT; +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && !CONFIG_FREERTOS_UNICORE && SOC_PM_CPU_RETENTION_BY_SW + esp_sleep_cpu_skip_retention(); +#endif } else { #if CONFIG_ESP_SLEEP_DEBUG if (s_sleep_ctx != NULL) { @@ -884,13 +887,17 @@ static esp_err_t IRAM_ATTR esp_sleep_start(uint32_t pd_flags, esp_sleep_mode_t m #endif #if SOC_PMU_SUPPORTED -#if SOC_PM_CPU_RETENTION_BY_SW +#if SOC_PM_CPU_RETENTION_BY_SW && CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP esp_sleep_execute_event_callbacks(SLEEP_EVENT_HW_GOTO_SLEEP, (void *)0); - if (pd_flags & PMU_SLEEP_PD_CPU) { + if (pd_flags & (PMU_SLEEP_PD_CPU | PMU_SLEEP_PD_TOP)) { result = esp_sleep_cpu_retention(pmu_sleep_start, s_config.wakeup_triggers, reject_triggers, config.power.hp_sys.dig_power.mem_dslp, deep_sleep); } else #endif { +#if !CONFIG_FREERTOS_UNICORE && CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && SOC_PM_CPU_RETENTION_BY_SW + // Skip smp retention if CPU power domain power-down is not allowed + esp_sleep_cpu_skip_retention(); +#endif result = call_rtc_sleep_start(reject_triggers, config.power.hp_sys.dig_power.mem_dslp, deep_sleep); } esp_sleep_execute_event_callbacks(SLEEP_EVENT_HW_EXIT_SLEEP, (void *)0); @@ -1179,7 +1186,13 @@ esp_err_t esp_light_sleep_start(void) } #endif +#if !CONFIG_FREERTOS_UNICORE +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && SOC_PM_CPU_RETENTION_BY_SW + sleep_smp_cpu_sleep_prepare(); +#else esp_ipc_isr_stall_other_cpu(); +#endif +#endif #if CONFIG_ESP_SLEEP_CACHE_SAFE_ASSERTION && CONFIG_PM_SLP_IRAM_OPT /* Cache Suspend 0: if CONFIG_PM_SLP_IRAM_OPT is enabled, suspend cache here so that the access to flash @@ -1291,6 +1304,11 @@ esp_err_t esp_light_sleep_start(void) // Enter sleep, then wait for flash to be ready on wakeup err = esp_light_sleep_inner(pd_flags, flash_enable_time_us); } +#if !CONFIG_FREERTOS_UNICORE && CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && SOC_PM_CPU_RETENTION_BY_SW + if (err != ESP_OK) { + esp_sleep_cpu_skip_retention(); + } +#endif // light sleep wakeup flag only makes sense after a successful light sleep s_light_sleep_wakeup = (err == ESP_OK); @@ -1322,7 +1340,14 @@ esp_err_t esp_light_sleep_start(void) resume_cache(); #endif +#if !CONFIG_FREERTOS_UNICORE +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && SOC_PM_CPU_RETENTION_BY_SW + sleep_smp_cpu_wakeup_prepare(); +#else esp_ipc_isr_release_other_cpu(); +#endif +#endif + if (!wdt_was_enabled) { 
wdt_hal_write_protect_disable(&rtc_wdt_ctx); wdt_hal_disable(&rtc_wdt_ctx); diff --git a/components/esp_pm/Kconfig b/components/esp_pm/Kconfig index bfae695173..5134b7d988 100644 --- a/components/esp_pm/Kconfig +++ b/components/esp_pm/Kconfig @@ -141,7 +141,8 @@ menu "Power Management" config PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP bool "Power down Digital Peripheral in light sleep (EXPERIMENTAL)" - depends on SOC_PAU_SUPPORTED + depends on SOC_PM_SUPPORT_TOP_PD + select PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP default n #TODO: enable by default if periph init/deinit management supported (WIFI-5252) help If enabled, digital peripherals will be powered down in light sleep, it will reduce sleep diff --git a/components/esp_pm/linker.lf b/components/esp_pm/linker.lf index 66493f6852..d4be371ce4 100644 --- a/components/esp_pm/linker.lf +++ b/components/esp_pm/linker.lf @@ -27,9 +27,9 @@ entries: systimer (noflash) if GPIO_ESP32_SUPPORT_SWITCH_SLP_PULL = y: sleep_gpio:gpio_sleep_mode_config_apply (noflash) - if SOC_PM_CPU_RETENTION_BY_RTCCNTL = y && (SOC_PM_SUPPORT_CPU_PD = y || SOC_PM_SUPPORT_TAGMEM_PD = y): + if SOC_PM_CPU_RETENTION_BY_RTCCNTL = y && (PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP = y || SOC_PM_SUPPORT_TAGMEM_PD = y): sleep_cpu:sleep_enable_cpu_retention (noflash) - if SOC_PM_SUPPORT_CPU_PD = y: + if PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP = y: sleep_cpu:cpu_domain_pd_allowed (noflash) if SOC_PM_SUPPORT_TOP_PD = y: sleep_clock:clock_domain_pd_allowed (noflash) diff --git a/components/esp_pm/pm_impl.c b/components/esp_pm/pm_impl.c index ff3dcffcca..62209081c6 100644 --- a/components/esp_pm/pm_impl.c +++ b/components/esp_pm/pm_impl.c @@ -370,7 +370,7 @@ static esp_err_t esp_pm_sleep_configure(const void *vconfig) esp_err_t err = ESP_OK; const esp_pm_config_t* config = (const esp_pm_config_t*) vconfig; -#if SOC_PM_SUPPORT_CPU_PD +#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP err = sleep_cpu_configure(config->light_sleep_enable); if (err != ESP_OK) { return err; diff --git a/components/esp_rom/include/esp32p4/rom/rtc.h b/components/esp_rom/include/esp32p4/rom/rtc.h index 61baa33304..52b4f6835c 100644 --- a/components/esp_rom/include/esp32p4/rom/rtc.h +++ b/components/esp_rom/include/esp32p4/rom/rtc.h @@ -72,6 +72,7 @@ extern "C" { * 0 -- light sleep * 1 -- deep sleep */ +#define LIGHT_SLEEP_WAKE_STUB_ADDR_REG LP_SYSTEM_REG_LP_STORE8_REG #define SLEEP_MODE_REG LP_SYSTEM_REG_LP_STORE8_REG typedef enum { diff --git a/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in b/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in index 113e6e91d8..4da0a19281 100644 --- a/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in +++ b/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in @@ -347,10 +347,6 @@ config SOC_CPU_HAS_FLEXIBLE_INTC bool default y -config SOC_INT_PLIC_SUPPORTED - bool - default n - config SOC_INT_CLIC_SUPPORTED bool default y @@ -1365,7 +1361,7 @@ config SOC_PM_SUPPORT_DEEPSLEEP_CHECK_STUB_ONLY config SOC_PM_CPU_RETENTION_BY_SW bool - default n + default y config SOC_PM_PAU_LINK_NUM int diff --git a/components/soc/esp32p4/include/soc/cache_reg.h b/components/soc/esp32p4/include/soc/cache_reg.h index df6ad7fdc9..b3cc3cd039 100644 --- a/components/soc/esp32p4/include/soc/cache_reg.h +++ b/components/soc/esp32p4/include/soc/cache_reg.h @@ -5,7 +5,6 @@ */ #pragma once -#include #include "soc/soc.h" #ifdef __cplusplus extern "C" { diff --git a/components/soc/esp32p4/include/soc/soc_caps.h b/components/soc/esp32p4/include/soc/soc_caps.h index dc4d629791..fce0f67512 100644 --- 
a/components/soc/esp32p4/include/soc/soc_caps.h +++ b/components/soc/esp32p4/include/soc/soc_caps.h @@ -151,7 +151,6 @@ #define SOC_CPU_CORES_NUM (2U) #define SOC_CPU_INTR_NUM 32 #define SOC_CPU_HAS_FLEXIBLE_INTC 1 -#define SOC_INT_PLIC_SUPPORTED 0 //riscv platform-level interrupt controller #define SOC_INT_CLIC_SUPPORTED 1 #define SOC_INT_HW_NESTED_SUPPORTED 1 // Support for hardware interrupts nesting #define SOC_BRANCH_PREDICTOR_SUPPORTED 1 @@ -561,7 +560,6 @@ // TODO: IDF-5351 (Copy from esp32c3, need check) /*-------------------------- Power Management CAPS ----------------------------*/ #define SOC_PM_SUPPORT_WIFI_WAKEUP (1) -// #define SOC_PM_SUPPORT_CPU_PD (1) //TODO: IDF-7528 #define SOC_PM_SUPPORT_XTAL32K_PD (1) #define SOC_PM_SUPPORT_RC32K_PD (1) #define SOC_PM_SUPPORT_RC_FAST_PD (1) @@ -571,7 +569,7 @@ #define SOC_PM_SUPPORT_DEEPSLEEP_CHECK_STUB_ONLY (1) /*!