Merge branch 'feat/rmt_support_esp32c5' into 'master'

Basic RMT driver support on esp32c5

See merge request espressif/esp-idf!29215
pull/13306/head
morris 2024-03-14 11:52:01 +08:00
commit 79d8057a8c
19 changed files with 1164 additions and 61 deletions


@@ -86,4 +86,15 @@ menu "Driver Configurations"
    endmenu # Legacy Timer Group Driver Configurations
+   menu "Legacy RMT Driver Configurations"
+       depends on SOC_RMT_SUPPORTED
+       config RMT_SUPPRESS_DEPRECATE_WARN
+           bool "Suppress legacy driver deprecated warning"
+           default n
+           help
+               Whether to suppress the deprecation warnings when using the legacy RMT driver (driver/rmt.h).
+               If you want to continue using the legacy driver, and don't want to see related deprecation warnings,
+               you can enable this option.
+   endmenu # Legacy RMT Driver Configurations
endmenu # Driver configurations


@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -13,6 +13,7 @@
#include "esp_check.h"
#include "driver/gpio.h"
#include "esp_private/periph_ctrl.h"
+#include "esp_private/gpio.h"
#include "driver/rmt_types_legacy.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
@@ -537,7 +538,7 @@ esp_err_t rmt_set_gpio(rmt_channel_t channel, rmt_mode_t mode, gpio_num_t gpio_n
ESP_RETURN_ON_FALSE(((GPIO_IS_VALID_GPIO(gpio_num) && (mode == RMT_MODE_RX)) ||
                     (GPIO_IS_VALID_OUTPUT_GPIO(gpio_num) && (mode == RMT_MODE_TX))), ESP_ERR_INVALID_ARG, TAG, RMT_GPIO_ERROR_STR);
-gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[gpio_num], PIN_FUNC_GPIO);
+gpio_func_sel(gpio_num, PIN_FUNC_GPIO);
if (mode == RMT_MODE_TX) {
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    gpio_set_direction(gpio_num, GPIO_MODE_OUTPUT);


@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -18,6 +18,7 @@
#include "ir_tools.h"
#include "driver/rmt.h"
#include "soc/rmt_periph.h"
+#include "esp_private/gpio.h"
#define RMT_RX_CHANNEL_ENCODING_START (SOC_RMT_CHANNELS_PER_GROUP-SOC_RMT_TX_CANDIDATES_PER_GROUP)
#define RMT_TX_CHANNEL_ENCODING_END (SOC_RMT_TX_CANDIDATES_PER_GROUP-1)
@@ -71,7 +72,7 @@ static void rmt_setup_testbench(int tx_channel, int rx_channel, uint32_t flags)
}
// Routing internal signals by IO Matrix (bind rmt tx and rx signal on the same GPIO)
-gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[RMT_DATA_IO], PIN_FUNC_GPIO);
+gpio_func_sel(RMT_DATA_IO, PIN_FUNC_GPIO);
TEST_ESP_OK(gpio_set_direction(RMT_DATA_IO, GPIO_MODE_INPUT_OUTPUT));
if (tx_channel >= 0) {
    esp_rom_gpio_connect_out_signal(RMT_DATA_IO, rmt_periph_signals.groups[0].channels[tx_channel].tx_sig, 0, 0);


@@ -18,14 +18,6 @@ menu "ESP-Driver:RMT Configurations"
            so that the receive function can be IRAM-safe and able to be called when the flash cache is disabled.
            Enabling this option can improve driver performance as well.
-   config RMT_SUPPRESS_DEPRECATE_WARN
-       bool "Suppress legacy driver deprecated warning"
-       default n
-       help
-           Wether to suppress the deprecation warnings when using legacy rmt driver (driver/rmt.h).
-           If you want to continue using the legacy driver, and don't want to see related deprecation warnings,
-           you can enable this option.
    config RMT_ENABLE_DEBUG_LOG
        bool "Enable debug log"
        default n


@@ -25,6 +25,7 @@
#include "esp_attr.h"
#include "esp_private/gdma.h"
#include "esp_private/esp_gpio_reserve.h"
+#include "esp_private/gpio.h"
#include "driver/rmt_common.h"
#ifdef __cplusplus
@@ -67,6 +68,9 @@ typedef dma_descriptor_align4_t rmt_dma_descriptor_t;
#define RMT_GET_NON_CACHE_ADDR(addr) (addr)
#endif
+#define ALIGN_UP(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
+#define ALIGN_DOWN(num, align) ((num) & ~((align) - 1))
typedef struct {
    struct {
        rmt_symbol_word_t symbols[SOC_RMT_MEM_WORDS_PER_CHANNEL];
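
The two helpers hoisted into rmt_private.h are plain power-of-two rounding macros. A standalone sketch of what they compute, with 32 standing in for a cache line size (illustrative values, not taken from the driver):

#include <assert.h>

#define ALIGN_UP(num, align)   (((num) + ((align) - 1)) & ~((align) - 1))
#define ALIGN_DOWN(num, align) ((num) & ~((align) - 1))

int main(void)
{
    // align must be a power of two; e.g. rounding a 48-byte request to 32-byte cache lines
    assert(ALIGN_UP(48, 32) == 64);    // grows to the next line boundary
    assert(ALIGN_DOWN(48, 32) == 32);  // shrinks to the previous one
    assert(ALIGN_UP(64, 32) == 64);    // already-aligned values are unchanged
    return 0;
}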


@@ -28,9 +28,6 @@
#include "driver/rmt_rx.h"
#include "rmt_private.h"
-#define ALIGN_UP(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
-#define ALIGN_DOWN(num, align) ((num) & ~((align) - 1))
static const char *TAG = "rmt";
static esp_err_t rmt_del_rx_channel(rmt_channel_handle_t channel);
@@ -207,10 +204,20 @@ esp_err_t rmt_new_rx_channel(const rmt_rx_channel_config_t *config, rmt_channel_
uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
// the alignment should meet both the DMA and cache requirement
size_t alignment = MAX(data_cache_line_size, RMT_DMA_DESC_ALIGN);
-rx_channel->dma_nodes = heap_caps_aligned_calloc(alignment, num_dma_nodes, sizeof(rmt_dma_descriptor_t), mem_caps);
-ESP_GOTO_ON_FALSE(rx_channel->dma_nodes, ESP_ERR_NO_MEM, err, TAG, "no mem for rx channel DMA nodes");
+size_t dma_nodes_size = ALIGN_UP(num_dma_nodes * sizeof(rmt_dma_descriptor_t), alignment);
+rmt_dma_descriptor_t *dma_nodes = heap_caps_aligned_calloc(alignment, 1, dma_nodes_size, mem_caps);
+ESP_GOTO_ON_FALSE(dma_nodes, ESP_ERR_NO_MEM, err, TAG, "no mem for rx channel DMA nodes");
+rx_channel->dma_nodes = dma_nodes;
+// do memory sync only when the data cache exists
+if (data_cache_line_size) {
+    // write back and then invalidate the cached dma_nodes, we will skip the cache (by non-cacheable address) when access the dma_nodes
+    // even the cache auto-write back happens, there's no risk the dma_nodes will be overwritten
+    ESP_GOTO_ON_ERROR(esp_cache_msync(dma_nodes, dma_nodes_size,
+                                      ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE),
+                      err, TAG, "cache sync failed");
+}
// we will use the non-cached address to manipulate the DMA descriptor, for simplicity
-rx_channel->dma_nodes_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(rx_channel->dma_nodes);
+rx_channel->dma_nodes_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(dma_nodes);
}
rx_channel->num_dma_nodes = num_dma_nodes;
// register the channel to group
@@ -284,7 +291,7 @@ esp_err_t rmt_new_rx_channel(const rmt_rx_channel_config_t *config, rmt_channel_
esp_rom_gpio_connect_in_signal(config->gpio_num,
                               rmt_periph_signals.groups[group_id].channels[channel_id + RMT_RX_CHANNEL_OFFSET_IN_GROUP].rx_sig,
                               config->flags.invert_in);
-gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[config->gpio_num], PIN_FUNC_GPIO);
+gpio_func_sel(config->gpio_num, PIN_FUNC_GPIO);
// initialize other members of rx channel
portMUX_INITIALIZE(&rx_channel->base.spinlock);
@@ -351,11 +358,11 @@ esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_
rmt_rx_channel_t *rx_chan = __containerof(channel, rmt_rx_channel_t, base);
size_t per_dma_block_size = 0;
size_t last_dma_block_size = 0;
+uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
if (channel->dma_chan) {
    // Currently we assume the user buffer is allocated from internal RAM, PSRAM is not supported yet.
    ESP_RETURN_ON_FALSE_ISR(esp_ptr_internal(buffer), ESP_ERR_INVALID_ARG, TAG, "user buffer not allocated from internal RAM");
-   uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
    // DMA doesn't have alignment requirement for SRAM buffer if the burst mode is not enabled,
    // but we need to make sure the buffer is aligned to cache line size
    uint32_t align_mask = data_cache_line_size ? (data_cache_line_size - 1) : 0;
@@ -395,6 +402,10 @@ esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_
if (channel->dma_chan) {
#if SOC_RMT_SUPPORT_DMA
+   // invalidate the user buffer, in case cache auto-write back happens and breaks the data just written by the DMA
+   if (data_cache_line_size) {
+       ESP_RETURN_ON_ERROR_ISR(esp_cache_msync(buffer, buffer_size, ESP_CACHE_MSYNC_FLAG_DIR_M2C), TAG, "cache sync failed");
+   }
    rmt_rx_mount_dma_buffer(rx_chan, buffer, buffer_size, per_dma_block_size, last_dma_block_size);
    gdma_reset(channel->dma_chan);
    gdma_start(channel->dma_chan, (intptr_t)rx_chan->dma_nodes); // note, we must use the cached descriptor address to start the DMA
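
The esp_cache_msync() calls added here follow the usual cache discipline for DMA-visible memory. A minimal, self-contained sketch of the two direction flags, assuming only the esp_cache.h API used above (addresses and sizes must be cache-line aligned unless ESP_CACHE_MSYNC_FLAG_UNALIGNED is added, which rmt_receive() already guarantees through its align_mask checks):

#include <stddef.h>
#include "esp_cache.h"

// C2M ("cache to memory") writes dirty lines back before a peripheral reads the buffer;
// M2C ("memory to cache") invalidates lines when a peripheral writes the buffer, so stale
// cached data can never be flushed on top of the freshly DMA-written contents.
static esp_err_t sync_before_device_writes(void *buf, size_t len)
{
    return esp_cache_msync(buf, len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
}

static esp_err_t sync_before_device_reads(void *buf, size_t len)
{
    return esp_cache_msync(buf, len, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
}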


@@ -23,6 +23,7 @@
#include "hal/gpio_hal.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
+#include "esp_cache.h"
#include "driver/gpio.h"
#include "driver/rmt_tx.h"
#include "rmt_private.h"
@@ -54,10 +55,20 @@ static esp_err_t rmt_tx_init_dma_link(rmt_tx_channel_t *tx_channel, const rmt_tx
uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
// the alignment should meet both the DMA and cache requirement
size_t alignment = MAX(data_cache_line_size, sizeof(rmt_symbol_word_t));
-rmt_symbol_word_t *dma_mem_base = heap_caps_aligned_calloc(alignment, config->mem_block_symbols, sizeof(rmt_symbol_word_t),
+size_t dma_mem_base_size = ALIGN_UP(config->mem_block_symbols * sizeof(rmt_symbol_word_t), alignment);
+rmt_symbol_word_t *dma_mem_base = heap_caps_aligned_calloc(alignment, 1, dma_mem_base_size,
                                                           RMT_MEM_ALLOC_CAPS | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
ESP_RETURN_ON_FALSE(dma_mem_base, ESP_ERR_NO_MEM, TAG, "no mem for tx DMA buffer");
tx_channel->dma_mem_base = dma_mem_base;
+// do memory sync only when the data cache exists
+if (data_cache_line_size) {
+    // write back and then invalidate the cache, we will skip the cache (by non-cacheable address) when access the dma_mem_base
+    // even the cache auto-write back happens, there's no risk the dma_mem_base will be overwritten
+    ESP_RETURN_ON_ERROR(esp_cache_msync(dma_mem_base, dma_mem_base_size,
+                                        ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE),
+                        TAG, "cache sync failed");
+}
+// we use the non-cached address to manipulate this DMA buffer
tx_channel->dma_mem_base_nc = (rmt_symbol_word_t *)RMT_GET_NON_CACHE_ADDR(dma_mem_base);
for (int i = 0; i < RMT_DMA_NODES_PING_PONG; i++) {
    // each descriptor shares half of the DMA buffer
@@ -258,10 +269,18 @@ esp_err_t rmt_new_tx_channel(const rmt_tx_channel_config_t *config, rmt_channel_
uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
// the alignment should meet both the DMA and cache requirement
size_t alignment = MAX(data_cache_line_size, RMT_DMA_DESC_ALIGN);
-tx_channel->dma_nodes = heap_caps_aligned_calloc(alignment, RMT_DMA_NODES_PING_PONG, sizeof(rmt_dma_descriptor_t), mem_caps);
-ESP_GOTO_ON_FALSE(tx_channel->dma_nodes, ESP_ERR_NO_MEM, err, TAG, "no mem for tx DMA nodes");
+size_t dma_nodes_mem_size = ALIGN_UP(RMT_DMA_NODES_PING_PONG * sizeof(rmt_dma_descriptor_t), alignment);
+rmt_dma_descriptor_t *dma_nodes = heap_caps_aligned_calloc(alignment, 1, dma_nodes_mem_size, mem_caps);
+ESP_GOTO_ON_FALSE(dma_nodes, ESP_ERR_NO_MEM, err, TAG, "no mem for tx DMA nodes");
+tx_channel->dma_nodes = dma_nodes;
+// write back and then invalidate the cached dma_nodes, we will skip the cache (by non-cacheable address) when access the dma_nodes
+if (data_cache_line_size) {
+    ESP_GOTO_ON_ERROR(esp_cache_msync(dma_nodes, dma_nodes_mem_size,
+                                      ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE),
+                      err, TAG, "cache sync failed");
+}
// we will use the non-cached address to manipulate the DMA descriptor, for simplicity
-tx_channel->dma_nodes_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(tx_channel->dma_nodes);
+tx_channel->dma_nodes_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(dma_nodes);
}
// create transaction queues
ESP_GOTO_ON_ERROR(rmt_tx_create_trans_queue(tx_channel, config), err, TAG, "install trans queues failed");
@@ -336,7 +355,7 @@ esp_err_t rmt_new_tx_channel(const rmt_tx_channel_config_t *config, rmt_channel_
esp_rom_gpio_connect_out_signal(config->gpio_num,
                                rmt_periph_signals.groups[group_id].channels[channel_id + RMT_TX_CHANNEL_OFFSET_IN_GROUP].tx_sig,
                                config->flags.invert_out, false);
-gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[config->gpio_num], PIN_FUNC_GPIO);
+gpio_func_sel(config->gpio_num, PIN_FUNC_GPIO);
tx_channel->base.gpio_num = config->gpio_num;
portMUX_INITIALIZE(&tx_channel->base.spinlock);
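
Together with the soc caps and LL layer added below, these driver changes are what let rmt_new_tx_channel() run on the ESP32-C5. A minimal, illustrative use of the public driver API (the GPIO number and resolution are placeholders, not values taken from this merge request):

#include "driver/rmt_tx.h"
#include "esp_check.h"

static rmt_channel_handle_t s_tx_chan;

static esp_err_t open_rmt_tx_channel(void)
{
    rmt_tx_channel_config_t cfg = {
        .gpio_num = 4,                    // placeholder pin
        .clk_src = RMT_CLK_SRC_DEFAULT,   // one of the sources listed in the clock tree below
        .resolution_hz = 1 * 1000 * 1000, // 1 MHz tick -> 1 us per RMT duration unit
        .mem_block_symbols = 48,          // one memory block (SOC_RMT_MEM_WORDS_PER_CHANNEL)
        .trans_queue_depth = 4,
    };
    ESP_RETURN_ON_ERROR(rmt_new_tx_channel(&cfg, &s_tx_chan), "example", "create TX channel failed");
    return rmt_enable(s_tx_chan);
}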


@@ -51,8 +51,8 @@ uint64_t esp_gpio_revoke(uint64_t gpio_mask);
*
* @param gpio_mask Mask of the GPIOs to be checked
* @return
-*      - true Aay of the given GPIO(s) is reserved
-*      - false Aay of the given GPIO(s) is not reserved
+*      - true Any of the given GPIO(s) is reserved
+*      - false Any of the given GPIO(s) is not reserved
*/
bool esp_gpio_is_reserved(uint64_t gpio_mask);


@@ -0,0 +1,885 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @note TX and RX channels are indexed from 0 in the LL driver, i.e. tx_channel = [0,1], rx_channel = [0,1]
*/
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include "hal/misc.h"
#include "hal/assert.h"
#include "hal/rmt_types.h"
#include "soc/rmt_struct.h"
#include "soc/pcr_struct.h"
#ifdef __cplusplus
extern "C" {
#endif
#define RMT_LL_EVENT_TX_DONE(channel) (1 << (channel))
#define RMT_LL_EVENT_TX_THRES(channel) (1 << ((channel) + 8))
#define RMT_LL_EVENT_TX_LOOP_END(channel) (1 << ((channel) + 12))
#define RMT_LL_EVENT_TX_ERROR(channel) (1 << ((channel) + 4))
#define RMT_LL_EVENT_RX_DONE(channel) (1 << ((channel) + 2))
#define RMT_LL_EVENT_RX_THRES(channel) (1 << ((channel) + 10))
#define RMT_LL_EVENT_RX_ERROR(channel) (1 << ((channel) + 6))
#define RMT_LL_EVENT_TX_MASK(channel) (RMT_LL_EVENT_TX_DONE(channel) | RMT_LL_EVENT_TX_THRES(channel) | RMT_LL_EVENT_TX_LOOP_END(channel))
#define RMT_LL_EVENT_RX_MASK(channel) (RMT_LL_EVENT_RX_DONE(channel) | RMT_LL_EVENT_RX_THRES(channel))
#define RMT_LL_MAX_LOOP_COUNT_PER_BATCH 1023
#define RMT_LL_MAX_FILTER_VALUE 255
#define RMT_LL_MAX_IDLE_VALUE 32767
typedef enum {
RMT_LL_MEM_OWNER_SW = 0,
RMT_LL_MEM_OWNER_HW = 1,
} rmt_ll_mem_owner_t;
/**
* @brief Enable the bus clock for RMT module
*
* @param group_id Group ID
* @param enable true to enable, false to disable
*/
static inline void rmt_ll_enable_bus_clock(int group_id, bool enable)
{
(void)group_id;
PCR.rmt_conf.rmt_clk_en = enable;
}
/**
* @brief Reset the RMT module
*
* @param group_id Group ID
*/
static inline void rmt_ll_reset_register(int group_id)
{
(void)group_id;
PCR.rmt_conf.rmt_rst_en = 1;
PCR.rmt_conf.rmt_rst_en = 0;
}
/**
* @brief Enable clock gate for register and memory
*
* @param dev Peripheral instance address
* @param enable True to enable, False to disable
*/
static inline void rmt_ll_enable_periph_clock(rmt_dev_t *dev, bool enable)
{
dev->sys_conf.clk_en = enable; // register clock gating
dev->sys_conf.mem_clk_force_on = enable; // memory clock gating
}
/**
* @brief Power down memory
*
* @param dev Peripheral instance address
* @param enable True to power down, False to power up
*/
static inline void rmt_ll_power_down_mem(rmt_dev_t *dev, bool enable)
{
dev->sys_conf.mem_force_pu = !enable;
dev->sys_conf.mem_force_pd = enable;
}
/**
* @brief Enable APB accessing RMT memory in nonfifo mode
*
* @param dev Peripheral instance address
* @param enable True to enable, False to disable
*/
static inline void rmt_ll_enable_mem_access_nonfifo(rmt_dev_t *dev, bool enable)
{
dev->sys_conf.apb_fifo_mask = enable;
}
/**
* @brief Set clock source and divider for RMT channel group
*
* @param dev Peripheral instance address
* @param channel not used as clock source is set for all channels
* @param src Clock source
* @param divider_integral Integral part of the divider
* @param divider_denominator Denominator part of the divider
* @param divider_numerator Numerator part of the divider
*/
static inline void rmt_ll_set_group_clock_src(rmt_dev_t *dev, uint32_t channel, rmt_clock_source_t src,
uint32_t divider_integral, uint32_t divider_denominator, uint32_t divider_numerator)
{
// Formula: rmt_sclk = module_clock_src / (1 + div_num + div_a / div_b)
(void)channel; // the source clock is set for all channels
HAL_ASSERT(divider_integral >= 1);
HAL_FORCE_MODIFY_U32_REG_FIELD(PCR.rmt_sclk_conf, rmt_sclk_div_num, divider_integral - 1);
PCR.rmt_sclk_conf.rmt_sclk_div_a = divider_numerator;
PCR.rmt_sclk_conf.rmt_sclk_div_b = divider_denominator;
switch (src) {
case RMT_CLK_SRC_PLL_F80M:
PCR.rmt_sclk_conf.rmt_sclk_sel = 2;
break;
case RMT_CLK_SRC_RC_FAST:
PCR.rmt_sclk_conf.rmt_sclk_sel = 1;
break;
case RMT_CLK_SRC_XTAL:
PCR.rmt_sclk_conf.rmt_sclk_sel = 0;
break;
default:
HAL_ASSERT(false);
break;
}
}
/**
* @brief Enable RMT peripheral source clock
*
* @param dev Peripheral instance address
* @param en True to enable, False to disable
*/
static inline void rmt_ll_enable_group_clock(rmt_dev_t *dev, bool en)
{
(void)dev;
PCR.rmt_sclk_conf.rmt_sclk_en = en;
}
////////////////////////////////////////TX Channel Specific/////////////////////////////////////////////////////////////
/**
* @brief Reset clock divider for TX channels by mask
*
* @param dev Peripheral instance address
* @param channel_mask Mask of TX channels
*/
static inline void rmt_ll_tx_reset_channels_clock_div(rmt_dev_t *dev, uint32_t channel_mask)
{
// write 1 to reset
dev->ref_cnt_rst.val |= channel_mask & 0x03;
}
/**
* @brief Set TX channel clock divider
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @param div Division value
*/
static inline void rmt_ll_tx_set_channel_clock_div(rmt_dev_t *dev, uint32_t channel, uint32_t div)
{
HAL_ASSERT(div >= 1 && div <= 256 && "divider out of range");
// limit the maximum divider to 256
if (div >= 256) {
div = 0; // 0 means 256 division
}
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->chnconf0[channel], div_cnt_chn, div);
}
/**
* @brief Reset RMT reading pointer for TX channel
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
*/
__attribute__((always_inline))
static inline void rmt_ll_tx_reset_pointer(rmt_dev_t *dev, uint32_t channel)
{
dev->chnconf0[channel].mem_rd_rst_chn = 1;
dev->chnconf0[channel].mem_rd_rst_chn = 0;
dev->chnconf0[channel].apb_mem_rst_chn = 1;
dev->chnconf0[channel].apb_mem_rst_chn = 0;
}
/**
* @brief Start transmitting for TX channel
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
*/
__attribute__((always_inline))
static inline void rmt_ll_tx_start(rmt_dev_t *dev, uint32_t channel)
{
// update other configuration registers before start transmitting
dev->chnconf0[channel].conf_update_chn = 1;
dev->chnconf0[channel].tx_start_chn = 1;
}
/**
* @brief Stop transmitting for TX channel
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
*/
__attribute__((always_inline))
static inline void rmt_ll_tx_stop(rmt_dev_t *dev, uint32_t channel)
{
dev->chnconf0[channel].tx_stop_chn = 1;
// stop won't take place until configurations updated
dev->chnconf0[channel].conf_update_chn = 1;
}
/**
* @brief Set memory block number for TX channel
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @param block_num memory block number
*/
static inline void rmt_ll_tx_set_mem_blocks(rmt_dev_t *dev, uint32_t channel, uint8_t block_num)
{
dev->chnconf0[channel].mem_size_chn = block_num;
}
/**
* @brief Enable TX wrap
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @param enable True to enable, False to disable
*/
static inline void rmt_ll_tx_enable_wrap(rmt_dev_t *dev, uint32_t channel, bool enable)
{
dev->chnconf0[channel].mem_tx_wrap_en_chn = enable;
}
/**
* @brief Enable transmitting in a loop
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @param enable True to enable, False to disable
*/
__attribute__((always_inline))
static inline void rmt_ll_tx_enable_loop(rmt_dev_t *dev, uint32_t channel, bool enable)
{
dev->chnconf0[channel].tx_conti_mode_chn = enable;
}
/**
* @brief Set loop count for TX channel
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @param count TX loop count
*/
__attribute__((always_inline))
static inline void rmt_ll_tx_set_loop_count(rmt_dev_t *dev, uint32_t channel, uint32_t count)
{
HAL_ASSERT(count <= RMT_LL_MAX_LOOP_COUNT_PER_BATCH && "loop count out of range");
dev->chn_tx_lim[channel].tx_loop_num_chn = count;
}
/**
* @brief Reset loop count for TX channel
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
*/
__attribute__((always_inline))
static inline void rmt_ll_tx_reset_loop_count(rmt_dev_t *dev, uint32_t channel)
{
dev->chn_tx_lim[channel].loop_count_reset_chn = 1;
dev->chn_tx_lim[channel].loop_count_reset_chn = 0;
}
/**
* @brief Enable loop count for TX channel
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @param enable True to enable, False to disable
*/
__attribute__((always_inline))
static inline void rmt_ll_tx_enable_loop_count(rmt_dev_t *dev, uint32_t channel, bool enable)
{
dev->chn_tx_lim[channel].tx_loop_cnt_en_chn = enable;
}
/**
* @brief Enable loop stop at count value automatically
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @param enable True to enable, False to disable
*/
__attribute__((always_inline))
static inline void rmt_ll_tx_enable_loop_autostop(rmt_dev_t *dev, uint32_t channel, bool enable)
{
dev->chn_tx_lim[channel].loop_stop_en_chn = enable;
}
/**
* @brief Enable transmit multiple channels synchronously
*
* @param dev Peripheral instance address
* @param enable True to enable, False to disable
*/
static inline void rmt_ll_tx_enable_sync(rmt_dev_t *dev, bool enable)
{
dev->tx_sim.tx_sim_en = enable;
}
/**
* @brief Clear the TX channels synchronous group
*
* @param dev Peripheral instance address
*/
static inline void rmt_ll_tx_clear_sync_group(rmt_dev_t *dev)
{
dev->tx_sim.val &= ~(0x03);
}
/**
* @brief Add TX channels to the synchronous group
*
* @param dev Peripheral instance address
* @param channel_mask Mask of TX channels to be added to the synchronous group
*/
static inline void rmt_ll_tx_sync_group_add_channels(rmt_dev_t *dev, uint32_t channel_mask)
{
dev->tx_sim.val |= (channel_mask & 0x03);
}
/**
* @brief Remove TX channels from the synchronous group
*
* @param dev Peripheral instance address
* @param channel_mask Mask of TX channels to be removed from the synchronous group
*/
static inline void rmt_ll_tx_sync_group_remove_channels(rmt_dev_t *dev, uint32_t channel_mask)
{
dev->tx_sim.val &= ~channel_mask;
}
/**
* @brief Fix the output level when TX channel is in IDLE state
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @param level IDLE level (1 => high, 0 => low)
* @param enable True to fix the IDLE level, otherwise the IDLE level is determined by EOF encoder
*/
__attribute__((always_inline))
static inline void rmt_ll_tx_fix_idle_level(rmt_dev_t *dev, uint32_t channel, uint8_t level, bool enable)
{
dev->chnconf0[channel].idle_out_en_chn = enable;
dev->chnconf0[channel].idle_out_lv_chn = level;
}
/**
* @brief Set the amount of RMT symbols that can trigger the limitation interrupt
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @param limit Specify the number of symbols
*/
static inline void rmt_ll_tx_set_limit(rmt_dev_t *dev, uint32_t channel, uint32_t limit)
{
dev->chn_tx_lim[channel].tx_lim_chn = limit;
}
/**
* @brief Set high and low duration of carrier signal
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @param high_ticks Duration of high level
* @param low_ticks Duration of low level
*/
static inline void rmt_ll_tx_set_carrier_high_low_ticks(rmt_dev_t *dev, uint32_t channel, uint32_t high_ticks, uint32_t low_ticks)
{
HAL_ASSERT(high_ticks >= 1 && high_ticks <= 65536 && low_ticks >= 1 && low_ticks <= 65536 && "out of range high/low ticks");
// ticks=0 means 65536 in hardware
if (high_ticks >= 65536) {
high_ticks = 0;
}
if (low_ticks >= 65536) {
low_ticks = 0;
}
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->chncarrier_duty[channel], carrier_high_chn, high_ticks);
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->chncarrier_duty[channel], carrier_low_chn, low_ticks);
}
/**
* @brief Enable modulating carrier signal to TX channel
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @param enable True to enable, False to disable
*/
static inline void rmt_ll_tx_enable_carrier_modulation(rmt_dev_t *dev, uint32_t channel, bool enable)
{
dev->chnconf0[channel].carrier_en_chn = enable;
}
/**
* @brief Set on high or low to modulate the carrier signal
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @param level Which level to modulate on (0=>low level, 1=>high level)
*/
static inline void rmt_ll_tx_set_carrier_level(rmt_dev_t *dev, uint32_t channel, uint8_t level)
{
dev->chnconf0[channel].carrier_out_lv_chn = level;
}
/**
* @brief Enable to always output carrier signal, regardless of a valid data transmission
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @param enable True to output carrier signal in all RMT states, False to only output carrier signal for effective data
*/
static inline void rmt_ll_tx_enable_carrier_always_on(rmt_dev_t *dev, uint32_t channel, bool enable)
{
dev->chnconf0[channel].carrier_eff_en_chn = !enable;
}
////////////////////////////////////////RX Channel Specific/////////////////////////////////////////////////////////////
/**
* @brief Reset clock divider for RX channels by mask
*
* @param dev Peripheral instance address
* @param channel_mask Mask of RX channels
*/
static inline void rmt_ll_rx_reset_channels_clock_div(rmt_dev_t *dev, uint32_t channel_mask)
{
// write 1 to reset
dev->ref_cnt_rst.val |= ((channel_mask & 0x03) << 2);
}
/**
* @brief Set RX channel clock divider
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @param div Division value
*/
static inline void rmt_ll_rx_set_channel_clock_div(rmt_dev_t *dev, uint32_t channel, uint32_t div)
{
HAL_ASSERT(div >= 1 && div <= 256 && "divider out of range");
// limit the maximum divider to 256
if (div >= 256) {
div = 0; // 0 means 256 division
}
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->chmconf[channel].conf0, div_cnt_chm, div);
}
/**
* @brief Reset RMT writing pointer for RX channel
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
*/
__attribute__((always_inline))
static inline void rmt_ll_rx_reset_pointer(rmt_dev_t *dev, uint32_t channel)
{
dev->chmconf[channel].conf1.mem_wr_rst_chm = 1;
dev->chmconf[channel].conf1.mem_wr_rst_chm = 0;
dev->chmconf[channel].conf1.apb_mem_rst_chm = 1;
dev->chmconf[channel].conf1.apb_mem_rst_chm = 0;
}
/**
* @brief Enable receiving for RX channel
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @param enable True to enable, False to disable
*/
__attribute__((always_inline))
static inline void rmt_ll_rx_enable(rmt_dev_t *dev, uint32_t channel, bool enable)
{
dev->chmconf[channel].conf1.rx_en_chm = enable;
// rx won't be enabled until configurations updated
dev->chmconf[channel].conf1.conf_update_chm = 1;
}
/**
* @brief Set memory block number for RX channel
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @param block_num memory block number
*/
static inline void rmt_ll_rx_set_mem_blocks(rmt_dev_t *dev, uint32_t channel, uint8_t block_num)
{
dev->chmconf[channel].conf0.mem_size_chm = block_num;
}
/**
* @brief Set the time length for RX channel before going into IDLE state
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @param thres Time length threshold
*/
__attribute__((always_inline))
static inline void rmt_ll_rx_set_idle_thres(rmt_dev_t *dev, uint32_t channel, uint32_t thres)
{
dev->chmconf[channel].conf0.idle_thres_chm = thres;
}
/**
* @brief Set RMT memory owner for RX channel
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @param owner Memory owner
*/
__attribute__((always_inline))
static inline void rmt_ll_rx_set_mem_owner(rmt_dev_t *dev, uint32_t channel, rmt_ll_mem_owner_t owner)
{
dev->chmconf[channel].conf1.mem_owner_chm = owner;
}
/**
* @brief Enable filter for RX channel
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @param enable True to enable, False to disable
*/
__attribute__((always_inline))
static inline void rmt_ll_rx_enable_filter(rmt_dev_t *dev, uint32_t channel, bool enable)
{
dev->chmconf[channel].conf1.rx_filter_en_chm = enable;
}
/**
* @brief Set RX channel filter threshold (i.e. the maximum width of one pulse signal that would be treated as a noise)
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @param thres Filter threshold
*/
__attribute__((always_inline))
static inline void rmt_ll_rx_set_filter_thres(rmt_dev_t *dev, uint32_t channel, uint32_t thres)
{
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->chmconf[channel].conf1, rx_filter_thres_chm, thres);
}
/**
* @brief Get RMT memory write cursor offset
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @return writer offset
*/
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_memory_writer_offset(rmt_dev_t *dev, uint32_t channel)
{
return dev->chmstatus[channel].mem_waddr_ex_chm - (channel + 2) * 48;
}
/**
* @brief Set the amount of RMT symbols that can trigger the limitation interrupt
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @param limit Specify the number of symbols
*/
static inline void rmt_ll_rx_set_limit(rmt_dev_t *dev, uint32_t channel, uint32_t limit)
{
dev->chm_rx_lim[channel].rx_lim_chm = limit;
}
/**
* @brief Set high and low duration of carrier signal
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @param high_ticks Duration of high level
* @param low_ticks Duration of low level
*/
static inline void rmt_ll_rx_set_carrier_high_low_ticks(rmt_dev_t *dev, uint32_t channel, uint32_t high_ticks, uint32_t low_ticks)
{
HAL_ASSERT(high_ticks >= 1 && high_ticks <= 65536 && low_ticks >= 1 && low_ticks <= 65536 && "out of range high/low ticks");
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->chm_rx_carrier_rm[channel], carrier_high_thres_chm, high_ticks - 1);
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->chm_rx_carrier_rm[channel], carrier_low_thres_chm, low_ticks - 1);
}
/**
* @brief Enable demodulating the carrier on RX channel
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @param enable True to enable, False to disable
*/
static inline void rmt_ll_rx_enable_carrier_demodulation(rmt_dev_t *dev, uint32_t channel, bool enable)
{
dev->chmconf[channel].conf0.carrier_en_chm = enable;
}
/**
* @brief Set on high or low to demodulate the carrier signal
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @param level Which level to demodulate (0=>low level, 1=>high level)
*/
static inline void rmt_ll_rx_set_carrier_level(rmt_dev_t *dev, uint32_t channel, uint8_t level)
{
dev->chmconf[channel].conf0.carrier_out_lv_chm = level;
}
/**
* @brief Enable RX wrap
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @param enable True to enable, False to disable
*/
static inline void rmt_ll_rx_enable_wrap(rmt_dev_t *dev, uint32_t channel, bool enable)
{
dev->chmconf[channel].conf1.mem_rx_wrap_en_chm = enable;
}
//////////////////////////////////////////Interrupt Specific////////////////////////////////////////////////////////////
/**
* @brief Enable RMT interrupt for specific event mask
*
* @param dev Peripheral instance address
* @param mask Event mask
* @param enable True to enable, False to disable
*/
__attribute__((always_inline))
static inline void rmt_ll_enable_interrupt(rmt_dev_t *dev, uint32_t mask, bool enable)
{
if (enable) {
dev->int_ena.val |= mask;
} else {
dev->int_ena.val &= ~mask;
}
}
/**
* @brief Clear RMT interrupt status by mask
*
* @param dev Peripheral instance address
* @param mask Interrupt status mask
*/
__attribute__((always_inline))
static inline void rmt_ll_clear_interrupt_status(rmt_dev_t *dev, uint32_t mask)
{
dev->int_clr.val = mask;
}
/**
* @brief Get interrupt status register address
*
* @param dev Peripheral instance address
* @return Register address
*/
static inline volatile void *rmt_ll_get_interrupt_status_reg(rmt_dev_t *dev)
{
return &dev->int_st;
}
/**
* @brief Get interrupt status for TX channel
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @return Interrupt status
*/
__attribute__((always_inline))
static inline uint32_t rmt_ll_tx_get_interrupt_status(rmt_dev_t *dev, uint32_t channel)
{
return dev->int_st.val & RMT_LL_EVENT_TX_MASK(channel);
}
/**
* @brief Get interrupt raw status for TX channel
*
* @param dev Peripheral instance address
* @param channel RMT TX channel number
* @return Interrupt raw status
*/
static inline uint32_t rmt_ll_tx_get_interrupt_status_raw(rmt_dev_t *dev, uint32_t channel)
{
return dev->int_raw.val & (RMT_LL_EVENT_TX_MASK(channel) | RMT_LL_EVENT_TX_ERROR(channel));
}
/**
* @brief Get interrupt raw status for RX channel
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @return Interrupt raw status
*/
static inline uint32_t rmt_ll_rx_get_interrupt_status_raw(rmt_dev_t *dev, uint32_t channel)
{
return dev->int_raw.val & (RMT_LL_EVENT_RX_MASK(channel) | RMT_LL_EVENT_RX_ERROR(channel));
}
/**
* @brief Get interrupt status for RX channel
*
* @param dev Peripheral instance address
* @param channel RMT RX channel number
* @return Interrupt status
*/
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_interrupt_status(rmt_dev_t *dev, uint32_t channel)
{
return dev->int_st.val & RMT_LL_EVENT_RX_MASK(channel);
}
//////////////////////////////////////////Deprecated Functions//////////////////////////////////////////////////////////
/////////////////////////////The following functions are only used by the legacy driver/////////////////////////////////
/////////////////////////////They might be removed in the next major release (ESP-IDF 6.0)//////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__attribute__((always_inline))
static inline uint32_t rmt_ll_tx_get_status_word(rmt_dev_t *dev, uint32_t channel)
{
return dev->chnstatus[channel].val;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_status_word(rmt_dev_t *dev, uint32_t channel)
{
return dev->chmstatus[channel].val;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_tx_get_channel_clock_div(rmt_dev_t *dev, uint32_t channel)
{
uint32_t div = HAL_FORCE_READ_U32_REG_FIELD(dev->chnconf0[channel], div_cnt_chn);
return div == 0 ? 256 : div;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_channel_clock_div(rmt_dev_t *dev, uint32_t channel)
{
uint32_t div = HAL_FORCE_READ_U32_REG_FIELD(dev->chmconf[channel].conf0, div_cnt_chm);
return div == 0 ? 256 : div;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_idle_thres(rmt_dev_t *dev, uint32_t channel)
{
return dev->chmconf[channel].conf0.idle_thres_chm;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_tx_get_mem_blocks(rmt_dev_t *dev, uint32_t channel)
{
return dev->chnconf0[channel].mem_size_chn;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_mem_blocks(rmt_dev_t *dev, uint32_t channel)
{
return dev->chmconf[channel].conf0.mem_size_chm;
}
__attribute__((always_inline))
static inline bool rmt_ll_tx_is_loop_enabled(rmt_dev_t *dev, uint32_t channel)
{
return dev->chnconf0[channel].tx_conti_mode_chn;
}
__attribute__((always_inline))
static inline rmt_clock_source_t rmt_ll_get_group_clock_src(rmt_dev_t *dev, uint32_t channel)
{
rmt_clock_source_t clk_src = RMT_CLK_SRC_PLL_F80M;
switch (PCR.rmt_sclk_conf.rmt_sclk_sel) {
case 2:
clk_src = RMT_CLK_SRC_PLL_F80M;
break;
case 1:
clk_src = RMT_CLK_SRC_RC_FAST;
break;
case 0:
clk_src = RMT_CLK_SRC_XTAL;
break;
}
return clk_src;
}
__attribute__((always_inline))
static inline bool rmt_ll_tx_is_idle_enabled(rmt_dev_t *dev, uint32_t channel)
{
return dev->chnconf0[channel].idle_out_en_chn;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_tx_get_idle_level(rmt_dev_t *dev, uint32_t channel)
{
return dev->chnconf0[channel].idle_out_lv_chn;
}
static inline bool rmt_ll_is_mem_powered_down(rmt_dev_t *dev)
{
// the RTC domain can also power down RMT memory
// so it's probably not enough to detect whether it's powered down or not
// mem_force_pd has higher priority than mem_force_pu
return (dev->sys_conf.mem_force_pd) || !(dev->sys_conf.mem_force_pu);
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_mem_owner(rmt_dev_t *dev, uint32_t channel)
{
return dev->chmconf[channel].conf1.mem_owner_chm;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_limit(rmt_dev_t *dev, uint32_t channel)
{
return dev->chm_rx_lim[channel].rx_lim_chm;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_get_tx_end_interrupt_status(rmt_dev_t *dev)
{
return dev->int_st.val & 0x03;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_get_rx_end_interrupt_status(rmt_dev_t *dev)
{
return (dev->int_st.val >> 2) & 0x03;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_get_tx_err_interrupt_status(rmt_dev_t *dev)
{
return (dev->int_st.val >> 4) & 0x03;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_get_rx_err_interrupt_status(rmt_dev_t *dev)
{
return (dev->int_st.val >> 6) & 0x03;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_get_tx_thres_interrupt_status(rmt_dev_t *dev)
{
return (dev->int_st.val >> 8) & 0x03;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_get_rx_thres_interrupt_status(rmt_dev_t *dev)
{
return (dev->int_st.val >> 10) & 0x03;
}
__attribute__((always_inline))
static inline uint32_t rmt_ll_get_tx_loop_interrupt_status(rmt_dev_t *dev)
{
return (dev->int_st.val >> 12) & 0x03;
}
#ifdef __cplusplus
}
#endif
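
rmt_ll_set_group_clock_src() above documents the group clock as rmt_sclk = module_clock_src / (1 + div_num + div_a / div_b), and the per-channel dividers (rmt_ll_tx_set_channel_clock_div() / rmt_ll_rx_set_channel_clock_div()) divide that further. A small helper, not part of the driver, that evaluates the same arithmetic with illustrative numbers:

#include <stdint.h>

// Illustrative only: effective RMT tick rate given the group-clock divider fields and the
// per-channel divider. div_b == 0 is treated as "no fractional part".
static uint32_t rmt_effective_tick_hz(uint32_t src_hz, uint32_t div_num,
                                      uint32_t div_a, uint32_t div_b, uint32_t channel_div)
{
    float group_div = 1.0f + (float)div_num + (div_b ? (float)div_a / (float)div_b : 0.0f);
    return (uint32_t)((float)src_hz / group_div / (float)channel_div);
}

// Example: 80 MHz PLL source, divider_integral = 1 (so div_num = 0), channel divider 80
// -> rmt_effective_tick_hz(80000000, 0, 0, 0, 80) == 1000000, i.e. a 1 us RMT tick.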


@@ -39,6 +39,10 @@ config SOC_RTC_MEM_SUPPORTED
    bool
    default y
+config SOC_RMT_SUPPORTED
+    bool
+    default y
config SOC_GPSPI_SUPPORTED
    bool
    default y
@@ -271,6 +275,58 @@ config SOC_MMU_DI_VADDR_SHARED
    bool
    default y
+config SOC_RMT_GROUPS
+    int
+    default 1
+config SOC_RMT_TX_CANDIDATES_PER_GROUP
+    int
+    default 2
+config SOC_RMT_RX_CANDIDATES_PER_GROUP
+    int
+    default 2
+config SOC_RMT_CHANNELS_PER_GROUP
+    int
+    default 4
+config SOC_RMT_MEM_WORDS_PER_CHANNEL
+    int
+    default 48
+config SOC_RMT_SUPPORT_RX_PINGPONG
+    bool
+    default y
+config SOC_RMT_SUPPORT_RX_DEMODULATION
+    bool
+    default y
+config SOC_RMT_SUPPORT_TX_ASYNC_STOP
+    bool
+    default y
+config SOC_RMT_SUPPORT_TX_LOOP_COUNT
+    bool
+    default y
+config SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP
+    bool
+    default y
+config SOC_RMT_SUPPORT_TX_SYNCHRO
+    bool
+    default y
+config SOC_RMT_SUPPORT_TX_CARRIER_DATA_ONLY
+    bool
+    default y
+config SOC_RMT_SUPPORT_XTAL
+    bool
+    default y
config SOC_RSA_MAX_BIT_LEN
    int
    default 3072


@@ -193,12 +193,12 @@ typedef enum {
/**
* @brief Array initializer for all supported clock sources of RMT
*/
-#define SOC_RMT_CLKS {SOC_MOD_CLK_PLL_F80M, SOC_MOD_CLK_RC_FAST, SOC_MOD_CLK_XTAL}
+#define SOC_RMT_CLKS {SOC_MOD_CLK_PLL_F80M,/* SOC_MOD_CLK_RC_FAST,*/ SOC_MOD_CLK_XTAL}
/**
* @brief Type of RMT clock source
*/
-typedef enum { // TODO: [ESP32C5] IDF-8726 (inherit from C6)
+typedef enum {
    RMT_CLK_SRC_PLL_F80M = SOC_MOD_CLK_PLL_F80M, /*!< Select PLL_F80M as the source clock */
    RMT_CLK_SRC_RC_FAST = SOC_MOD_CLK_RC_FAST, /*!< Select RC_FAST as the source clock */
    RMT_CLK_SRC_XTAL = SOC_MOD_CLK_XTAL, /*!< Select XTAL as the source clock */
@@ -208,7 +208,7 @@ typedef enum { // TODO: [ESP32C5] IDF-8726 (inherit from C6)
/**
* @brief Type of RMT clock source, reserved for the legacy RMT driver
*/
-typedef enum { // TODO: [ESP32C5] IDF-8726 (inherit from C6)
+typedef enum {
    RMT_BASECLK_PLL_F80M = SOC_MOD_CLK_PLL_F80M, /*!< RMT source clock is PLL_F80M */
    RMT_BASECLK_XTAL = SOC_MOD_CLK_XTAL, /*!< RMT source clock is XTAL */
    RMT_BASECLK_DEFAULT = SOC_MOD_CLK_PLL_F80M, /*!< RMT source clock default choice is PLL_F80M */


@@ -39,7 +39,7 @@
#define SOC_RTC_FAST_MEM_SUPPORTED 1
#define SOC_RTC_MEM_SUPPORTED 1
// #define SOC_I2S_SUPPORTED 1 // TODO: [ESP32C5] IDF-8713, IDF-8714
-// #define SOC_RMT_SUPPORTED 1 // TODO: [ESP32C5] IDF-8726
+#define SOC_RMT_SUPPORTED 1
// #define SOC_SDM_SUPPORTED 1 // TODO: [ESP32C5] IDF-8687
#define SOC_GPSPI_SUPPORTED 1
// #define SOC_LEDC_SUPPORTED 1 // TODO: [ESP32C5] IDF-8684
@@ -246,7 +246,6 @@
#define SOC_I2C_SLAVE_CAN_GET_STRETCH_CAUSE (1)
#define SOC_I2C_SLAVE_SUPPORT_I2CRAM_ACCESS (1)
/*-------------------------- LP_I2C CAPS -------------------------------------*/
// ESP32-C5 has 1 LP_I2C
// #define SOC_LP_I2C_NUM (1U)
@@ -297,19 +296,19 @@
// #define SOC_PCNT_SUPPORT_RUNTIME_THRES_UPDATE 1
/*--------------------------- RMT CAPS ---------------------------------------*/
-// #define SOC_RMT_GROUPS 1U /*!< One RMT group */
+#define SOC_RMT_GROUPS 1U /*!< One RMT group */
-// #define SOC_RMT_TX_CANDIDATES_PER_GROUP 2 /*!< Number of channels that capable of Transmit */
+#define SOC_RMT_TX_CANDIDATES_PER_GROUP 2 /*!< Number of channels that capable of Transmit */
-// #define SOC_RMT_RX_CANDIDATES_PER_GROUP 2 /*!< Number of channels that capable of Receive */
+#define SOC_RMT_RX_CANDIDATES_PER_GROUP 2 /*!< Number of channels that capable of Receive */
-// #define SOC_RMT_CHANNELS_PER_GROUP 4 /*!< Total 4 channels */
+#define SOC_RMT_CHANNELS_PER_GROUP 4 /*!< Total 4 channels */
-// #define SOC_RMT_MEM_WORDS_PER_CHANNEL 48 /*!< Each channel owns 48 words memory (1 word = 4 Bytes) */
+#define SOC_RMT_MEM_WORDS_PER_CHANNEL 48 /*!< Each channel owns 48 words memory (1 word = 4 Bytes) */
-// #define SOC_RMT_SUPPORT_RX_PINGPONG 1 /*!< Support Ping-Pong mode on RX path */
+#define SOC_RMT_SUPPORT_RX_PINGPONG 1 /*!< Support Ping-Pong mode on RX path */
-// #define SOC_RMT_SUPPORT_RX_DEMODULATION 1 /*!< Support signal demodulation on RX path (i.e. remove carrier) */
+#define SOC_RMT_SUPPORT_RX_DEMODULATION 1 /*!< Support signal demodulation on RX path (i.e. remove carrier) */
-// #define SOC_RMT_SUPPORT_TX_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
+#define SOC_RMT_SUPPORT_TX_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
-// #define SOC_RMT_SUPPORT_TX_LOOP_COUNT 1 /*!< Support transmit specified number of cycles in loop mode */
+#define SOC_RMT_SUPPORT_TX_LOOP_COUNT 1 /*!< Support transmit specified number of cycles in loop mode */
-// #define SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP 1 /*!< Hardware support of auto-stop in loop mode */
+#define SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP 1 /*!< Hardware support of auto-stop in loop mode */
-// #define SOC_RMT_SUPPORT_TX_SYNCHRO 1 /*!< Support coordinate a group of TX channels to start simultaneously */
+#define SOC_RMT_SUPPORT_TX_SYNCHRO 1 /*!< Support coordinate a group of TX channels to start simultaneously */
-// #define SOC_RMT_SUPPORT_TX_CARRIER_DATA_ONLY 1 /*!< TX carrier can be modulated to data phase only */
+#define SOC_RMT_SUPPORT_TX_CARRIER_DATA_ONLY 1 /*!< TX carrier can be modulated to data phase only */
-// #define SOC_RMT_SUPPORT_XTAL 1 /*!< Support set XTAL clock as the RMT clock source */
+#define SOC_RMT_SUPPORT_XTAL 1 /*!< Support set XTAL clock as the RMT clock source */
// #define SOC_RMT_SUPPORT_RC_FAST 1 /*!< Support set RC_FAST as the RMT clock source */
/*-------------------------- MCPWM CAPS --------------------------------------*/


@@ -12,6 +12,7 @@ PROVIDE ( SPIMEM1 = 0x60003000 );
PROVIDE ( I2C0 = 0x60004000 );
PROVIDE ( UHCI0 = 0x60005000 );
PROVIDE ( RMT = 0x60006000 );
+PROVIDE ( RMTMEM = 0x60006400 );
PROVIDE ( LEDC = 0x60007000 );
PROVIDE ( TIMERG0 = 0x60008000 );
PROVIDE ( TIMERG1 = 0x60009000 );


@@ -0,0 +1,35 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "soc/rmt_periph.h"
#include "soc/gpio_sig_map.h"
const rmt_signal_conn_t rmt_periph_signals = {
.groups = {
[0] = {
.module = PERIPH_RMT_MODULE,
.irq = ETS_RMT_INTR_SOURCE,
.channels = {
[0] = {
.tx_sig = RMT_SIG_OUT0_IDX,
.rx_sig = -1
},
[1] = {
.tx_sig = RMT_SIG_OUT1_IDX,
.rx_sig = -1
},
[2] = {
.tx_sig = -1,
.rx_sig = RMT_SIG_IN0_IDX
},
[3] = {
.tx_sig = -1,
.rx_sig = RMT_SIG_IN1_IDX
},
}
}
}
};
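
As the table lays out, the two TX-capable channels occupy indexes 0-1 and the two RX-capable channels indexes 2-3, so the driver resolves RX signals with a fixed offset. An illustrative lookup, assuming RMT_RX_CHANNEL_OFFSET_IN_GROUP equals the TX candidate count (2), as used by rmt_rx.c above:

// logical RX channel 0 -> hardware channel 2 -> RMT_SIG_IN0_IDX
int rx_sig = rmt_periph_signals.groups[0].channels[0 + 2].rx_sig;
// logical TX channel 0 -> hardware channel 0 -> RMT_SIG_OUT0_IDX
int tx_sig = rmt_periph_signals.groups[0].channels[0].tx_sig;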


@@ -151,6 +151,58 @@ config SOC_MMU_DI_VADDR_SHARED
    bool
    default y
+config SOC_RMT_GROUPS
+    int
+    default 1
+config SOC_RMT_TX_CANDIDATES_PER_GROUP
+    int
+    default 2
+config SOC_RMT_RX_CANDIDATES_PER_GROUP
+    int
+    default 2
+config SOC_RMT_CHANNELS_PER_GROUP
+    int
+    default 4
+config SOC_RMT_MEM_WORDS_PER_CHANNEL
+    int
+    default 48
+config SOC_RMT_SUPPORT_RX_PINGPONG
+    bool
+    default y
+config SOC_RMT_SUPPORT_RX_DEMODULATION
+    bool
+    default y
+config SOC_RMT_SUPPORT_TX_ASYNC_STOP
+    bool
+    default y
+config SOC_RMT_SUPPORT_TX_LOOP_COUNT
+    bool
+    default y
+config SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP
+    bool
+    default y
+config SOC_RMT_SUPPORT_TX_SYNCHRO
+    bool
+    default y
+config SOC_RMT_SUPPORT_TX_CARRIER_DATA_ONLY
+    bool
+    default y
+config SOC_RMT_SUPPORT_XTAL
+    bool
+    default y
config SOC_RSA_MAX_BIT_LEN
    int
    default 3072


@@ -194,12 +194,12 @@ typedef enum {
/**
* @brief Array initializer for all supported clock sources of RMT
*/
-#define SOC_RMT_CLKS {SOC_MOD_CLK_PLL_F80M, SOC_MOD_CLK_RC_FAST, SOC_MOD_CLK_XTAL}
+#define SOC_RMT_CLKS {/*SOC_MOD_CLK_PLL_F80M, SOC_MOD_CLK_RC_FAST,*/ SOC_MOD_CLK_XTAL}
/**
* @brief Type of RMT clock source
*/
-typedef enum { // TODO: [ESP32C5] IDF-8726 (inherit from C6)
+typedef enum {
    RMT_CLK_SRC_PLL_F80M = SOC_MOD_CLK_PLL_F80M, /*!< Select PLL_F80M as the source clock */
    RMT_CLK_SRC_RC_FAST = SOC_MOD_CLK_RC_FAST, /*!< Select RC_FAST as the source clock */
    RMT_CLK_SRC_XTAL = SOC_MOD_CLK_XTAL, /*!< Select XTAL as the source clock */
@@ -209,7 +209,7 @@ typedef enum { // TODO: [ESP32C5] IDF-8726 (inherit from C6)
/**
* @brief Type of RMT clock source, reserved for the legacy RMT driver
*/
-typedef enum { // TODO: [ESP32C5] IDF-8726 (inherit from C6)
+typedef enum {
    RMT_BASECLK_PLL_F80M = SOC_MOD_CLK_PLL_F80M, /*!< RMT source clock is PLL_F80M */
    RMT_BASECLK_XTAL = SOC_MOD_CLK_XTAL, /*!< RMT source clock is XTAL */
    RMT_BASECLK_DEFAULT = SOC_MOD_CLK_PLL_F80M, /*!< RMT source clock default choice is PLL_F80M */


@@ -293,19 +293,19 @@
// #define SOC_PCNT_SUPPORT_RUNTIME_THRES_UPDATE 1
/*--------------------------- RMT CAPS ---------------------------------------*/
-// #define SOC_RMT_GROUPS 1U /*!< One RMT group */
+#define SOC_RMT_GROUPS 1U /*!< One RMT group */
-// #define SOC_RMT_TX_CANDIDATES_PER_GROUP 2 /*!< Number of channels that capable of Transmit */
+#define SOC_RMT_TX_CANDIDATES_PER_GROUP 2 /*!< Number of channels that capable of Transmit */
-// #define SOC_RMT_RX_CANDIDATES_PER_GROUP 2 /*!< Number of channels that capable of Receive */
+#define SOC_RMT_RX_CANDIDATES_PER_GROUP 2 /*!< Number of channels that capable of Receive */
-// #define SOC_RMT_CHANNELS_PER_GROUP 4 /*!< Total 4 channels */
+#define SOC_RMT_CHANNELS_PER_GROUP 4 /*!< Total 4 channels */
-// #define SOC_RMT_MEM_WORDS_PER_CHANNEL 48 /*!< Each channel owns 48 words memory (1 word = 4 Bytes) */
+#define SOC_RMT_MEM_WORDS_PER_CHANNEL 48 /*!< Each channel owns 48 words memory (1 word = 4 Bytes) */
-// #define SOC_RMT_SUPPORT_RX_PINGPONG 1 /*!< Support Ping-Pong mode on RX path */
+#define SOC_RMT_SUPPORT_RX_PINGPONG 1 /*!< Support Ping-Pong mode on RX path */
-// #define SOC_RMT_SUPPORT_RX_DEMODULATION 1 /*!< Support signal demodulation on RX path (i.e. remove carrier) */
+#define SOC_RMT_SUPPORT_RX_DEMODULATION 1 /*!< Support signal demodulation on RX path (i.e. remove carrier) */
-// #define SOC_RMT_SUPPORT_TX_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
+#define SOC_RMT_SUPPORT_TX_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
-// #define SOC_RMT_SUPPORT_TX_LOOP_COUNT 1 /*!< Support transmit specified number of cycles in loop mode */
+#define SOC_RMT_SUPPORT_TX_LOOP_COUNT 1 /*!< Support transmit specified number of cycles in loop mode */
-// #define SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP 1 /*!< Hardware support of auto-stop in loop mode */
+#define SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP 1 /*!< Hardware support of auto-stop in loop mode */
-// #define SOC_RMT_SUPPORT_TX_SYNCHRO 1 /*!< Support coordinate a group of TX channels to start simultaneously */
+#define SOC_RMT_SUPPORT_TX_SYNCHRO 1 /*!< Support coordinate a group of TX channels to start simultaneously */
-// #define SOC_RMT_SUPPORT_TX_CARRIER_DATA_ONLY 1 /*!< TX carrier can be modulated to data phase only */
+#define SOC_RMT_SUPPORT_TX_CARRIER_DATA_ONLY 1 /*!< TX carrier can be modulated to data phase only */
-// #define SOC_RMT_SUPPORT_XTAL 1 /*!< Support set XTAL clock as the RMT clock source */
+#define SOC_RMT_SUPPORT_XTAL 1 /*!< Support set XTAL clock as the RMT clock source */
// #define SOC_RMT_SUPPORT_RC_FAST 1 /*!< Support set RC_FAST as the RMT clock source */
/*-------------------------- MCPWM CAPS --------------------------------------*/


@@ -11,6 +11,7 @@ PROVIDE ( SPIMEM1 = 0x60003000 );
PROVIDE ( I2C = 0x60004000 );
PROVIDE ( UHCI = 0x60005000 );
PROVIDE ( RMT = 0x60006000 );
+PROVIDE ( RMTMEM = 0x60006400 );
PROVIDE ( LEDC = 0x60007000 );
PROVIDE ( TIMERG0 = 0x60008000 );
PROVIDE ( TIMERG1 = 0x60009000 );


@@ -0,0 +1,35 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "soc/rmt_periph.h"
#include "soc/gpio_sig_map.h"
const rmt_signal_conn_t rmt_periph_signals = {
.groups = {
[0] = {
.module = PERIPH_RMT_MODULE,
.irq = ETS_RMT_INTR_SOURCE,
.channels = {
[0] = {
.tx_sig = RMT_SIG_OUT0_IDX,
.rx_sig = -1
},
[1] = {
.tx_sig = RMT_SIG_OUT1_IDX,
.rx_sig = -1
},
[2] = {
.tx_sig = -1,
.rx_sig = RMT_SIG_IN0_IDX
},
[3] = {
.tx_sig = -1,
.rx_sig = RMT_SIG_IN1_IDX
},
}
}
}
};