feat(dw_gdma): channel allocator driver

pull/12852/head
morris 2023-11-30 11:15:34 +08:00
parent 558392b998
commit bf00021b37
16 changed files with 2388 additions and 198 deletions

View file

@@ -82,6 +82,10 @@ if(NOT BOOTLOADER_BUILD)
list(APPEND srcs "dma/gdma_etm.c")
endif()
if(CONFIG_SOC_DW_GDMA_SUPPORTED)
list(APPEND srcs "dma/dw_gdma.c")
endif()
if(CONFIG_SOC_SYSTIMER_SUPPORTED)
list(APPEND srcs "port/${target}/systimer.c")
endif()

View file

@@ -232,31 +232,7 @@ menu "Hardware Settings"
Note that this option only controls the ETM-related driver log; it won't affect other drivers.
endmenu # ETM Configuration
menu "GDMA Configuration"
depends on SOC_GDMA_SUPPORTED
config GDMA_CTRL_FUNC_IN_IRAM
bool "Place GDMA control functions into IRAM"
default n
help
Place GDMA control functions (like start/stop/append/reset) into IRAM,
so that these functions are IRAM-safe and can be called from an IRAM interrupt context.
Enabling this option can improve driver performance as well.
config GDMA_ISR_IRAM_SAFE
bool "GDMA ISR IRAM-Safe"
default n
help
This will ensure the GDMA interrupt handler is IRAM-safe, allowing it to avoid
flash cache misses and to run while the cache is disabled
(e.g. during SPI Flash write).
config GDMA_ENABLE_DEBUG_LOG
bool "Enable debug log"
default n
help
Whether to enable the debug log message for the GDMA driver.
Note that this option only controls the GDMA driver log; it won't affect other drivers.
endmenu # GDMA Configuration
rsource "./dma/Kconfig.dma"
menu "Main XTAL Config"
choice XTAL_FREQ_SEL

View file

@@ -75,3 +75,13 @@ classDiagram
class gptimer_etm_task_t {
}
```
## DMA Service
With increasing demand, the hardware design of DMA has changed along the way. At first, each peripheral had a dedicated DMA controller. Later, a centralized DMA controller was introduced, which is called `GDMA` in the software.
There may be multiple GDMA instances on a chip, some attached to the AHB bus and some to the AXI bus, but their functionalities are almost the same.
Some high-performance peripherals, such as MIPI, require DMA to provide more features, such as a hardware handshake mechanism, address growth mode, out-of-order transmission and so on. Therefore, a new DMA controller, called `DW_GDMA`, was born. The prefix *DW* is taken from *DesignWare*.
Please note that the specific DMA controller used by a peripheral is determined by the specific chip. It is possible that, on chip A, SPI works with AHB GDMA, while on chip B, SPI works with AXI GDMA.
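As a quick orientation, the sketch below shows a minimal memory-to-memory copy with the `DW_GDMA` driver. It mirrors the contiguous-mode M2M test case added in this commit; `src_buf` and `dst_buf` are placeholders for DMA-capable buffers (on cache-equipped targets they also need `esp_cache_msync`), and error handling is reduced to `ESP_ERROR_CHECK`:
```c
#include "esp_private/dw_gdma.h"

// both ends are plain memory, the DMA itself acts as the flow controller
dw_gdma_channel_static_config_t static_config = {
    .block_transfer_type = DW_GDMA_BLOCK_TRANSFER_CONTIGUOUS,
    .role = DW_GDMA_ROLE_MEM,
    .num_outstanding_requests = 1,
};
dw_gdma_channel_alloc_config_t alloc_config = {
    .src = static_config,
    .dst = static_config,
    .flow_controller = DW_GDMA_FLOW_CTRL_SELF,
    .chan_priority = 1,
};
dw_gdma_channel_handle_t chan = NULL;
ESP_ERROR_CHECK(dw_gdma_new_channel(&alloc_config, &chan));

// describe one block transfer: addresses, width and burst settings per end
dw_gdma_block_transfer_config_t transfer_config = {
    .src = {
        .addr = (uint32_t)src_buf, // placeholder: DMA-capable source buffer
        .burst_mode = DW_GDMA_BURST_MODE_INCREMENT,
        .width = DW_GDMA_TRANS_WIDTH_8,
        .burst_items = 4,
    },
    .dst = {
        .addr = (uint32_t)dst_buf, // placeholder: DMA-capable destination buffer
        .burst_mode = DW_GDMA_BURST_MODE_INCREMENT,
        .width = DW_GDMA_TRANS_WIDTH_8,
        .burst_items = 4,
    },
    .size = 256,
};
ESP_ERROR_CHECK(dw_gdma_channel_config_transfer(chan, &transfer_config));
// start the engine; the channel disables itself after the block completes
ESP_ERROR_CHECK(dw_gdma_channel_enable_ctrl(chan, true));
```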

View file

@@ -0,0 +1,57 @@
menu "GDMA Configurations"
depends on SOC_GDMA_SUPPORTED
config GDMA_CTRL_FUNC_IN_IRAM
bool "Place GDMA control functions in IRAM"
default n
help
Place GDMA control functions (like start/stop/append/reset) into IRAM,
so that these functions are IRAM-safe and can be called from an IRAM interrupt context.
config GDMA_ISR_IRAM_SAFE
bool "GDMA ISR IRAM-Safe"
default n
help
This will ensure the GDMA interrupt handler is IRAM-safe, allowing it to avoid
flash cache misses and to run while the cache is disabled
(e.g. during SPI Flash write).
config GDMA_ENABLE_DEBUG_LOG
bool "Enable debug log"
default n
help
Whether to enable the debug log message for the GDMA driver.
Note that this option only controls the GDMA driver log; it won't affect other drivers.
endmenu # GDMA Configurations
menu "DW_GDMA Configurations"
depends on SOC_DW_GDMA_SUPPORTED
config DW_GDMA_CTRL_FUNC_IN_IRAM
bool
default n
help
Place DW_GDMA control functions (e.g. dw_gdma_channel_continue) into IRAM,
so that these functions are IRAM-safe and can be called from an IRAM interrupt context.
config DW_GDMA_SETTER_FUNC_IN_IRAM
bool
default n
help
Place DW_GDMA setter functions (e.g. dw_gdma_channel_set_block_markers) into IRAM,
so that these functions are IRAM-safe and can be called from an IRAM interrupt context.
config DW_GDMA_ISR_IRAM_SAFE
bool
default n
help
This will ensure the DW_GDMA interrupt handler is IRAM-safe, allowing it to avoid
flash cache misses and to run while the cache is disabled
(e.g. during SPI Flash write).
config DW_GDMA_ENABLE_DEBUG_LOG
bool "Enable debug log"
default n
help
Whether to enable the debug log message for the DW_GDMA driver.
Note that this option only controls the DW_GDMA driver log; it won't affect other drivers.
endmenu # DW_GDMA Configurations

View file

@@ -0,0 +1,672 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <string.h>
#include <stdatomic.h>
#include <sys/cdefs.h>
#include <sys/lock.h>
#include "sdkconfig.h"
#if CONFIG_DW_GDMA_ENABLE_DEBUG_LOG
// The local log level must be defined before including esp_log.h
// Set the maximum log level for this source file
#define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
#endif
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "soc/soc_caps.h"
#include "soc/interrupts.h"
#include "esp_log.h"
#include "esp_check.h"
#include "esp_intr_alloc.h"
#include "esp_memory_utils.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/dw_gdma.h"
#include "hal/dw_gdma_hal.h"
#include "hal/dw_gdma_ll.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
static const char *TAG = "dw-gdma";
#if !SOC_RCC_IS_INDEPENDENT
// Reset and Clock Control registers are shared with other peripherals, so we need to use a critical section
#define DW_GDMA_RCC_ATOMIC() PERIPH_RCC_ATOMIC()
#else
#define DW_GDMA_RCC_ATOMIC()
#endif
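// Link list items are accessed by both the CPU and the DMA engine. On targets where
// internal memory goes through the L1 cache, use the non-cacheable alias so that
// descriptor updates don't require an explicit cache sync.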
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
#define DW_GDMA_GET_NON_CACHE_ADDR(addr) ((addr) ? CACHE_LL_L2MEM_NON_CACHE_ADDR(addr) : 0)
#else
#define DW_GDMA_GET_NON_CACHE_ADDR(addr) (addr)
#endif
#if CONFIG_DW_GDMA_ISR_IRAM_SAFE || CONFIG_DW_GDMA_CTRL_FUNC_IN_IRAM || CONFIG_DW_GDMA_SETTER_FUNC_IN_IRAM
#define DW_GDMA_MEM_ALLOC_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT)
#else
#define DW_GDMA_MEM_ALLOC_CAPS MALLOC_CAP_DEFAULT
#endif
#if CONFIG_DW_GDMA_ISR_IRAM_SAFE
#define DW_GDMA_INTR_ALLOC_FLAGS (ESP_INTR_FLAG_IRAM)
#else
#define DW_GDMA_INTR_ALLOC_FLAGS 0
#endif
#define DW_GDMA_ALLOW_INTR_PRIORITY_MASK ESP_INTR_FLAG_LOWMED
typedef struct dw_gdma_group_t dw_gdma_group_t;
typedef struct dw_gdma_channel_t dw_gdma_channel_t;
typedef struct dw_gdma_link_list_t {
uint32_t num_items; // number of items in the link list
dw_gdma_link_list_item_t *items; // pointer to the link list items
dw_gdma_link_list_item_t *items_nc; // pointer to the link list items, non-cached
} dw_gdma_link_list_t;
typedef struct {
_lock_t mutex; // platform level mutex lock
dw_gdma_group_t *groups[DW_GDMA_LL_GROUPS]; // array of DMA group instances
int group_ref_counts[DW_GDMA_LL_GROUPS]; // reference count used to protect group install/uninstall
} dw_gdma_platform_t;
struct dw_gdma_group_t {
int group_id; // Group ID, index from 0
dw_gdma_hal_context_t hal; // HAL instance is at group level
int intr_priority; // all channels in the same group should share the same interrupt priority
portMUX_TYPE spinlock; // group level spinlock, protect group level stuffs, e.g. hal object, pair handle slots and reference count of each pair
dw_gdma_channel_t *channels[DW_GDMA_LL_CHANNELS_PER_GROUP]; // handles of DMA channels
};
struct dw_gdma_channel_t {
int chan_id; // channel ID, index from 0
intr_handle_t intr; // per-channel interrupt handle
portMUX_TYPE spinlock; // channel level spinlock
dw_gdma_group_t *group; // pointer to the group which the channel belongs to
void *user_data; // user registered DMA event data
dw_gdma_event_callbacks_t cbs; // Event callbacks
dw_gdma_block_transfer_type_t src_transfer_type; // transfer type for source
dw_gdma_block_transfer_type_t dst_transfer_type; // transfer type for destination
};
// dw_gdma driver platform
static dw_gdma_platform_t s_platform;
static dw_gdma_group_t *dw_gdma_acquire_group_handle(int group_id)
{
bool new_group = false;
dw_gdma_group_t *group = NULL;
// prevent installing the dw_gdma group concurrently
_lock_acquire(&s_platform.mutex);
if (!s_platform.groups[group_id]) {
// The group handle is not created yet
group = heap_caps_calloc(1, sizeof(dw_gdma_group_t), DW_GDMA_MEM_ALLOC_CAPS);
if (group) {
new_group = true;
s_platform.groups[group_id] = group;
// enable APB to access DMA registers
DW_GDMA_RCC_ATOMIC() {
dw_gdma_ll_enable_bus_clock(group_id, true);
dw_gdma_ll_reset_register(group_id);
}
// initialize the HAL context
dw_gdma_hal_config_t hal_config = {};
dw_gdma_hal_init(&group->hal, &hal_config);
}
} else {
// the group is installed, we just retrieve it and increase the reference count
group = s_platform.groups[group_id];
}
if (group) {
// someone acquiring the group handle means a new object refers to this group
s_platform.group_ref_counts[group_id]++;
}
_lock_release(&s_platform.mutex);
if (new_group) {
portMUX_INITIALIZE(&group->spinlock);
group->group_id = group_id;
group->intr_priority = -1; // interrupt priority not assigned yet
ESP_LOGD(TAG, "new group (%d) at %p", group_id, group);
}
return group;
}
static void dw_gdma_release_group_handle(dw_gdma_group_t *group)
{
int group_id = group->group_id;
bool del_group = false;
_lock_acquire(&s_platform.mutex);
s_platform.group_ref_counts[group_id]--;
if (s_platform.group_ref_counts[group_id] == 0) {
del_group = true;
// the group now is not used by any channel, unregister it from the platform
s_platform.groups[group_id] = NULL;
// deinitialize the HAL context
dw_gdma_hal_deinit(&group->hal);
DW_GDMA_RCC_ATOMIC() {
dw_gdma_ll_enable_bus_clock(group_id, false);
}
}
_lock_release(&s_platform.mutex);
if (del_group) {
free(group);
ESP_LOGD(TAG, "delete group (%d)", group_id);
}
}
static esp_err_t channel_register_to_group(dw_gdma_channel_t *chan)
{
dw_gdma_group_t *group = NULL;
int chan_id = -1;
for (int i = 0; i < DW_GDMA_LL_GROUPS; i++) {
group = dw_gdma_acquire_group_handle(i);
ESP_RETURN_ON_FALSE(group, ESP_ERR_NO_MEM, TAG, "no mem for group(%d)", i);
// loop to search for a free channel in the group
portENTER_CRITICAL(&group->spinlock);
for (int j = 0; j < DW_GDMA_LL_CHANNELS_PER_GROUP; j++) {
if (group->channels[j] == NULL) {
group->channels[j] = chan;
chan_id = j;
break;
}
}
portEXIT_CRITICAL(&group->spinlock);
if (chan_id < 0) {
dw_gdma_release_group_handle(group);
} else {
chan->group = group;
chan->chan_id = chan_id;
break;
}
}
ESP_RETURN_ON_FALSE(chan_id >= 0, ESP_ERR_NOT_FOUND, TAG, "no free channels");
return ESP_OK;
}
static void channel_unregister_from_group(dw_gdma_channel_t *chan)
{
dw_gdma_group_t *group = chan->group;
int chan_id = chan->chan_id;
portENTER_CRITICAL(&group->spinlock);
group->channels[chan_id] = NULL;
portEXIT_CRITICAL(&group->spinlock);
// channel has a reference on group, release it now
dw_gdma_release_group_handle(group);
}
static esp_err_t channel_destroy(dw_gdma_channel_t *chan)
{
if (chan->group) {
channel_unregister_from_group(chan);
}
free(chan);
return ESP_OK;
}
esp_err_t dw_gdma_new_channel(const dw_gdma_channel_alloc_config_t *config, dw_gdma_channel_handle_t *ret_chan)
{
#if CONFIG_DW_GDMA_ENABLE_DEBUG_LOG
esp_log_level_set(TAG, ESP_LOG_DEBUG);
#endif
esp_err_t ret = ESP_OK;
dw_gdma_channel_t *chan = NULL;
ESP_RETURN_ON_FALSE(config && ret_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_RETURN_ON_FALSE(config->src.num_outstanding_requests >= 1 && config->src.num_outstanding_requests <= DW_GDMA_LL_MAX_OUTSTANDING_REQUESTS,
ESP_ERR_INVALID_ARG, TAG, "invalid num_outstanding_requests");
ESP_RETURN_ON_FALSE(config->dst.num_outstanding_requests >= 1 && config->dst.num_outstanding_requests <= DW_GDMA_LL_MAX_OUTSTANDING_REQUESTS,
ESP_ERR_INVALID_ARG, TAG, "invalid num_outstanding_request");
ESP_RETURN_ON_FALSE(config->chan_priority >= 0 && config->chan_priority < DW_GDMA_LL_CHANNELS_PER_GROUP,
ESP_ERR_INVALID_ARG, TAG, "invalid channel priority");
if (config->intr_priority) {
ESP_RETURN_ON_FALSE(1 << (config->intr_priority) & DW_GDMA_ALLOW_INTR_PRIORITY_MASK, ESP_ERR_INVALID_ARG,
TAG, "invalid interrupt priority:%d", config->intr_priority);
}
chan = heap_caps_calloc(1, sizeof(dw_gdma_channel_t), DW_GDMA_MEM_ALLOC_CAPS);
ESP_RETURN_ON_FALSE(chan, ESP_ERR_NO_MEM, TAG, "no mem for channel");
// register channel to the group
ESP_GOTO_ON_ERROR(channel_register_to_group(chan), err, TAG, "register to group failed");
dw_gdma_group_t *group = chan->group;
dw_gdma_hal_context_t *hal = &group->hal;
int group_id = group->group_id;
int chan_id = chan->chan_id;
// all channels in the same group should use the same interrupt priority
bool intr_priority_conflict = false;
portENTER_CRITICAL(&group->spinlock);
if (group->intr_priority == -1) {
group->intr_priority = config->intr_priority;
} else if (config->intr_priority != 0) {
intr_priority_conflict = (group->intr_priority != config->intr_priority);
}
portEXIT_CRITICAL(&group->spinlock);
ESP_GOTO_ON_FALSE(!intr_priority_conflict, ESP_ERR_INVALID_STATE, err, TAG, "intr_priority conflict, already is %d but attempt to %d", group->intr_priority, config->intr_priority);
// basic initialization
portMUX_INITIALIZE(&chan->spinlock);
chan->src_transfer_type = config->src.block_transfer_type;
chan->dst_transfer_type = config->dst.block_transfer_type;
// set transfer flow type
dw_gdma_ll_channel_set_trans_flow(hal->dev, chan_id, config->src.role, config->dst.role, config->flow_controller);
// set the transfer type for source and destination
dw_gdma_ll_channel_set_src_multi_block_type(hal->dev, chan_id, config->src.block_transfer_type);
dw_gdma_ll_channel_set_dst_multi_block_type(hal->dev, chan_id, config->dst.block_transfer_type);
// set handshake interface
dw_gdma_ll_channel_set_src_handshake_interface(hal->dev, chan_id, config->src.handshake_type);
dw_gdma_ll_channel_set_dst_handshake_interface(hal->dev, chan_id, config->dst.handshake_type);
// set handshake peripheral
if (config->src.role != DW_GDMA_ROLE_MEM) {
dw_gdma_ll_channel_set_src_handshake_periph(hal->dev, chan_id, config->src.role);
}
if (config->dst.role != DW_GDMA_ROLE_MEM) {
dw_gdma_ll_channel_set_dst_handshake_periph(hal->dev, chan_id, config->dst.role);
}
// set channel priority
dw_gdma_ll_channel_set_priority(hal->dev, chan_id, config->chan_priority);
// set the outstanding request number
dw_gdma_ll_channel_set_src_outstanding_limit(hal->dev, chan_id, config->src.num_outstanding_requests);
dw_gdma_ll_channel_set_dst_outstanding_limit(hal->dev, chan_id, config->dst.num_outstanding_requests);
// set the status fetch address
dw_gdma_ll_channel_set_src_periph_status_addr(hal->dev, chan_id, config->src.status_fetch_addr);
dw_gdma_ll_channel_set_dst_periph_status_addr(hal->dev, chan_id, config->dst.status_fetch_addr);
// enable all channel events (note: they can't trigger an interrupt until `dw_gdma_ll_channel_enable_intr_propagation` is called)
dw_gdma_ll_channel_enable_intr_generation(hal->dev, chan_id, UINT32_MAX, true);
ESP_LOGD(TAG, "new channel (%d,%d) at %p", group_id, chan_id, chan);
*ret_chan = chan;
return ESP_OK;
err:
if (chan) {
channel_destroy(chan);
}
return ret;
}
esp_err_t dw_gdma_del_channel(dw_gdma_channel_handle_t chan)
{
ESP_RETURN_ON_FALSE(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
dw_gdma_group_t *group = chan->group;
int group_id = group->group_id;
int chan_id = chan->chan_id;
ESP_LOGD(TAG, "del channel (%d,%d)", group_id, chan_id);
// recycle memory resource
ESP_RETURN_ON_ERROR(channel_destroy(chan), TAG, "destroy channel failed");
return ESP_OK;
}
esp_err_t dw_gdma_channel_enable_ctrl(dw_gdma_channel_handle_t chan, bool en_or_dis)
{
ESP_RETURN_ON_FALSE(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
dw_gdma_hal_context_t *hal = &chan->group->hal;
int chan_id = chan->chan_id;
// atomicity is ensured by the hardware, so no lock is needed here
dw_gdma_ll_channel_enable(hal->dev, chan_id, en_or_dis);
return ESP_OK;
}
esp_err_t dw_gdma_channel_suspend_ctrl(dw_gdma_channel_handle_t chan, bool enter_or_exit)
{
ESP_RETURN_ON_FALSE(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
dw_gdma_hal_context_t *hal = &chan->group->hal;
int chan_id = chan->chan_id;
// atomicity is ensured by the hardware, so no lock is needed here
dw_gdma_ll_channel_suspend(hal->dev, chan_id, enter_or_exit);
return ESP_OK;
}
esp_err_t dw_gdma_channel_abort(dw_gdma_channel_handle_t chan)
{
ESP_RETURN_ON_FALSE(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
dw_gdma_hal_context_t *hal = &chan->group->hal;
int chan_id = chan->chan_id;
// atomicity is ensured by the hardware, so no lock is needed here
dw_gdma_ll_channel_abort(hal->dev, chan_id);
return ESP_OK;
}
esp_err_t dw_gdma_channel_lock(dw_gdma_channel_handle_t chan, dw_gdma_lock_level_t level)
{
ESP_RETURN_ON_FALSE(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
dw_gdma_hal_context_t *hal = &chan->group->hal;
int chan_id = chan->chan_id;
// the lock control bit is located in a cfg register, with other configuration bits
portENTER_CRITICAL(&chan->spinlock);
dw_gdma_ll_channel_lock(hal->dev, chan_id, level);
portEXIT_CRITICAL(&chan->spinlock);
return ESP_OK;
}
esp_err_t dw_gdma_channel_unlock(dw_gdma_channel_handle_t chan)
{
ESP_RETURN_ON_FALSE(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
dw_gdma_hal_context_t *hal = &chan->group->hal;
int chan_id = chan->chan_id;
// the lock control bit is located in a cfg register, with other configuration bits
portENTER_CRITICAL(&chan->spinlock);
dw_gdma_ll_channel_unlock(hal->dev, chan_id);
portEXIT_CRITICAL(&chan->spinlock);
return ESP_OK;
}
esp_err_t dw_gdma_channel_continue(dw_gdma_channel_handle_t chan)
{
ESP_RETURN_ON_FALSE(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
dw_gdma_hal_context_t *hal = &chan->group->hal;
int chan_id = chan->chan_id;
// atomicity is ensured by the hardware, so no lock is needed here
dw_gdma_ll_channel_resume_multi_block_transfer(hal->dev, chan_id);
return ESP_OK;
}
esp_err_t dw_gdma_new_link_list(const dw_gdma_link_list_config_t *config, dw_gdma_link_list_handle_t *ret_list)
{
esp_err_t ret = ESP_OK;
ESP_RETURN_ON_FALSE(config && ret_list, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
dw_gdma_link_list_item_t *items = NULL;
dw_gdma_link_list_t *list = NULL;
uint32_t num_items = config->num_items;
list = heap_caps_calloc(1, sizeof(dw_gdma_link_list_t), DW_GDMA_MEM_ALLOC_CAPS);
ESP_GOTO_ON_FALSE(list, ESP_ERR_NO_MEM, err, TAG, "no mem for link list");
// the link list item has a strict alignment requirement, so we allocate it separately
items = heap_caps_aligned_calloc(DW_GDMA_LL_LINK_LIST_ALIGNMENT, num_items,
sizeof(dw_gdma_link_list_item_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA);
ESP_GOTO_ON_FALSE(items, ESP_ERR_NO_MEM, err, TAG, "no mem for link list items");
list->num_items = num_items;
list->items = items;
list->items_nc = (dw_gdma_link_list_item_t *)DW_GDMA_GET_NON_CACHE_ADDR(items);
// set up the link list
for (size_t i = 0; i < num_items; i++) {
dw_gdma_ll_lli_set_next_item_addr(list->items_nc + i, (uint32_t)(list->items + i + 1));
// set master port for the link list
dw_gdma_ll_lli_set_link_list_master_port(list->items_nc + i, DW_GDMA_LL_MASTER_PORT_MEMORY);
}
switch (config->link_type) {
case DW_GDMA_LINKED_LIST_TYPE_CIRCULAR:
dw_gdma_ll_lli_set_next_item_addr(list->items_nc + num_items - 1, (uint32_t)(list->items));
break;
case DW_GDMA_LINKED_LIST_TYPE_SINGLY:
dw_gdma_ll_lli_set_next_item_addr(list->items_nc + num_items - 1, 0);
break;
}
ESP_LOGD(TAG, "new link list @%p, items @%p", list, items);
*ret_list = list;
return ESP_OK;
err:
if (list) {
free(list);
}
if (items) {
free(items);
}
return ret;
}
esp_err_t dw_gdma_del_link_list(dw_gdma_link_list_handle_t list)
{
ESP_RETURN_ON_FALSE(list, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_LOGD(TAG, "del link list at %p", list);
free(list->items);
free(list);
return ESP_OK;
}
esp_err_t dw_gdma_channel_use_link_list(dw_gdma_channel_handle_t chan, dw_gdma_link_list_handle_t list)
{
ESP_RETURN_ON_FALSE(chan && list, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_RETURN_ON_FALSE(chan->src_transfer_type == DW_GDMA_BLOCK_TRANSFER_LIST ||
chan->dst_transfer_type == DW_GDMA_BLOCK_TRANSFER_LIST,
ESP_ERR_INVALID_STATE, TAG, "invalid transfer type");
dw_gdma_hal_context_t *hal = &chan->group->hal;
int chan_id = chan->chan_id;
// set master port for the link list
dw_gdma_ll_channel_set_link_list_master_port(hal->dev, chan_id, DW_GDMA_LL_MASTER_PORT_MEMORY);
// set the link list head address
dw_gdma_ll_channel_set_link_list_head_addr(hal->dev, chan_id, (uint32_t)(list->items));
return ESP_OK;
}
dw_gdma_lli_handle_t dw_gdma_link_list_get_item(dw_gdma_link_list_handle_t list, int item_index)
{
ESP_RETURN_ON_FALSE_ISR(list, NULL, TAG, "invalid argument");
ESP_RETURN_ON_FALSE_ISR(item_index >= 0 && item_index < list->num_items, NULL, TAG, "invalid item index");
dw_gdma_link_list_item_t *lli = list->items_nc + item_index;
return lli;
}
esp_err_t dw_gdma_channel_config_transfer(dw_gdma_channel_handle_t chan, const dw_gdma_block_transfer_config_t *config)
{
ESP_RETURN_ON_FALSE(chan && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_RETURN_ON_FALSE(chan->src_transfer_type != DW_GDMA_BLOCK_TRANSFER_LIST &&
chan->dst_transfer_type != DW_GDMA_BLOCK_TRANSFER_LIST,
ESP_ERR_INVALID_STATE, TAG, "invalid transfer type");
dw_gdma_hal_context_t *hal = &chan->group->hal;
int chan_id = chan->chan_id;
// set memory address
dw_gdma_ll_channel_set_src_addr(hal->dev, chan_id, config->src.addr);
dw_gdma_ll_channel_set_dst_addr(hal->dev, chan_id, config->dst.addr);
// transfer size
dw_gdma_ll_channel_set_trans_block_size(hal->dev, chan_id, config->size);
// [Ctrl0] register
// set master port for the source and destination target
dw_gdma_ll_channel_set_src_master_port(hal->dev, chan_id, config->src.addr);
dw_gdma_ll_channel_set_dst_master_port(hal->dev, chan_id, config->dst.addr);
// transfer width
dw_gdma_ll_channel_set_src_trans_width(hal->dev, chan_id, config->src.width);
dw_gdma_ll_channel_set_dst_trans_width(hal->dev, chan_id, config->dst.width);
// set burst items
dw_gdma_ll_channel_set_src_burst_items(hal->dev, chan_id, config->src.burst_items);
dw_gdma_ll_channel_set_dst_burst_items(hal->dev, chan_id, config->dst.burst_items);
// set burst mode
dw_gdma_ll_channel_set_src_burst_mode(hal->dev, chan_id, config->src.burst_mode);
dw_gdma_ll_channel_set_dst_burst_mode(hal->dev, chan_id, config->dst.burst_mode);
// [Ctrl1] register
// set burst length
dw_gdma_ll_channel_set_src_burst_len(hal->dev, chan_id, config->src.burst_len);
dw_gdma_ll_channel_set_dst_burst_len(hal->dev, chan_id, config->dst.burst_len);
// whether to enable the peripheral status write back
dw_gdma_ll_channel_enable_src_periph_status_write_back(hal->dev, chan_id, config->src.flags.en_status_write_back);
dw_gdma_ll_channel_enable_dst_periph_status_write_back(hal->dev, chan_id, config->dst.flags.en_status_write_back);
return ESP_OK;
}
esp_err_t dw_gdma_channel_set_block_markers(dw_gdma_channel_handle_t chan, dw_gdma_block_markers_t markers)
{
ESP_RETURN_ON_FALSE_ISR(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_RETURN_ON_FALSE_ISR(chan->src_transfer_type != DW_GDMA_BLOCK_TRANSFER_LIST &&
chan->dst_transfer_type != DW_GDMA_BLOCK_TRANSFER_LIST,
ESP_ERR_INVALID_STATE, TAG, "invalid transfer type");
dw_gdma_hal_context_t *hal = &chan->group->hal;
int chan_id = chan->chan_id;
// [Ctrl1] register
// set the block markers
dw_gdma_ll_channel_set_block_markers(hal->dev, chan_id, markers.en_trans_done_intr, markers.is_last, markers.is_valid);
return ESP_OK;
}
esp_err_t dw_gdma_lli_config_transfer(dw_gdma_lli_handle_t lli, dw_gdma_block_transfer_config_t *config)
{
ESP_RETURN_ON_FALSE(lli && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
// set memory address
dw_gdma_ll_lli_set_src_addr(lli, config->src.addr);
dw_gdma_ll_lli_set_dst_addr(lli, config->dst.addr);
// transfer size
dw_gdma_ll_lli_set_trans_block_size(lli, config->size);
// [Ctrl0] register
// set master port for the source and destination target
dw_gdma_ll_lli_set_src_master_port(lli, config->src.addr);
dw_gdma_ll_lli_set_dst_master_port(lli, config->dst.addr);
// transfer width
dw_gdma_ll_lli_set_src_trans_width(lli, config->src.width);
dw_gdma_ll_lli_set_dst_trans_width(lli, config->dst.width);
// set burst items
dw_gdma_ll_lli_set_src_burst_items(lli, config->src.burst_items);
dw_gdma_ll_lli_set_dst_burst_items(lli, config->dst.burst_items);
// set burst mode
dw_gdma_ll_lli_set_src_burst_mode(lli, config->src.burst_mode);
dw_gdma_ll_lli_set_dst_burst_mode(lli, config->dst.burst_mode);
// [Ctrl1] register
// set burst length
dw_gdma_ll_lli_set_src_burst_len(lli, config->src.burst_len);
dw_gdma_ll_lli_set_dst_burst_len(lli, config->dst.burst_len);
// whether to enable the peripheral status write back
dw_gdma_ll_lli_enable_src_periph_status_write_back(lli, config->src.flags.en_status_write_back);
dw_gdma_ll_lli_enable_dst_periph_status_write_back(lli, config->dst.flags.en_status_write_back);
return ESP_OK;
}
esp_err_t dw_gdma_lli_set_block_markers(dw_gdma_lli_handle_t lli, dw_gdma_block_markers_t markers)
{
ESP_RETURN_ON_FALSE_ISR(lli, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
// [Ctrl1] register
// set the block markers
dw_gdma_ll_lli_set_block_markers(lli, markers.en_trans_done_intr, markers.is_last, markers.is_valid);
return ESP_OK;
}
void dw_gdma_channel_default_isr(void *args)
{
dw_gdma_channel_t *chan = (dw_gdma_channel_t *)args;
dw_gdma_group_t *group = chan->group;
dw_gdma_hal_context_t *hal = &group->hal;
int chan_id = chan->chan_id;
bool need_yield = false;
// clear pending interrupt event
uint32_t intr_status = dw_gdma_ll_channel_get_intr_status(hal->dev, chan_id);
dw_gdma_ll_channel_clear_intr(hal->dev, chan_id, intr_status);
// call user callbacks
if (intr_status & DW_GDMA_LL_CHANNEL_EVENT_SHADOWREG_OR_LLI_INVALID_ERR) {
if (chan->cbs.on_invalid_block) {
intptr_t invalid_lli_addr = dw_gdma_ll_channel_get_current_link_list_item_addr(hal->dev, chan_id);
dw_gdma_break_event_data_t edata = {
.invalid_lli = (dw_gdma_lli_handle_t)DW_GDMA_GET_NON_CACHE_ADDR(invalid_lli_addr),
};
if (chan->cbs.on_invalid_block(chan, &edata, chan->user_data)) {
need_yield = true;
}
}
}
if (intr_status & DW_GDMA_LL_CHANNEL_EVENT_BLOCK_TFR_DONE) {
if (chan->cbs.on_block_trans_done) {
dw_gdma_trans_done_event_data_t edata = {};
if (chan->cbs.on_block_trans_done(chan, &edata, chan->user_data)) {
need_yield = true;
}
}
}
if (intr_status & DW_GDMA_LL_CHANNEL_EVENT_DMA_TFR_DONE) {
if (chan->cbs.on_full_trans_done) {
dw_gdma_trans_done_event_data_t edata = {};
if (chan->cbs.on_full_trans_done(chan, &edata, chan->user_data)) {
need_yield = true;
}
}
}
if (need_yield) {
portYIELD_FROM_ISR();
}
}
static esp_err_t dw_gdma_install_channel_interrupt(dw_gdma_channel_t *chan)
{
esp_err_t ret = ESP_OK;
dw_gdma_group_t *group = chan->group;
dw_gdma_hal_context_t *hal = &group->hal;
int chan_id = chan->chan_id;
// clear pending events
dw_gdma_ll_channel_enable_intr_propagation(hal->dev, chan_id, UINT32_MAX, false);
dw_gdma_ll_channel_clear_intr(hal->dev, chan_id, UINT32_MAX);
// pre-allocate an interrupt handle, with handler disabled
// DW_GDMA multiple channels share the same interrupt source, so we use a shared interrupt handle
intr_handle_t intr = NULL;
int isr_flags = DW_GDMA_INTR_ALLOC_FLAGS | ESP_INTR_FLAG_SHARED;
if (group->intr_priority) {
isr_flags |= 1 << (group->intr_priority);
} else {
isr_flags |= DW_GDMA_ALLOW_INTR_PRIORITY_MASK;
}
ret = esp_intr_alloc_intrstatus(ETS_DW_GDMA_INTR_SOURCE, isr_flags,
(uint32_t)dw_gdma_ll_get_intr_status_reg(hal->dev), DW_GDMA_LL_CHANNEL_EVENT_MASK(chan_id),
dw_gdma_channel_default_isr, chan, &intr);
ESP_RETURN_ON_ERROR(ret, TAG, "alloc interrupt failed");
ESP_LOGD(TAG, "install interrupt service for channel (%d,%d)", group->group_id, chan_id);
chan->intr = intr;
return ESP_OK;
}
esp_err_t dw_gdma_channel_register_event_callbacks(dw_gdma_channel_handle_t chan, dw_gdma_event_callbacks_t *cbs, void *user_data)
{
ESP_RETURN_ON_FALSE(chan && cbs, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
dw_gdma_group_t *group = chan->group;
dw_gdma_hal_context_t *hal = &group->hal;
int chan_id = chan->chan_id;
#if CONFIG_DW_GDMA_ISR_IRAM_SAFE
if (cbs->on_block_trans_done) {
ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_block_trans_done), ESP_ERR_INVALID_ARG,
TAG, "on_block_trans_done not in IRAM");
}
if (cbs->on_full_trans_done) {
ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_full_trans_done), ESP_ERR_INVALID_ARG,
TAG, "on_full_trans_done not in IRAM");
}
if (cbs->on_invalid_block) {
ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_invalid_block), ESP_ERR_INVALID_ARG,
TAG, "on_invalid_block not in IRAM");
}
if (user_data) {
ESP_RETURN_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG,
TAG, "user context not in internal RAM");
}
#endif // CONFIG_DW_GDMA_ISR_IRAM_SAFE
// lazy install interrupt service
if (!chan->intr) {
ESP_RETURN_ON_ERROR(dw_gdma_install_channel_interrupt(chan), TAG, "install interrupt service failed");
}
// enable the event to be able to trigger an interrupt
dw_gdma_ll_channel_enable_intr_propagation(hal->dev, chan_id, DW_GDMA_LL_CHANNEL_EVENT_BLOCK_TFR_DONE, cbs->on_block_trans_done != NULL);
dw_gdma_ll_channel_enable_intr_propagation(hal->dev, chan_id, DW_GDMA_LL_CHANNEL_EVENT_DMA_TFR_DONE, cbs->on_full_trans_done != NULL);
dw_gdma_ll_channel_enable_intr_propagation(hal->dev, chan_id, DW_GDMA_LL_CHANNEL_EVENT_SHADOWREG_OR_LLI_INVALID_ERR, cbs->on_invalid_block != NULL);
chan->user_data = user_data;
memcpy(&chan->cbs, cbs, sizeof(dw_gdma_event_callbacks_t));
return ESP_OK;
}
esp_err_t dw_gdma_channel_get_id(dw_gdma_channel_handle_t chan, int *channel_id)
{
ESP_RETURN_ON_FALSE(chan && channel_id, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
*channel_id = chan->chan_id;
return ESP_OK;
}

View file

@@ -67,3 +67,18 @@ entries:
gdma_hal_axi: gdma_axi_hal_stop (noflash)
gdma_hal_axi: gdma_axi_hal_append (noflash)
gdma_hal_axi: gdma_axi_hal_reset (noflash)
[mapping:dw_gdma_driver]
archive: libesp_hw_support.a
entries:
# performance optimization, always put the DW_GDMA default interrupt handler in IRAM
if SOC_DW_GDMA_SUPPORTED = y:
dw_gdma: dw_gdma_channel_default_isr (noflash)
# put DW_GDMA control functions in IRAM
if DW_GDMA_CTRL_FUNC_IN_IRAM = y:
dw_gdma: dw_gdma_channel_continue (noflash)
if DW_GDMA_SETTER_FUNC_IN_IRAM = y:
dw_gdma: dw_gdma_channel_set_block_markers (noflash)
dw_gdma: dw_gdma_lli_set_block_markers (noflash)

View file

@@ -0,0 +1,405 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdbool.h>
#include "esp_err.h"
#include "hal/dw_gdma_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Type of DW_GDMA channel handle
*/
typedef struct dw_gdma_channel_t *dw_gdma_channel_handle_t;
/**
* @brief Type of DW_GDMA link list handle
*/
typedef struct dw_gdma_link_list_t *dw_gdma_link_list_handle_t;
/**
* @brief Type of DW_GDMA link list item handle
*/
typedef struct dw_gdma_link_list_item_t *dw_gdma_lli_handle_t;
/**
* @brief A group of channel's static configurations
*
* @note By static, we mean these channel end configurations shouldn't be changed after the DMA channel is created.
*/
typedef struct {
dw_gdma_block_transfer_type_t block_transfer_type; /*!< Block transfer type */
dw_gdma_role_t role; /*!< Role of the DMA channel end */
dw_gdma_handshake_type_t handshake_type; /*!< Handshake type */
uint8_t num_outstanding_requests; /*!< Number of R/W requests that the AXI master can issue to the slave before receiving a response.
Suggested value range: [1,16] */
uint32_t status_fetch_addr; /*!< Address where to fetch the status of the peripheral */
} dw_gdma_channel_static_config_t;
/**
* @brief Configurations for allocating a DMA channel
*/
typedef struct {
dw_gdma_channel_static_config_t src; /*!< source end static configuration */
dw_gdma_channel_static_config_t dst; /*!< destination end static configuration */
dw_gdma_flow_controller_t flow_controller; /*!< Transfer flow controller */
int chan_priority; /*!< DMA channel priority */
int intr_priority; /*!< DMA interrupt priority,
if set to 0, the driver will try to allocate an interrupt with a relatively low priority (1,2,3) */
} dw_gdma_channel_alloc_config_t;
/**
* @brief Create a DMA channel
*
* @param[in] config Channel allocation configuration
* @param[out] ret_chan Returned channel handle
* @return
* - ESP_OK: Create DMA channel successfully
* - ESP_ERR_INVALID_ARG: Create DMA channel failed because of invalid argument
* - ESP_ERR_NO_MEM: Create DMA channel failed because out of memory
* - ESP_FAIL: Create DMA channel failed because of other error
*/
esp_err_t dw_gdma_new_channel(const dw_gdma_channel_alloc_config_t *config, dw_gdma_channel_handle_t *ret_chan);
/**
* @brief Delete DMA channel
*
* @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel`
* @return
* - ESP_OK: Delete DMA channel successfully
* - ESP_ERR_INVALID_ARG: Delete DMA channel failed because of invalid argument
* - ESP_FAIL: Delete DMA channel failed because of other error
*/
esp_err_t dw_gdma_del_channel(dw_gdma_channel_handle_t chan);
/**
* @brief Get the DMA channel ID
*
* @note This API breaks the encapsulation of the DW_GDMA channel object.
* With the returned channel ID, you can even bypass all the other driver APIs and access the Low Level API directly.
*
* @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel`
* @param[out] channel_id Returned channel ID
* @return
* - ESP_OK: Get DW_GDMA channel ID successfully
* - ESP_ERR_INVALID_ARG: Get DW_GDMA channel ID failed because of invalid argument
* - ESP_FAIL: Get DW_GDMA channel ID failed because of other error
*/
esp_err_t dw_gdma_channel_get_id(dw_gdma_channel_handle_t chan, int *channel_id);
/**
* @brief A group of channel's dynamic configurations
*
* @note By dynamic, we mean these channel end configurations can be changed in each transfer.
*/
typedef struct {
uint32_t addr; /*!< Memory address */
dw_gdma_transfer_width_t width; /*!< Transfer width */
dw_gdma_burst_mode_t burst_mode; /*!< Burst mode */
dw_gdma_burst_items_t burst_items; /*!< Number of data items that are contained in one burst transaction */
uint8_t burst_len; /*!< Burst transaction length; if set to 0, the hardware will choose a suitable burst length automatically */
struct {
uint32_t en_status_write_back: 1; /*!< Enable peripheral status write back */
} flags;
} dw_gdma_channel_dynamic_config_t;
/**
* @brief Channel block transfer configurations
*/
typedef struct {
dw_gdma_channel_dynamic_config_t src; /*!< source configuration */
dw_gdma_channel_dynamic_config_t dst; /*!< destination configuration */
size_t size; /*!< Transfer size */
} dw_gdma_block_transfer_config_t;
/**
* @brief Configure transfer parameters for a DMA channel
*
* @note This is an "all-in-one" function for setting up a block transfer.
* @note This function doesn't work with the Link-List transfer type. For Link-List transfers, please use `dw_gdma_lli_config_transfer` instead.
*
* @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel`
* @param[in] config Block transfer configurations
* @return
* - ESP_OK: Configure DMA channel block transfer successfully
* - ESP_ERR_INVALID_ARG: Configure DMA channel block transfer failed because of invalid argument
* - ESP_ERR_INVALID_STATE: Configure DMA channel block transfer failed because the channel has Link-List transfer type
* - ESP_FAIL: Configure DMA channel block transfer failed because of other error
*/
esp_err_t dw_gdma_channel_config_transfer(dw_gdma_channel_handle_t chan, const dw_gdma_block_transfer_config_t *config);
/**
* @brief Enable or disable a DMA channel
*
* @note Before enabling a channel, you need to set up the channel transfer with either `dw_gdma_channel_config_transfer` or `dw_gdma_lli_config_transfer`
* @note When a DMA channel is disabled, the DMA engine will stop working. You need to reconfigure the channel before enabling it again.
* @note After all block transfers are completed, the DMA channel will be disabled automatically.
*
* @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel`
* @param[in] en_or_dis True to enable, false to disable the DMA channel
* @return
* - ESP_OK: Enable or disable DMA channel successfully
* - ESP_ERR_INVALID_ARG: Enable or disable DMA channel failed because of invalid argument
* - ESP_FAIL: Enable or disable DMA channel failed because of other error
*/
esp_err_t dw_gdma_channel_enable_ctrl(dw_gdma_channel_handle_t chan, bool en_or_dis);
/**
* @brief Suspend or resume a DMA channel
*
* @note When a DMA channel is suspended, the DMA engine will stop working gracefully and the channel's status will be saved.
* @note The channel will exit the suspend state automatically if it is disabled.
*
* @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel`
* @param[in] enter_or_exit True to suspend, false to resume the DMA channel
* @return
* - ESP_OK: Suspend or resume DMA channel successfully
* - ESP_ERR_INVALID_ARG: Suspend or resume DMA channel failed because of invalid argument
* - ESP_FAIL: Suspend or resume DMA channel failed because of other error
*/
esp_err_t dw_gdma_channel_suspend_ctrl(dw_gdma_channel_handle_t chan, bool enter_or_exit);
/**
* @brief Abort the DMA channel
*
* @note If the channel is aborted, it will be disabled immediately, which may cause an AXI bus protocol violation.
* @note This function is recommended only when the channel hangs. Try `dw_gdma_channel_enable_ctrl` first, then opt for aborting.
*
* @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel`
* @return
* - ESP_OK: Abort DMA channel successfully
* - ESP_ERR_INVALID_ARG: Abort DMA channel failed because of invalid argument
* - ESP_FAIL: Abort DMA channel failed because of other error
*/
esp_err_t dw_gdma_channel_abort(dw_gdma_channel_handle_t chan);
/**
* @brief Lock the DMA channel at specific transfer level
*
* @note When a DMA channel is locked, no other channels are granted control of the master bus for the duration specified by the lock level.
* @note Only lock the channel if you want exclusive access to the master bus.
* @note The channel locking feature is only for M2M transfers.
*
* @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel`
* @param[in] level Transfer level
* @return
* - ESP_OK: Lock DMA channel successfully
* - ESP_ERR_INVALID_ARG: Lock DMA channel failed because of invalid argument
* - ESP_FAIL: Lock DMA channel failed because of other error
*/
esp_err_t dw_gdma_channel_lock(dw_gdma_channel_handle_t chan, dw_gdma_lock_level_t level);
/**
* @brief Unlock the DMA channel
*
* @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel`
* @return
* - ESP_OK: Unlock DMA channel successfully
* - ESP_ERR_INVALID_ARG: Unlock DMA channel failed because of invalid argument
* - ESP_FAIL: Unlock DMA channel failed because of other error
*/
esp_err_t dw_gdma_channel_unlock(dw_gdma_channel_handle_t chan);
/**
* @brief Continue the temporarily stopped DMA transfer because of invalid block
*
* @note You should only call this API when the block becomes valid again,
* by calling `dw_gdma_lli_set_block_markers`/`dw_gdma_channel_set_block_markers` with `is_valid` set to true.
*
* @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel`
* @return
* - ESP_OK: Continue DMA transfer successfully
* - ESP_ERR_INVALID_ARG: Continue DMA transfer failed because of invalid argument
* - ESP_FAIL: Continue DMA transfer failed because of other error
*/
esp_err_t dw_gdma_channel_continue(dw_gdma_channel_handle_t chan);
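/*
 * Illustrative sketch (mirroring the shadow-mode test case in this commit):
 * inside the `on_invalid_block` callback, re-validate the block and then
 * resume the transfer with `dw_gdma_channel_continue`.
 *
 *     static bool on_invalid_block(dw_gdma_channel_handle_t chan,
 *                                  const dw_gdma_break_event_data_t *edata, void *user_data)
 *     {
 *         dw_gdma_block_markers_t markers = {
 *             .is_last = true,  // mark the block as the last one
 *             .is_valid = true, // block is valid again, so the DMA can continue
 *         };
 *         dw_gdma_channel_set_block_markers(chan, markers);
 *         dw_gdma_channel_continue(chan);
 *         return false;
 *     }
 */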
/**
* @brief Type of DW_GDMA trans done event data
*/
typedef struct {
} dw_gdma_trans_done_event_data_t;
/**
* @brief Type of DW_GDMA trans_done event callback
* @param chan GDMA channel handle, created from `dw_gdma_new_channel`
* @param event_data GDMA event data
* @param user_data User registered data from `dw_gdma_channel_register_event_callbacks`
*
* @return Whether a task switch is needed after the callback function returns,
* this is usually because the callback wakes up a high-priority task.
*/
typedef bool (*dw_gdma_trans_done_event_callback_t)(dw_gdma_channel_handle_t chan, const dw_gdma_trans_done_event_data_t *event_data, void *user_data);
/**
* @brief Type of DW_GDMA break event data
*/
typedef struct {
dw_gdma_lli_handle_t invalid_lli; /*!< Invalid link list item */
} dw_gdma_break_event_data_t;
/**
* @brief Type of DW_GDMA break event callback
* @param chan GDMA channel handle, created from `dw_gdma_new_channel`
* @param event_data GDMA event data
* @param user_data User registered data from `dw_gdma_channel_register_event_callbacks`
*
* @return Whether a task switch is needed after the callback function returns,
* this is usually because the callback wakes up a high-priority task.
*/
typedef bool (*dw_gdma_break_event_callback_t)(dw_gdma_channel_handle_t chan, const dw_gdma_break_event_data_t *event_data, void *user_data);
/**
* @brief Group of supported DW_GDMA callbacks
* @note All the callbacks run in the ISR context
*/
typedef struct {
dw_gdma_trans_done_event_callback_t on_block_trans_done; /*!< Invoked when a block transfer is completed */
dw_gdma_trans_done_event_callback_t on_full_trans_done; /*!< Invoked when all block transfers are completed */
dw_gdma_break_event_callback_t on_invalid_block; /*!< Invoked when an invalid block is detected */
} dw_gdma_event_callbacks_t;
/**
* @brief Set DW_GDMA event callbacks for a channel
* @note This API will lazily install the DW_GDMA interrupt service
*
* @param[in] chan DW_GDMA channel handle, allocated by `dw_gdma_new_channel`
* @param[in] cbs Group of callback functions
* @param[in] user_data User data, which will be passed to callback functions directly
* @return
* - ESP_OK: Set event callbacks successfully
* - ESP_ERR_INVALID_ARG: Set event callbacks failed because of invalid argument
* - ESP_FAIL: Set event callbacks failed because of other error
*/
esp_err_t dw_gdma_channel_register_event_callbacks(dw_gdma_channel_handle_t chan, dw_gdma_event_callbacks_t *cbs, void *user_data);
/**
* @brief DMA link list type
*/
typedef enum {
DW_GDMA_LINKED_LIST_TYPE_SINGLY, /*!< Singly linked list */
DW_GDMA_LINKED_LIST_TYPE_CIRCULAR, /*!< Circular linked list */
} dw_gdma_link_list_type_t;
/**
* @brief DMA link list configurations
*/
typedef struct {
uint32_t num_items; //!< Number of link list items
dw_gdma_link_list_type_t link_type; //!< Link list type
} dw_gdma_link_list_config_t;
/**
* @brief Create a DMA link list
*
* @param[in] config Link list configurations
* @param[out] ret_list Returned link list handle
* @return
* - ESP_OK: Create DMA link list successfully
* - ESP_ERR_INVALID_ARG: Create DMA link list failed because of invalid argument
* - ESP_ERR_NO_MEM: Create DMA link list failed because out of memory
* - ESP_FAIL: Create DMA link list failed because of other error
*/
esp_err_t dw_gdma_new_link_list(const dw_gdma_link_list_config_t *config, dw_gdma_link_list_handle_t *ret_list);
/**
* @brief Delete a DMA link list
*
* @param[in] list Link list handle, allocated by `dw_gdma_new_link_list`
* @return
* - ESP_OK: Delete DMA link list successfully
* - ESP_ERR_INVALID_ARG: Delete DMA link list failed because of invalid argument
* - ESP_FAIL: Delete DMA link list failed because of other error
*/
esp_err_t dw_gdma_del_link_list(dw_gdma_link_list_handle_t list);
/**
* @brief Apply a link list to a DMA channel
*
* @note This function can only work with Link-List transfer type.
*
* @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel`
* @param[in] list Link list handle, allocated by `dw_gdma_new_link_list`
* @return
* - ESP_OK: Apply link list to DMA channel successfully
* - ESP_ERR_INVALID_ARG: Apply link list to DMA channel failed because of invalid argument
* - ESP_ERR_INVALID_STATE: Apply link list to DMA channel failed because the channel is not with Link-List transfer type
* - ESP_FAIL: Apply link list to DMA channel failed because of other error
*/
esp_err_t dw_gdma_channel_use_link_list(dw_gdma_channel_handle_t chan, dw_gdma_link_list_handle_t list);
/**
* @brief A helper function to return an item from a given link list, by index
*
* @param[in] list Link list handle, allocated by `dw_gdma_new_link_list`
* @param[in] item_index Index of the item
* @return
* - NULL: Invalid argument
* - Others: Link list item handle
*/
dw_gdma_lli_handle_t dw_gdma_link_list_get_item(dw_gdma_link_list_handle_t list, int item_index);
/**
* @brief Configure transfer parameters for a DMA link list item
*
* @note This is an "all-in-one" function for setting up a link list item.
* @note This function only works with the Link-List transfer type. For other transfer types, please use `dw_gdma_channel_config_transfer` instead.
*
* @param[in] lli Link list item
* @param[in] config Block transfer configurations
* @return
* - ESP_OK: Configure link list item block transfer successfully
* - ESP_ERR_INVALID_ARG: Configure link list item block transfer failed because of invalid argument
* - ESP_FAIL: Configure link list item block transfer failed because of other error
*/
esp_err_t dw_gdma_lli_config_transfer(dw_gdma_lli_handle_t lli, dw_gdma_block_transfer_config_t *config);
/**
* @brief Markers of a DW_GDMA block
*/
typedef struct {
uint32_t is_last: 1; /*!< Set if this block is the last one */
uint32_t is_valid: 1; /*!< Set if this block is valid */
uint32_t en_trans_done_intr: 1; /*!< Set to enable the transfer done interrupt for this block */
} dw_gdma_block_markers_t;
/**
* @brief Set block markers for a DMA channel
*
* @note This function doesn't work for Link-List transfer type. For Link-List transfer, please use `dw_gdma_lli_set_block_markers` instead.
* @note Setting the markers should always be the last step of configuring a block transfer, before enabling/continuing the channel.
*
* @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel`
* @param[in] markers Block markers
* @return
* - ESP_OK: Set block markers successfully
* - ESP_ERR_INVALID_ARG: Set block markers failed because of invalid argument
* - ESP_ERR_INVALID_STATE: Set block markers failed because the channel has Link-List transfer type
* - ESP_FAIL: Set block markers failed because of other error
*/
esp_err_t dw_gdma_channel_set_block_markers(dw_gdma_channel_handle_t chan, dw_gdma_block_markers_t markers);
/**
* @brief Set block markers for a DMA link list item
*
* @note Setting the markers should always be the last step of configuring a block transfer, before enabling/continuing the channel.
*
* @param[in] lli Link list item
* @param[in] markers Block markers
* @return
* - ESP_OK: Set block markers successfully
* - ESP_ERR_INVALID_ARG: Set block markers failed because of invalid argument
* - ESP_FAIL: Set block markers failed because of other error
*/
esp_err_t dw_gdma_lli_set_block_markers(dw_gdma_lli_handle_t lli, dw_gdma_block_markers_t markers);
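/*
 * Illustrative sketch of the link list APIs working together (assumes `chan`
 * was created with the DW_GDMA_BLOCK_TRANSFER_LIST transfer type and that
 * `transfer_config` is a filled dw_gdma_block_transfer_config_t):
 *
 *     dw_gdma_link_list_config_t list_config = {
 *         .num_items = 2,
 *         .link_type = DW_GDMA_LINKED_LIST_TYPE_SINGLY,
 *     };
 *     dw_gdma_link_list_handle_t list = NULL;
 *     ESP_ERROR_CHECK(dw_gdma_new_link_list(&list_config, &list));
 *     ESP_ERROR_CHECK(dw_gdma_channel_use_link_list(chan, list));
 *     for (int i = 0; i < 2; i++) {
 *         dw_gdma_lli_handle_t lli = dw_gdma_link_list_get_item(list, i);
 *         ESP_ERROR_CHECK(dw_gdma_lli_config_transfer(lli, &transfer_config));
 *         dw_gdma_block_markers_t markers = {
 *             .is_last = (i == 1), // only the tail item ends the transfer
 *             .is_valid = true,
 *         };
 *         ESP_ERROR_CHECK(dw_gdma_lli_set_block_markers(lli, markers));
 *     }
 *     ESP_ERROR_CHECK(dw_gdma_channel_enable_ctrl(chan, true));
 */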
#ifdef __cplusplus
}
#endif

View file

@@ -8,6 +8,10 @@ if(CONFIG_SOC_GDMA_SUPPORTED)
list(APPEND srcs "test_gdma.c")
endif()
if(CONFIG_SOC_DW_GDMA_SUPPORTED)
list(APPEND srcs "test_dw_gdma.c")
endif()
# In order for the cases defined by `TEST_CASE` to be linked into the final elf,
# the component can be registered as WHOLE_ARCHIVE
idf_component_register(SRCS ${srcs}

View file

@@ -0,0 +1,517 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <string.h>
#include <inttypes.h>
#include "sdkconfig.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "unity.h"
#include "esp_private/dw_gdma.h"
#include "hal/dw_gdma_ll.h"
#include "esp_cache.h"
TEST_CASE("DW_GDMA channel allocation", "[DW_GDMA]")
{
printf("install DMA channels exhaustively\r\n");
dw_gdma_channel_static_config_t static_config = {
.block_transfer_type = DW_GDMA_BLOCK_TRANSFER_CONTIGUOUS,
.role = DW_GDMA_ROLE_MEM,
.num_outstanding_requests = 1,
};
dw_gdma_channel_alloc_config_t alloc_config = {
.src = static_config,
.dst = static_config,
};
dw_gdma_channel_handle_t chans[DW_GDMA_LL_GROUPS][DW_GDMA_LL_CHANNELS_PER_GROUP];
for (int i = 0; i < DW_GDMA_LL_GROUPS; i++) {
for (int j = 0; j < DW_GDMA_LL_CHANNELS_PER_GROUP; j++) {
TEST_ESP_OK(dw_gdma_new_channel(&alloc_config, &chans[i][j]));
}
}
TEST_ESP_ERR(ESP_ERR_NOT_FOUND, dw_gdma_new_channel(&alloc_config, &chans[0][0]));
printf("delete DMA channels\r\n");
for (int i = 0; i < DW_GDMA_LL_GROUPS; i++) {
for (int j = 0; j < DW_GDMA_LL_CHANNELS_PER_GROUP; j++) {
TEST_ESP_OK(dw_gdma_del_channel(chans[i][j]));
}
}
}
static bool test_dw_gdma_conti_mode_trans_done_cb(dw_gdma_channel_handle_t chan, const dw_gdma_trans_done_event_data_t *event_data, void *user_data)
{
BaseType_t task_woken = pdFALSE;
SemaphoreHandle_t done_sem = (SemaphoreHandle_t)user_data;
xSemaphoreGiveFromISR(done_sem, &task_woken);
return task_woken == pdTRUE;
}
TEST_CASE("DW_GDMA M2M Test: Contiguous Mode", "[DW_GDMA]")
{
SemaphoreHandle_t done_sem = xSemaphoreCreateBinary();
TEST_ASSERT_NOT_NULL(done_sem);
printf("prepare the source and destination buffers\r\n");
uint8_t *src_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
uint8_t *dst_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
TEST_ASSERT_NOT_NULL(src_buf);
TEST_ASSERT_NOT_NULL(dst_buf);
for (int i = 0; i < 256; i++) {
src_buf[i] = i;
}
#if CONFIG_IDF_TARGET_ESP32P4
// do write-back for the source data because it's in the cache
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
#endif
printf("allocate a channel for memory copy\r\n");
dw_gdma_channel_static_config_t static_config = {
.block_transfer_type = DW_GDMA_BLOCK_TRANSFER_CONTIGUOUS,
.role = DW_GDMA_ROLE_MEM,
.num_outstanding_requests = 1,
};
dw_gdma_channel_alloc_config_t alloc_config = {
.src = static_config,
.dst = static_config,
.flow_controller = DW_GDMA_FLOW_CTRL_SELF, // DMA as the flow controller
.chan_priority = 1,
};
dw_gdma_channel_handle_t m2m_chan = NULL;
TEST_ESP_OK(dw_gdma_new_channel(&alloc_config, &m2m_chan));
printf("register event handler\r\n");
dw_gdma_event_callbacks_t cbs = {
.on_full_trans_done = test_dw_gdma_conti_mode_trans_done_cb,
};
TEST_ESP_OK(dw_gdma_channel_register_event_callbacks(m2m_chan, &cbs, done_sem));
printf("set up memory copy transaction\r\n");
dw_gdma_block_transfer_config_t transfer_config = {
.src = {
.addr = (uint32_t)src_buf,
.burst_mode = DW_GDMA_BURST_MODE_INCREMENT,
.width = DW_GDMA_TRANS_WIDTH_8,
.burst_items = 4,
.burst_len = 0,
},
.dst = {
.addr = (uint32_t)dst_buf,
.burst_mode = DW_GDMA_BURST_MODE_INCREMENT,
.width = DW_GDMA_TRANS_WIDTH_8,
.burst_items = 4,
.burst_len = 0,
},
.size = 256,
};
TEST_ESP_OK(dw_gdma_channel_config_transfer(m2m_chan, &transfer_config));
printf("start the DMA engine\r\n");
TEST_ESP_OK(dw_gdma_channel_enable_ctrl(m2m_chan, true));
TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(100)));
// DMA should stop after the first block transfer is done
TEST_ASSERT_EQUAL(pdFALSE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(100)));
printf("check the memory copy result\r\n");
#if CONFIG_IDF_TARGET_ESP32P4
// the destination data is not reflected in the cache yet, so do an invalidate to make the cache load the new data
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
#endif
for (int i = 0; i < 256; i++) {
TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]);
}
TEST_ESP_OK(dw_gdma_del_channel(m2m_chan));
free(src_buf);
free(dst_buf);
vSemaphoreDelete(done_sem);
}
static bool test_dw_gdma_reload_mode_block_done_cb(dw_gdma_channel_handle_t chan, const dw_gdma_trans_done_event_data_t *event_data, void *user_data)
{
BaseType_t task_woken = pdFALSE;
SemaphoreHandle_t done_sem = (SemaphoreHandle_t)user_data;
xSemaphoreGiveFromISR(done_sem, &task_woken);
return task_woken == pdTRUE;
}
TEST_CASE("DW_GDMA M2M Test: Reload Mode", "[DW_GDMA]")
{
SemaphoreHandle_t done_sem = xSemaphoreCreateBinary();
TEST_ASSERT_NOT_NULL(done_sem);
printf("prepare the source and destination buffers\r\n");
uint8_t *src_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
uint8_t *dst_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
TEST_ASSERT_NOT_NULL(src_buf);
TEST_ASSERT_NOT_NULL(dst_buf);
for (int i = 0; i < 256; i++) {
src_buf[i] = i;
}
#if CONFIG_IDF_TARGET_ESP32P4
// do write-back for the source data because it's in the cache
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
#endif
printf("allocate a channel for memory copy\r\n");
dw_gdma_channel_static_config_t static_config = {
.block_transfer_type = DW_GDMA_BLOCK_TRANSFER_RELOAD,
.role = DW_GDMA_ROLE_MEM,
.num_outstanding_requests = 1,
};
dw_gdma_channel_alloc_config_t alloc_config = {
.src = static_config,
.dst = static_config,
.flow_controller = DW_GDMA_FLOW_CTRL_SELF, // DMA as the flow controller
.chan_priority = 1,
};
dw_gdma_channel_handle_t m2m_chan = NULL;
TEST_ESP_OK(dw_gdma_new_channel(&alloc_config, &m2m_chan));
printf("register event handler\r\n");
dw_gdma_event_callbacks_t cbs = {
.on_block_trans_done = test_dw_gdma_reload_mode_block_done_cb,
};
TEST_ESP_OK(dw_gdma_channel_register_event_callbacks(m2m_chan, &cbs, done_sem));
printf("set up memory copy transaction\r\n");
dw_gdma_block_transfer_config_t transfer_config = {
.src = {
.addr = (uint32_t)src_buf,
.burst_mode = DW_GDMA_BURST_MODE_INCREMENT,
.width = DW_GDMA_TRANS_WIDTH_8,
.burst_items = 4,
.burst_len = 0,
},
.dst = {
.addr = (uint32_t)dst_buf,
.burst_mode = DW_GDMA_BURST_MODE_INCREMENT,
.width = DW_GDMA_TRANS_WIDTH_8,
.burst_items = 4,
.burst_len = 0,
},
.size = 256,
};
TEST_ESP_OK(dw_gdma_channel_config_transfer(m2m_chan, &transfer_config));
dw_gdma_block_markers_t markers = {
.en_trans_done_intr = true, // enable block trans done interrupt
};
TEST_ESP_OK(dw_gdma_channel_set_block_markers(m2m_chan, markers));
printf("start the DMA engine\r\n");
TEST_ESP_OK(dw_gdma_channel_enable_ctrl(m2m_chan, true));
// because of the auto-reload, we can keep receiving the block trans done event
TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(100)));
TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(100)));
TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(100)));
printf("check the memory copy result\r\n");
#if CONFIG_IDF_TARGET_ESP32P4
// the destination data is not reflected in the cache yet, so do an invalidate to make the cache load the new data
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
#endif
for (int i = 0; i < 256; i++) {
TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]);
}
// stop the DMA channel
TEST_ESP_OK(dw_gdma_channel_enable_ctrl(m2m_chan, false));
TEST_ESP_OK(dw_gdma_del_channel(m2m_chan));
free(src_buf);
free(dst_buf);
vSemaphoreDelete(done_sem);
}
typedef struct {
SemaphoreHandle_t done_sem;
uint8_t count;
} test_gdma_shadow_mode_user_data_t;
static bool test_dw_gdma_shadow_mode_block_invalid_cb(dw_gdma_channel_handle_t chan, const dw_gdma_break_event_data_t *event_data, void *user_data)
{
BaseType_t task_woken = pdFALSE;
test_gdma_shadow_mode_user_data_t *udata = (test_gdma_shadow_mode_user_data_t *)user_data;
udata->count++;
dw_gdma_block_markers_t markers = {
.is_last = true, // mark the block as the last one
.is_valid = true, // mark the block as valid so that the DMA can continue the transfer
};
dw_gdma_channel_set_block_markers(chan, markers);
// after the block is marked as valid again, tell the DMA to continue the transfer
dw_gdma_channel_continue(chan);
return task_woken == pdTRUE;
}
static bool test_dw_gdma_shadow_mode_trans_done_cb(dw_gdma_channel_handle_t chan, const dw_gdma_trans_done_event_data_t *event_data, void *user_data)
{
BaseType_t task_woken = pdFALSE;
test_gdma_shadow_mode_user_data_t *udata = (test_gdma_shadow_mode_user_data_t *)user_data;
SemaphoreHandle_t done_sem = udata->done_sem;
xSemaphoreGiveFromISR(done_sem, &task_woken);
return task_woken == pdTRUE;
}
TEST_CASE("DW_GDMA M2M Test: Shadow Mode", "[DW_GDMA]")
{
SemaphoreHandle_t done_sem = xSemaphoreCreateBinary();
TEST_ASSERT_NOT_NULL(done_sem);
printf("prepare the source and destination buffers\r\n");
uint8_t *src_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
uint8_t *dst_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
TEST_ASSERT_NOT_NULL(src_buf);
TEST_ASSERT_NOT_NULL(dst_buf);
for (int i = 0; i < 256; i++) {
src_buf[i] = i;
}
#if CONFIG_IDF_TARGET_ESP32P4
// do write-back for the source data because it's in the cache
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
#endif
printf("allocate a channel for memory copy\r\n");
dw_gdma_channel_static_config_t static_config = {
.block_transfer_type = DW_GDMA_BLOCK_TRANSFER_SHADOW,
.role = DW_GDMA_ROLE_MEM,
.num_outstanding_requests = 1,
};
dw_gdma_channel_alloc_config_t alloc_config = {
.src = static_config,
.dst = static_config,
.flow_controller = DW_GDMA_FLOW_CTRL_SELF, // DMA as the flow controller
.chan_priority = 1,
};
dw_gdma_channel_handle_t m2m_chan = NULL;
TEST_ESP_OK(dw_gdma_new_channel(&alloc_config, &m2m_chan));
printf("set up memory copy transaction\r\n");
dw_gdma_block_transfer_config_t transfer_config = {
.src = {
.addr = (uint32_t)src_buf,
.burst_mode = DW_GDMA_BURST_MODE_INCREMENT,
.width = DW_GDMA_TRANS_WIDTH_8,
.burst_items = 4,
.burst_len = 0,
},
.dst = {
.addr = (uint32_t)dst_buf,
.burst_mode = DW_GDMA_BURST_MODE_INCREMENT,
.width = DW_GDMA_TRANS_WIDTH_8,
.burst_items = 4,
.burst_len = 0,
},
.size = 256,
};
TEST_ESP_OK(dw_gdma_channel_config_transfer(m2m_chan, &transfer_config));
dw_gdma_block_markers_t markers = {
.is_valid = true, // mark the block as valid so that the DMA can start the transfer
};
TEST_ESP_OK(dw_gdma_channel_set_block_markers(m2m_chan, markers));
printf("register event handler\r\n");
dw_gdma_event_callbacks_t cbs = {
.on_invalid_block = test_dw_gdma_shadow_mode_block_invalid_cb,
.on_full_trans_done = test_dw_gdma_shadow_mode_trans_done_cb,
};
test_gdma_shadow_mode_user_data_t user_data = {
.done_sem = done_sem,
.count = 0,
};
TEST_ESP_OK(dw_gdma_channel_register_event_callbacks(m2m_chan, &cbs, &user_data));
printf("start the DMA engine\r\n");
TEST_ESP_OK(dw_gdma_channel_enable_ctrl(m2m_chan, true));
TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(1000)));
    // the block invalid callback should only have been entered once
TEST_ASSERT_EQUAL_UINT8(1, user_data.count);
printf("check the memory copy result\r\n");
#if CONFIG_IDF_TARGET_ESP32P4
    // the destination data are not reflected in the cache yet, so invalidate the region to make the cache load the new data
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
#endif
for (int i = 0; i < 256; i++) {
TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]);
}
TEST_ESP_OK(dw_gdma_del_channel(m2m_chan));
free(src_buf);
free(dst_buf);
vSemaphoreDelete(done_sem);
}
typedef struct {
SemaphoreHandle_t done_sem;
void *dst_buffer_addr;
size_t dst_buffer_size;
uint8_t count;
} test_gdma_list_mode_user_data_t;
static bool test_dw_gdma_list_mode_trans_done_cb(dw_gdma_channel_handle_t chan, const dw_gdma_trans_done_event_data_t *event_data, void *user_data)
{
BaseType_t task_woken = pdFALSE;
test_gdma_list_mode_user_data_t *udata = (test_gdma_list_mode_user_data_t *)user_data;
SemaphoreHandle_t done_sem = udata->done_sem;
xSemaphoreGiveFromISR(done_sem, &task_woken);
return task_woken == pdTRUE;
}
static bool test_dw_gdma_list_mode_invalid_block_cb(dw_gdma_channel_handle_t chan, const dw_gdma_break_event_data_t *event_data, void *user_data)
{
test_gdma_list_mode_user_data_t *udata = (test_gdma_list_mode_user_data_t *)user_data;
dw_gdma_lli_handle_t lli = event_data->invalid_lli;
udata->count++;
// clear the destination buffer
memset(udata->dst_buffer_addr, 0, udata->dst_buffer_size);
dw_gdma_block_markers_t markers = {
        .is_last = true, // mark the block as the last one
.is_valid = true, // mark the block as valid so that the DMA can continue the transfer
};
dw_gdma_lli_set_block_markers(lli, markers);
// after the item is marked as valid again, tell the DMA to continue the transfer
dw_gdma_channel_continue(chan);
return false;
}
TEST_CASE("DW_GDMA M2M Test: Link-List Mode", "[DW_GDMA]")
{
SemaphoreHandle_t done_sem = xSemaphoreCreateBinary();
TEST_ASSERT_NOT_NULL(done_sem);
printf("prepare the source and destination buffers\r\n");
uint8_t *src_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
uint8_t *dst_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
TEST_ASSERT_NOT_NULL(src_buf);
TEST_ASSERT_NOT_NULL(dst_buf);
for (int i = 0; i < 256; i++) {
src_buf[i] = i;
}
#if CONFIG_IDF_TARGET_ESP32P4
    // write back the source data from the cache so that the DMA engine sees the up-to-date content in memory
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
#endif
printf("allocate a channel for memory copy\r\n");
dw_gdma_channel_static_config_t static_config = {
.block_transfer_type = DW_GDMA_BLOCK_TRANSFER_LIST,
.role = DW_GDMA_ROLE_MEM,
.num_outstanding_requests = 1,
};
dw_gdma_channel_alloc_config_t alloc_config = {
.src = static_config,
.dst = static_config,
.flow_controller = DW_GDMA_FLOW_CTRL_SELF, // DMA as the flow controller
.chan_priority = 1,
};
dw_gdma_channel_handle_t m2m_chan = NULL;
TEST_ESP_OK(dw_gdma_new_channel(&alloc_config, &m2m_chan));
printf("create singly DMA link list\r\n");
dw_gdma_link_list_config_t link_list_config = {
.num_items = 2,
.link_type = DW_GDMA_LINKED_LIST_TYPE_SINGLY,
};
dw_gdma_link_list_handle_t link_list = NULL;
TEST_ESP_OK(dw_gdma_new_link_list(&link_list_config, &link_list));
printf("set up memory copy transaction\r\n");
dw_gdma_block_transfer_config_t transfer_config = {
.src = {
.addr = (uint32_t)src_buf,
.burst_mode = DW_GDMA_BURST_MODE_INCREMENT,
.width = DW_GDMA_TRANS_WIDTH_8,
.burst_items = 4,
.burst_len = 0,
},
.dst = {
.addr = (uint32_t)dst_buf,
.burst_mode = DW_GDMA_BURST_MODE_INCREMENT,
.width = DW_GDMA_TRANS_WIDTH_8,
.burst_items = 4,
.burst_len = 0,
},
.size = 128,
};
dw_gdma_block_markers_t markers = {
.is_valid = true, // mark the block as valid so that the DMA can start the transfer
};
TEST_ESP_OK(dw_gdma_lli_config_transfer(dw_gdma_link_list_get_item(link_list, 0), &transfer_config));
TEST_ESP_OK(dw_gdma_lli_set_block_markers(dw_gdma_link_list_get_item(link_list, 0), markers));
transfer_config.src.addr = (uint32_t)(src_buf + 128);
transfer_config.dst.addr = (uint32_t)(dst_buf + 128);
markers.is_last = true;
TEST_ESP_OK(dw_gdma_lli_config_transfer(dw_gdma_link_list_get_item(link_list, 1), &transfer_config));
TEST_ESP_OK(dw_gdma_lli_set_block_markers(dw_gdma_link_list_get_item(link_list, 1), markers));
printf("register event handler\r\n");
dw_gdma_event_callbacks_t cbs = {
.on_full_trans_done = test_dw_gdma_list_mode_trans_done_cb,
.on_invalid_block = test_dw_gdma_list_mode_invalid_block_cb,
};
test_gdma_list_mode_user_data_t user_data = {
.done_sem = done_sem,
.count = 0,
.dst_buffer_addr = dst_buf,
.dst_buffer_size = 256,
};
TEST_ESP_OK(dw_gdma_channel_register_event_callbacks(m2m_chan, &cbs, &user_data));
printf("use the link list\r\n");
TEST_ESP_OK(dw_gdma_channel_use_link_list(m2m_chan, link_list));
printf("start the DMA engine\r\n");
TEST_ESP_OK(dw_gdma_channel_enable_ctrl(m2m_chan, true));
TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(1000)));
printf("check the memory copy result\r\n");
#if CONFIG_IDF_TARGET_ESP32P4
    // the destination data are not reflected in the cache yet, so invalidate the region to make the cache load the new data
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
#endif
for (int i = 0; i < 256; i++) {
TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]);
}
    // delete the singly-linked list and create a circular link list instead
TEST_ESP_OK(dw_gdma_del_link_list(link_list));
printf("create circular DMA link list\r\n");
link_list_config.link_type = DW_GDMA_LINKED_LIST_TYPE_CIRCULAR;
link_list_config.num_items = 1;
TEST_ESP_OK(dw_gdma_new_link_list(&link_list_config, &link_list));
// set the transfer parameters for the link list item
transfer_config.size = 256;
transfer_config.src.addr = (uint32_t)src_buf;
transfer_config.dst.addr = (uint32_t)dst_buf;
TEST_ESP_OK(dw_gdma_lli_config_transfer(dw_gdma_link_list_get_item(link_list, 0), &transfer_config));
markers.is_valid = true;
markers.is_last = false;
TEST_ESP_OK(dw_gdma_lli_set_block_markers(dw_gdma_link_list_get_item(link_list, 0), markers));
printf("use the link list\r\n");
TEST_ESP_OK(dw_gdma_channel_use_link_list(m2m_chan, link_list));
TEST_ESP_OK(dw_gdma_channel_enable_ctrl(m2m_chan, true));
TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(1000)));
printf("check the memory copy result\r\n");
#if CONFIG_IDF_TARGET_ESP32P4
    // the destination data are not reflected in the cache yet, so invalidate the region to make the cache load the new data
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
#endif
for (int i = 0; i < 256; i++) {
TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]);
}
TEST_ESP_OK(dw_gdma_del_link_list(link_list));
TEST_ESP_OK(dw_gdma_del_channel(m2m_chan));
free(src_buf);
free(dst_buf);
vSemaphoreDelete(done_sem);
}
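The tests above exercise the reload, shadow, and link-list block transfer types; `DW_GDMA_BLOCK_TRANSFER_CONTIGUOUS` is the remaining one. A minimal one-shot copy sketch, assuming the same channel API as the tests above with `src_buf`/`dst_buf` allocated as DMA-capable buffers; whether block markers are required in this mode is not shown here:

// hypothetical one-shot contiguous copy, mirroring the test setup above
dw_gdma_channel_static_config_t static_config = {
    .block_transfer_type = DW_GDMA_BLOCK_TRANSFER_CONTIGUOUS,
    .role = DW_GDMA_ROLE_MEM,
    .num_outstanding_requests = 1,
};
dw_gdma_channel_alloc_config_t alloc_config = {
    .src = static_config,
    .dst = static_config,
    .flow_controller = DW_GDMA_FLOW_CTRL_SELF, // DMA as the flow controller
    .chan_priority = 1,
};
dw_gdma_channel_handle_t chan = NULL;
ESP_ERROR_CHECK(dw_gdma_new_channel(&alloc_config, &chan));
dw_gdma_block_transfer_config_t transfer_config = {
    .src = { .addr = (uint32_t)src_buf, .burst_mode = DW_GDMA_BURST_MODE_INCREMENT,
             .width = DW_GDMA_TRANS_WIDTH_8, .burst_items = 4 },
    .dst = { .addr = (uint32_t)dst_buf, .burst_mode = DW_GDMA_BURST_MODE_INCREMENT,
             .width = DW_GDMA_TRANS_WIDTH_8, .burst_items = 4 },
    .size = 256,
};
ESP_ERROR_CHECK(dw_gdma_channel_config_transfer(chan, &transfer_config));
ESP_ERROR_CHECK(dw_gdma_channel_enable_ctrl(chan, true)); // single transfer, no reload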

Wyświetl plik

@ -12,4 +12,14 @@
void dw_gdma_hal_init(dw_gdma_hal_context_t *hal, const dw_gdma_hal_config_t *config)
{
hal->dev = DW_GDMA_LL_GET_HW();
dw_gdma_ll_reset(hal->dev);
dw_gdma_ll_enable_controller(hal->dev, true);
dw_gdma_ll_enable_intr_global(hal->dev, true);
}
void dw_gdma_hal_deinit(dw_gdma_hal_context_t *hal)
{
dw_gdma_ll_enable_intr_global(hal->dev, false);
dw_gdma_ll_enable_controller(hal->dev, false);
hal->dev = NULL;
}
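The HAL pair gives the driver a symmetric bring-up and tear-down. A minimal usage sketch (hypothetical driver code, not part of this commit); the empty `hal_config` initializer and the `s_hal_ctx` name are assumptions:

// hypothetical controller install/uninstall path built on the HAL above
static dw_gdma_hal_context_t s_hal_ctx;

static void example_controller_install(void)
{
    dw_gdma_hal_config_t hal_config = {};      // assumed: no runtime options needed yet
    dw_gdma_hal_init(&s_hal_ctx, &hal_config); // reset, then enable controller + global interrupt
}

static void example_controller_uninstall(void)
{
    dw_gdma_hal_deinit(&s_hal_ctx); // disable global interrupt and controller, clear hal->dev
}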

Wyświetl plik

@ -41,6 +41,13 @@ typedef struct {
*/
void dw_gdma_hal_init(dw_gdma_hal_context_t *hal, const dw_gdma_hal_config_t *config);
/**
* @brief DW_GDMA HAL driver deinitialization
*
* @param hal Pointer to the HAL driver context
*/
void dw_gdma_hal_deinit(dw_gdma_hal_context_t *hal);
#ifdef __cplusplus
}
#endif

Wyświetl plik

@ -0,0 +1,99 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief DMA block transfer type
*/
typedef enum {
DW_GDMA_BLOCK_TRANSFER_CONTIGUOUS, /*!< Contiguous buffer address */
DW_GDMA_BLOCK_TRANSFER_RELOAD, /*!< Reload transfer configuration automatically */
DW_GDMA_BLOCK_TRANSFER_SHADOW, /*!< Shadow register */
DW_GDMA_BLOCK_TRANSFER_LIST, /*!< Link list */
} dw_gdma_block_transfer_type_t;
/**
* @brief Flow control type
*/
typedef enum {
DW_GDMA_FLOW_CTRL_SELF, /*!< Flow controller is the DMA engine itself */
DW_GDMA_FLOW_CTRL_SRC, /*!< Flow controller is the source peripheral */
DW_GDMA_FLOW_CTRL_DST, /*!< Flow controller is the destination peripheral */
} dw_gdma_flow_controller_t;
/**
* @brief Handshake interface type
*/
typedef enum {
DW_GDMA_HANDSHAKE_HW, /*!< Transaction requests are initiated by hardware */
DW_GDMA_HANDSHAKE_SW, /*!< Transaction requests are initiated by software */
} dw_gdma_handshake_type_t;
/**
* @brief Role of the DMA source/destination
*/
typedef enum {
    DW_GDMA_ROLE_MEM, /*!< Target is plain memory that is accessible by the DMA */
DW_GDMA_ROLE_PERIPH_DSI, /*!< Target is FIFO memory of peripheral: DSI */
DW_GDMA_ROLE_PERIPH_CSI, /*!< Target is FIFO memory of peripheral: CSI */
DW_GDMA_ROLE_PERIPH_ISP, /*!< Target is FIFO memory of peripheral: ISP */
} dw_gdma_role_t;
/**
* @brief Channel lock level
*/
typedef enum {
DW_GDMA_LOCK_LEVEL_FULL_TRANS, /*!< Lock over complete DMA transfer */
DW_GDMA_LOCK_LEVEL_BLOCK_TRANS, /*!< Lock over DMA block transfer */
} dw_gdma_lock_level_t;
/**
* @brief DW_GDMA transfer width
*/
typedef enum {
DW_GDMA_TRANS_WIDTH_8, /*!< Data transfer width: 8 bits */
DW_GDMA_TRANS_WIDTH_16, /*!< Data transfer width: 16 bits */
DW_GDMA_TRANS_WIDTH_32, /*!< Data transfer width: 32 bits */
DW_GDMA_TRANS_WIDTH_64, /*!< Data transfer width: 64 bits */
DW_GDMA_TRANS_WIDTH_128, /*!< Data transfer width: 128 bits */
DW_GDMA_TRANS_WIDTH_256, /*!< Data transfer width: 256 bits */
DW_GDMA_TRANS_WIDTH_512, /*!< Data transfer width: 512 bits */
} dw_gdma_transfer_width_t;
/**
* @brief DW_GDMA burst mode
*/
typedef enum {
DW_GDMA_BURST_MODE_INCREMENT, /*!< The address is increased after each transfer */
DW_GDMA_BURST_MODE_FIXED, /*!< The address remains the same after each transfer */
} dw_gdma_burst_mode_t;
/**
* @brief Number of data items that are contained in one burst transaction
*
* @note One item's bit width is set by `dw_gdma_transfer_width_t`
*/
typedef enum {
    DW_GDMA_BURST_ITEMS_1, /*!< 1 data item in a burst transaction */
DW_GDMA_BURST_ITEMS_4, /*!< 4 data items in a burst transaction */
DW_GDMA_BURST_ITEMS_8, /*!< 8 data items in a burst transaction */
DW_GDMA_BURST_ITEMS_16, /*!< 16 data items in a burst transaction */
DW_GDMA_BURST_ITEMS_32, /*!< 32 data items in a burst transaction */
DW_GDMA_BURST_ITEMS_64, /*!< 64 data items in a burst transaction */
DW_GDMA_BURST_ITEMS_128, /*!< 128 data items in a burst transaction */
DW_GDMA_BURST_ITEMS_256, /*!< 256 data items in a burst transaction */
DW_GDMA_BURST_ITEMS_512, /*!< 512 data items in a burst transaction */
DW_GDMA_BURST_ITEMS_1024, /*!< 1024 data items in a burst transaction */
} dw_gdma_burst_items_t;
#ifdef __cplusplus
}
#endif
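Because the width and burst-item enums encode their values positionally (8 << n bits, and {1, 4, 8, ..., 1024} items for enum values 0 through 9, as declared above), the size of one burst transaction in bytes can be derived from them. A minimal helper sketch, not part of the header, assuming the enums keep their default 0-based values:

// sketch only: decode the enums above back into plain numbers
static inline uint32_t example_burst_item_count(dw_gdma_burst_items_t e)
{
    // enum value 0 -> 1 item; values 1..9 -> 4, 8, 16, ..., 1024 items
    return (e == DW_GDMA_BURST_ITEMS_1) ? 1 : (2u << e);
}

static inline uint32_t example_burst_size_bytes(dw_gdma_burst_items_t items, dw_gdma_transfer_width_t width)
{
    uint32_t item_bits = 8u << width; // DW_GDMA_TRANS_WIDTH_8 == 0 -> 8 bits, etc.
    return example_burst_item_count(items) * (item_bits / 8);
}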

Wyświetl plik

@ -23,6 +23,10 @@ config SOC_AXI_GDMA_SUPPORTED
bool
default y
config SOC_DW_GDMA_SUPPORTED
bool
default y
config SOC_GPTIMER_SUPPORTED
bool
default y

Wyświetl plik

@ -156,6 +156,12 @@
#define DR_REG_HP2LP_PERI_PMS_BASE (DR_REG_LPPERIPH_BASE + 0xE800)
#define DR_REG_LP_TSENSOR_BASE (DR_REG_LPPERIPH_BASE + 0xF000)
/**
 * @brief Special memory addresses
*/
#define MIPI_CSI_MEM_BASE 0x50104000
#define MIPI_DSI_MEM_BASE 0x50105000
/**
 * These are module helper MACROs for quick module reference,
 * including some renamed module addresses

Wyświetl plik

@ -24,6 +24,7 @@
#define SOC_GDMA_SUPPORTED 1
#define SOC_AHB_GDMA_SUPPORTED 1
#define SOC_AXI_GDMA_SUPPORTED 1
#define SOC_DW_GDMA_SUPPORTED 1
#define SOC_GPTIMER_SUPPORTED 1
#define SOC_PCNT_SUPPORTED 1
// #define SOC_LCDCAM_SUPPORTED 1 // TODO: IDF-7465