Merge branch 'bugfix/dma2d_various_small_fixes' into 'master'

fix(dma2d): correct a few 2D-DMA driver issues

See merge request espressif/esp-idf!29705
pull/13431/head
Song Ruo Jing 2024-03-19 21:36:45 +08:00
commit 34f05287ab
8 changed files with 100 additions and 94 deletions

View file

@@ -98,8 +98,6 @@ static bool acquire_free_channels_for_trans(dma2d_group_t *dma2d_group, const dm
dma2d_group->tx_chans[channel_id]->base.status.periph_sel_id = -1;
if (trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_TX_REORDER) {
dma2d_group->tx_chans[channel_id]->base.status.reorder_en = true;
} else if (trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_SIBLING) {
dma2d_group->tx_chans[channel_id]->base.status.has_sibling = true;
}
channel_handle_array[idx].chan = &dma2d_group->tx_chans[channel_id]->base;
channel_handle_array[idx].dir = DMA2D_CHANNEL_DIRECTION_TX;
@@ -137,8 +135,6 @@ static bool acquire_free_channels_for_trans(dma2d_group_t *dma2d_group, const dm
dma2d_group->rx_chans[channel_id]->base.status.periph_sel_id = -1;
if (trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_RX_REORDER) {
dma2d_group->rx_chans[channel_id]->base.status.reorder_en = true;
} else if (trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_SIBLING) {
dma2d_group->rx_chans[channel_id]->base.status.has_sibling = true;
}
channel_handle_array[idx].chan = &dma2d_group->rx_chans[channel_id]->base;
channel_handle_array[idx].dir = DMA2D_CHANNEL_DIRECTION_RX;
@@ -176,6 +172,8 @@ static bool free_up_channels(dma2d_group_t *group, dma2d_rx_channel_t *rx_chan)
uint32_t channel_id = rx_chan->base.channel_id;
// 1. Clean up channels
uint32_t bundled_tx_channel_mask = rx_chan->bundled_tx_channel_mask;
uint32_t tx_periph_sel_id_mask = 0;
uint32_t rx_periph_sel_id_mask = 0;
// Disable RX channel interrupt
portENTER_CRITICAL_SAFE(&rx_chan->base.spinlock);
dma2d_ll_rx_enable_interrupt(group->hal.dev, channel_id, UINT32_MAX, false);
@@ -186,6 +184,9 @@ static bool free_up_channels(dma2d_group_t *group, dma2d_rx_channel_t *rx_chan)
dma2d_ll_rx_disconnect_from_periph(group->hal.dev, channel_id);
// Clear the pointer that points to the finished transaction
rx_chan->base.status.transaction = NULL;
// Record its periph_sel_id
assert(rx_chan->base.status.periph_sel_id != -1);
rx_periph_sel_id_mask |= (1 << rx_chan->base.status.periph_sel_id);
portEXIT_CRITICAL_SAFE(&rx_chan->base.spinlock);
// For each bundled TX channel:
while (rx_chan->bundled_tx_channel_mask) {
@@ -201,6 +202,9 @@ static bool free_up_channels(dma2d_group_t *group, dma2d_rx_channel_t *rx_chan)
dma2d_ll_tx_disconnect_from_periph(group->hal.dev, nbit);
// Clear the pointer that points to the finished transaction
tx_chan->base.status.transaction = NULL;
// Record its periph_sel_id
assert(tx_chan->base.status.periph_sel_id != -1);
tx_periph_sel_id_mask |= (1 << tx_chan->base.status.periph_sel_id);
portEXIT_CRITICAL_SAFE(&tx_chan->base.spinlock);
}
// Channel functionality flags will be reset and assigned new values inside `acquire_free_channels_for_trans`
@@ -212,12 +216,12 @@ static bool free_up_channels(dma2d_group_t *group, dma2d_rx_channel_t *rx_chan)
dma2d_trans_channel_info_t channel_handle_array[DMA2D_MAX_CHANNEL_NUM_PER_TRANSACTION];
portENTER_CRITICAL_SAFE(&group->spinlock);
// Release channels
group->tx_channel_free_mask |= bundled_tx_channel_mask;
group->rx_channel_free_mask |= (1 << channel_id);
int rx_periph_sel_id = group->rx_chans[channel_id]->base.status.periph_sel_id;
if (rx_periph_sel_id != -1 && ((1 << rx_periph_sel_id) & DMA2D_LL_CHANNEL_PERIPH_M2M_FREE_ID_MASK)) {
group->periph_m2m_free_id_mask |= (1 << rx_periph_sel_id); // release m2m periph_sel_id
}
// Release M2M periph_sel_id
group->tx_periph_m2m_free_id_mask |= (tx_periph_sel_id_mask & DMA2D_LL_TX_CHANNEL_PERIPH_M2M_AVAILABLE_ID_MASK);
group->rx_periph_m2m_free_id_mask |= (rx_periph_sel_id_mask & DMA2D_LL_RX_CHANNEL_PERIPH_M2M_AVAILABLE_ID_MASK);
dma2d_trans_t *next_trans_elm = TAILQ_FIRST(&group->pending_trans_tailq);
if (next_trans_elm) {
@@ -295,10 +299,10 @@ static NOINLINE_ATTR bool _dma2d_default_rx_isr(dma2d_group_t *group, int channe
}
// If the last transaction completes (successfully or not), free the channels
if ((intr_status & DMA2D_LL_EVENT_RX_SUC_EOF) ||
(intr_status & DMA2D_LL_EVENT_RX_ERR_EOF) ||
(intr_status & DMA2D_LL_EVENT_RX_DESC_ERROR)) {
assert(dma2d_ll_rx_is_fsm_idle(group->hal.dev, channel_id));
if (intr_status & (DMA2D_LL_EVENT_RX_SUC_EOF | DMA2D_LL_EVENT_RX_ERR_EOF | DMA2D_LL_EVENT_RX_DESC_ERROR)) {
if (!(intr_status & DMA2D_LL_EVENT_RX_ERR_EOF)) {
assert(dma2d_ll_rx_is_fsm_idle(group->hal.dev, channel_id));
}
need_yield |= free_up_channels(group, rx_chan);
}
@@ -365,7 +369,8 @@ esp_err_t dma2d_acquire_pool(const dma2d_pool_config_t *config, dma2d_pool_handl
pre_alloc_group->rx_channel_free_mask = (1 << SOC_DMA2D_RX_CHANNELS_PER_GROUP) - 1;
pre_alloc_group->tx_channel_reserved_mask = dma2d_tx_channel_reserved_mask[group_id];
pre_alloc_group->rx_channel_reserved_mask = dma2d_rx_channel_reserved_mask[group_id];
pre_alloc_group->periph_m2m_free_id_mask = DMA2D_LL_CHANNEL_PERIPH_M2M_FREE_ID_MASK;
pre_alloc_group->tx_periph_m2m_free_id_mask = DMA2D_LL_TX_CHANNEL_PERIPH_M2M_AVAILABLE_ID_MASK;
pre_alloc_group->rx_periph_m2m_free_id_mask = DMA2D_LL_RX_CHANNEL_PERIPH_M2M_AVAILABLE_ID_MASK;
pre_alloc_group->intr_priority = -1;
for (int i = 0; i < SOC_DMA2D_TX_CHANNELS_PER_GROUP; i++) {
pre_alloc_group->tx_chans[i] = &pre_alloc_tx_channels[i];
@@ -393,8 +398,6 @@ esp_err_t dma2d_acquire_pool(const dma2d_pool_config_t *config, dma2d_pool_handl
dma2d_hal_init(&pre_alloc_group->hal, group_id); // initialize HAL context
// Enable 2D-DMA module clock
dma2d_ll_hw_enable(s_platform.groups[group_id]->hal.dev, true);
// Configure 2D-DMA accessible memory range
dma2d_ll_set_accessible_mem_range(s_platform.groups[group_id]->hal.dev);
} else {
ret = ESP_ERR_NO_MEM;
free(pre_alloc_tx_channels);
@@ -531,41 +534,30 @@ esp_err_t dma2d_connect(dma2d_channel_handle_t dma2d_chan, const dma2d_trigger_t
// Find periph_sel_id for the channel
int peri_sel_id = trig_periph->periph_sel_id;
uint32_t *periph_m2m_free_id_mask = NULL;
uint32_t periph_m2m_available_id_mask = 0;
if (dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_TX) {
periph_m2m_free_id_mask = &group->tx_periph_m2m_free_id_mask;
periph_m2m_available_id_mask = DMA2D_LL_TX_CHANNEL_PERIPH_M2M_AVAILABLE_ID_MASK;
} else {
periph_m2m_free_id_mask = &group->rx_periph_m2m_free_id_mask;
periph_m2m_available_id_mask = DMA2D_LL_RX_CHANNEL_PERIPH_M2M_AVAILABLE_ID_MASK;
}
portENTER_CRITICAL_SAFE(&group->spinlock);
if (trig_periph->periph == DMA2D_TRIG_PERIPH_M2M) {
// Assign peri_sel_id to one of {4, 5, 6, 7}
assert(dma2d_chan->status.has_sibling);
// First find out the peri_sel_id of its sibling channel
int sibling_periph_sel_id = -1;
if (dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_TX) {
sibling_periph_sel_id = group->rx_chans[channel_id]->base.status.periph_sel_id;
} else {
sibling_periph_sel_id = group->tx_chans[channel_id]->base.status.periph_sel_id;
}
if (peri_sel_id == -1) {
// Unspecified periph_sel_id, let the driver decide
if (sibling_periph_sel_id != -1 && ((1 << sibling_periph_sel_id) & DMA2D_LL_CHANNEL_PERIPH_M2M_FREE_ID_MASK)) {
peri_sel_id = sibling_periph_sel_id;
} else {
peri_sel_id = __builtin_ctz(group->periph_m2m_free_id_mask);
}
peri_sel_id = __builtin_ctz(*periph_m2m_free_id_mask);
} else {
// Check whether specified periph_sel_id is valid
if (sibling_periph_sel_id != -1) {
if (sibling_periph_sel_id != peri_sel_id) {
peri_sel_id = -1; // Conflict id with its sibling channel
}
} else {
if (!((1 << peri_sel_id) & group->periph_m2m_free_id_mask & DMA2D_LL_CHANNEL_PERIPH_M2M_FREE_ID_MASK)) {
peri_sel_id = -1; // Occupied or invalid m2m peri_sel_id
}
if (!((1 << peri_sel_id) & *periph_m2m_free_id_mask & periph_m2m_available_id_mask)) {
peri_sel_id = -1; // Occupied or invalid m2m peri_sel_id
}
}
}
if (peri_sel_id >= 0) {
dma2d_chan->status.periph_sel_id = peri_sel_id;
group->periph_m2m_free_id_mask &= ~(1 << peri_sel_id); // acquire m2m periph_sel_id
*periph_m2m_free_id_mask &= ~(1 << peri_sel_id); // acquire m2m periph_sel_id
}
portEXIT_CRITICAL_SAFE(&group->spinlock);
ESP_GOTO_ON_FALSE_ISR(peri_sel_id >= 0, ESP_ERR_INVALID_ARG, err, TAG, "invalid periph_sel_id");
@@ -608,6 +600,7 @@ esp_err_t dma2d_connect(dma2d_channel_handle_t dma2d_chan, const dma2d_trigger_t
// Reset to certain settings
dma2d_ll_rx_enable_owner_check(group->hal.dev, channel_id, false);
dma2d_ll_rx_set_auto_return_owner(group->hal.dev, channel_id, DMA2D_DESCRIPTOR_BUFFER_OWNER_CPU); // After auto write back, the owner field will be cleared
dma2d_ll_rx_enable_descriptor_burst(group->hal.dev, channel_id, false);
dma2d_ll_rx_set_data_burst_length(group->hal.dev, channel_id, DMA2D_DATA_BURST_LENGTH_128);
dma2d_ll_rx_enable_page_bound_wrap(group->hal.dev, channel_id, true);
@@ -834,12 +827,10 @@ esp_err_t dma2d_set_transfer_ability(dma2d_channel_handle_t dma2d_chan, const dm
if (dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_TX) {
dma2d_ll_tx_enable_descriptor_burst(group->hal.dev, channel_id, ability->desc_burst_en);
dma2d_ll_tx_set_data_burst_length(group->hal.dev, channel_id, ability->data_burst_length);
dma2d_ll_tx_enable_page_bound_wrap(group->hal.dev, channel_id, ability->data_burst_length != 1);
dma2d_ll_tx_set_macro_block_size(group->hal.dev, channel_id, ability->mb_size);
} else {
dma2d_ll_rx_enable_descriptor_burst(group->hal.dev, channel_id, ability->desc_burst_en);
dma2d_ll_rx_set_data_burst_length(group->hal.dev, channel_id, ability->data_burst_length);
dma2d_ll_rx_enable_page_bound_wrap(group->hal.dev, channel_id, ability->data_burst_length != 1);
dma2d_ll_rx_set_macro_block_size(group->hal.dev, channel_id, ability->mb_size);
}
@@ -942,7 +933,7 @@ err:
esp_err_t dma2d_force_end(dma2d_trans_t *trans, bool *need_yield)
{
ESP_RETURN_ON_FALSE_ISR(trans && trans->rx_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_RETURN_ON_FALSE_ISR(trans && trans->rx_chan && need_yield, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
assert(trans->rx_chan->direction == DMA2D_CHANNEL_DIRECTION_RX);
dma2d_group_t *group = trans->rx_chan->group;
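
The heart of the dma2d.c changes: each direction now keeps its own pool of "dummy" M2M peripheral selection IDs, recorded per channel on acquire and OR-ed back into the per-direction free mask in `free_up_channels`. A minimal standalone sketch of that bitmask bookkeeping, using an illustrative mask value that mirrors DMA2D_LL_TX_CHANNEL_PERIPH_M2M_AVAILABLE_ID_MASK (the names here are illustrative, not the driver's):

#include <stdint.h>

#define TX_M2M_AVAILABLE_ID_MASK 0xF0u  // dummy IDs 4..7 are usable for TX M2M

static uint32_t s_tx_m2m_free_id_mask = TX_M2M_AVAILABLE_ID_MASK;

// Acquire: pick the lowest free dummy ID, as dma2d_connect does with __builtin_ctz()
static int acquire_tx_m2m_id(void)
{
    if (s_tx_m2m_free_id_mask == 0) {
        return -1;                              // all dummy IDs occupied
    }
    int id = __builtin_ctz(s_tx_m2m_free_id_mask);
    s_tx_m2m_free_id_mask &= ~(1u << id);       // mark the ID as taken
    return id;
}

// Release: OR the recorded ID back, clamped to the valid range,
// as free_up_channels does with tx_periph_sel_id_mask
static void release_tx_m2m_id(int id)
{
    s_tx_m2m_free_id_mask |= (1u << id) & TX_M2M_AVAILABLE_ID_MASK;
}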

View file

@@ -55,7 +55,8 @@ struct dma2d_group_t {
uint8_t rx_channel_free_mask; // Bit mask indicating the free RX channels at the moment
uint8_t tx_channel_reserved_mask; // Bit mask indicating the TX channels that are reserved
uint8_t rx_channel_reserved_mask; // Bit mask indicating the RX channels that are reserved
uint32_t periph_m2m_free_id_mask; // Bit mask indicating the available M2M peripheral selection IDs at the moment
uint32_t tx_periph_m2m_free_id_mask; // Bit mask indicating the available TX M2M peripheral selection IDs at the moment
uint32_t rx_periph_m2m_free_id_mask; // Bit mask indicating the available RX M2M peripheral selection IDs at the moment
dma2d_tx_channel_t *tx_chans[SOC_DMA2D_TX_CHANNELS_PER_GROUP]; // Handles of 2D-DMA TX channels
dma2d_rx_channel_t *rx_chans[SOC_DMA2D_RX_CHANNELS_PER_GROUP]; // Handles of 2D-DMA RX channels
int intr_priority; // All channels in the same group should share the same interrupt priority
@@ -70,7 +71,6 @@ struct dma2d_channel_t {
struct {
dma2d_trans_t *transaction; // Pointer to the 2D-DMA transaction context that is currently being processed on the channel
uint32_t reorder_en : 1; // This flag indicates the channel will enable reorder functionality
uint32_t has_sibling : 1; // This flag indicates its sibling channel is also in-use
int periph_sel_id : (DMA2D_LL_CHANNEL_PERIPH_SEL_BIT_WIDTH + 1); // This is used to record the periph_sel_id of each channel
} status;
};

View file

@@ -104,9 +104,9 @@ TEST_CASE("DMA2D_M2M_1D_basic", "[DMA2D]")
prtx[idx] = (i + idx + 0x45) & 0xFF;
prrx[idx] = 0;
}
// Writeback and invalidate the TX and RX buffers
esp_cache_msync((void *)prtx, data_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
esp_cache_msync((void *)prrx, data_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
// Writeback TX and RX buffers
esp_cache_msync((void *)prtx, data_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
esp_cache_msync((void *)prrx, data_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
// DMA description preparation
dma2d_link_dscr_init((uint32_t *)tx_dsc[i], NULL, (void *)prtx,
@@ -146,6 +146,11 @@ TEST_CASE("DMA2D_M2M_1D_basic", "[DMA2D]")
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
prtx = tx_buf + i * data_size;
prrx = rx_buf + i * data_size;
// Invalidate TX and RX buffers
esp_cache_msync((void *)prtx, data_size, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
esp_cache_msync((void *)prrx, data_size, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
for (int idx = 0; idx < data_size; idx++) {
TEST_ASSERT_EQUAL(prtx[idx], prrx[idx]);
TEST_ASSERT_EQUAL(prtx[idx], (i + idx + 0x45) & 0xFF);
@@ -249,9 +254,9 @@ TEST_CASE("DMA2D_M2M_1D_RGB565_to_RGB888", "[DMA2D]")
prrx[idx * 3 + 1] = 0;
prrx[idx * 3 + 2] = 0;
}
// Writeback and invalidate the TX and RX buffers
esp_cache_msync((void *)prtx, item_size * 2, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
esp_cache_msync((void *)prrx, item_size * 3, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
// Writeback TX and RX buffers
esp_cache_msync((void *)prtx, item_size * 2, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
esp_cache_msync((void *)prrx, item_size * 3, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
// DMA description preparation
dma2d_link_dscr_init((uint32_t *)tx_dsc[i], NULL, (void *)prtx,
@@ -292,6 +297,11 @@ TEST_CASE("DMA2D_M2M_1D_RGB565_to_RGB888", "[DMA2D]")
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
prtx = tx_buf + i * item_size * 2;
prrx = rx_buf + i * item_size * 3;
// Invalidate TX and RX buffers
esp_cache_msync((void *)prtx, item_size * 2, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
esp_cache_msync((void *)prrx, item_size * 3, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
TEST_ASSERT_EQUAL(0, rgb565_to_rgb888_and_cmp(prtx, prrx, item_size));
}
@@ -349,9 +359,9 @@ TEST_CASE("DMA2D_M2M_2D_basic", "[DMA2D]")
prtx[idx] = (i + idx + 0x45) & 0xFF;
prrx[idx] = 0;
}
// Writeback and invalidate the TX and RX buffers
esp_cache_msync((void *)prtx, stripe_size * stripe_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
esp_cache_msync((void *)prrx, stripe_size * stripe_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
// Writeback TX and RX buffers
esp_cache_msync((void *)prtx, stripe_size * stripe_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
esp_cache_msync((void *)prrx, stripe_size * stripe_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
// DMA description preparation
dma2d_link_dscr_init((uint32_t *)tx_dsc[i], NULL, (void *)prtx,
@@ -391,6 +401,11 @@ TEST_CASE("DMA2D_M2M_2D_basic", "[DMA2D]")
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
prtx = tx_buf + i * stripe_size * stripe_size;
prrx = rx_buf + i * stripe_size * stripe_size;
// Invalidate TX and RX buffers
esp_cache_msync((void *)prtx, stripe_size * stripe_size, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
esp_cache_msync((void *)prrx, stripe_size * stripe_size, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
for (int idx = 0; idx < stripe_size * stripe_size; idx++) {
TEST_ASSERT_EQUAL(prtx[idx], prrx[idx]);
TEST_ASSERT_EQUAL(prtx[idx], (i + idx + 0x45) & 0xFF);
@@ -494,9 +509,9 @@ TEST_CASE("DMA2D_M2M_2D_RGB888_to_RGB565", "[DMA2D]")
prrx[idx * 2] = 0;
prrx[idx * 2 + 1] = 0;
}
// Writeback and invalidate the TX and RX buffers
esp_cache_msync((void *)prtx, stripe_pixel_size * stripe_pixel_size * 3, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
esp_cache_msync((void *)prrx, stripe_pixel_size * stripe_pixel_size * 2, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
// Writeback TX and RX buffers
esp_cache_msync((void *)prtx, stripe_pixel_size * stripe_pixel_size * 3, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
esp_cache_msync((void *)prrx, stripe_pixel_size * stripe_pixel_size * 2, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
// DMA description preparation
dma2d_link_dscr_init((uint32_t *)tx_dsc[i], NULL, (void *)prtx,
@@ -537,6 +552,11 @@ TEST_CASE("DMA2D_M2M_2D_RGB888_to_RGB565", "[DMA2D]")
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
prtx = tx_buf + i * stripe_pixel_size * stripe_pixel_size * 3;
prrx = rx_buf + i * stripe_pixel_size * stripe_pixel_size * 2;
// Invalidate TX and RX buffers
esp_cache_msync((void *)prtx, stripe_pixel_size * stripe_pixel_size * 3, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
esp_cache_msync((void *)prrx, stripe_pixel_size * stripe_pixel_size * 2, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
TEST_ASSERT_EQUAL(0, rgb888_to_rgb565_and_cmp(prtx, prrx, stripe_pixel_size * stripe_pixel_size));
}
@@ -601,9 +621,9 @@ TEST_CASE("DMA2D_M2M_2D_window", "[DMA2D]")
prrx[idx * 2 + 1] = 0xFF;
}
// Writeback and invalidate the TX and RX buffers
esp_cache_msync((void *)prtx, 64, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
esp_cache_msync((void *)prrx, va * ha * 2, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
// Writeback TX and RX buffers
esp_cache_msync((void *)prtx, 64, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
esp_cache_msync((void *)prrx, va * ha * 2, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
// DMA description preparation
dma2d_link_dscr_init((uint32_t *)tx_dsc[i], NULL, (void *)prtx,
@@ -643,6 +663,11 @@ TEST_CASE("DMA2D_M2M_2D_window", "[DMA2D]")
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
prtx = tx_buf + i * 64;
prrx = rx_buf + i * va * ha * 2;
// Invalidate TX and RX buffers
esp_cache_msync((void *)prtx, 64, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
esp_cache_msync((void *)prrx, va * ha * 2, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
printf("pic:\n");
for (int idx = 0; idx < va * ha; idx++) {
printf("%02X%02X ", prrx[idx * 2], prrx[idx * 2 + 1]);

View file

@@ -62,8 +62,9 @@ extern "C" {
#define DMA2D_LL_RX_CHANNEL_SUPPORT_RO_MASK (0U | BIT0) // RX channels that support reorder feature
#define DMA2D_LL_RX_CHANNEL_SUPPORT_CSC_MASK (0U | BIT0) // RX channels that support color space conversion feature
// Any "dummy" peripheral selection ID can be used for M2M mode {4, 5, 6, 7}
#define DMA2D_LL_CHANNEL_PERIPH_M2M_FREE_ID_MASK (0xF0)
// Any "dummy" peripheral selection ID can be used for M2M mode
#define DMA2D_LL_TX_CHANNEL_PERIPH_M2M_AVAILABLE_ID_MASK (0xF0)
#define DMA2D_LL_RX_CHANNEL_PERIPH_M2M_AVAILABLE_ID_MASK (0xF8)
// Peripheral selection ID that disconnects 2D-DMA channel from any peripherals
#define DMA2D_LL_CHANNEL_PERIPH_NO_CHOICE (7)
// Peripheral selection ID register field width
@@ -104,17 +105,6 @@ static inline void dma2d_ll_reset_register(int group_id)
/// the critical section needs to declare the __DECLARE_RCC_ATOMIC_ENV variable in advance
#define dma2d_ll_reset_register(...) (void)__DECLARE_RCC_ATOMIC_ENV; dma2d_ll_reset_register(__VA_ARGS__)
/**
* @brief Configure 2D-DMA accessible internal and external memory start/end address (for both buffer and descriptor)
*/
static inline void dma2d_ll_set_accessible_mem_range(dma2d_dev_t *dev)
{
dev->intr_mem_start_addr.access_intr_mem_start_addr = SOC_DIRAM_DRAM_LOW; // L2MEM region (2D-DMA indeed is able to access TCM/LP_MEM regions, but will be restricted in IDF)
dev->intr_mem_end_addr.access_intr_mem_end_addr = SOC_DIRAM_DRAM_HIGH;
dev->extr_mem_start_addr.access_extr_mem_start_addr = SOC_EXTRAM_LOW;
dev->extr_mem_end_addr.access_extr_mem_end_addr = SOC_EXTRAM_HIGH;
}
/**
* @brief Enable 2D-DMA module
*/
@@ -230,7 +220,7 @@ static inline void dma2d_ll_rx_set_data_burst_length(dma2d_dev_t *dev, uint32_t
{
uint32_t sel;
switch (length) {
case DMA2D_DATA_BURST_LENGTH_1:
case DMA2D_DATA_BURST_LENGTH_8:
sel = 0;
break;
case DMA2D_DATA_BURST_LENGTH_16:
@@ -384,13 +374,13 @@ static inline void dma2d_ll_rx_restart(dma2d_dev_t *dev, uint32_t channel)
}
/**
* @brief Enable 2D-DMA RX to return the address of current descriptor when receives error
* @brief Configure the value of the owner field written back to the 2D-DMA RX descriptor
*/
__attribute__((always_inline))
static inline void dma2d_ll_rx_enable_auto_return(dma2d_dev_t *dev, uint32_t channel, bool enable)
static inline void dma2d_ll_rx_set_auto_return_owner(dma2d_dev_t *dev, uint32_t channel, int owner)
{
volatile dma2d_in_link_conf_chn_reg_t *reg = (volatile dma2d_in_link_conf_chn_reg_t *)DMA2D_LL_IN_CHANNEL_GET_REG_ADDR(dev, channel, in_link_conf);
reg->inlink_auto_ret_chn = enable;
reg->inlink_auto_ret_chn = owner;
}
/**
@@ -767,7 +757,7 @@ static inline void dma2d_ll_tx_set_data_burst_length(dma2d_dev_t *dev, uint32_t
{
uint32_t sel;
switch (length) {
case DMA2D_DATA_BURST_LENGTH_1:
case DMA2D_DATA_BURST_LENGTH_8:
sel = 0;
break;
case DMA2D_DATA_BURST_LENGTH_16:
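
Both burst-length switches now start at DMA2D_DATA_BURST_LENGTH_8: register value 0 selects an 8-byte block, so the old DMA2D_DATA_BURST_LENGTH_1 name wrongly promised single-byte bursts. The full mapping implied by the register description, sketched below (the enum typedef name is an assumption):

// Maps the burst-length enum onto the 3-bit mem_burst_length register field
static inline uint32_t burst_length_to_reg_sel(dma2d_data_burst_length_t length)
{
    switch (length) {
    case DMA2D_DATA_BURST_LENGTH_8:   return 0;
    case DMA2D_DATA_BURST_LENGTH_16:  return 1;
    case DMA2D_DATA_BURST_LENGTH_32:  return 2;
    case DMA2D_DATA_BURST_LENGTH_64:  return 3;
    case DMA2D_DATA_BURST_LENGTH_128: return 4;
    default:                          return 0;  // fall back to the smallest burst
    }
}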

View file

@@ -120,7 +120,7 @@ typedef enum {
* @brief Enumeration of 2D-DMA data burst length options
*/
typedef enum {
DMA2D_DATA_BURST_LENGTH_1, /*!< 2D-DMA block size: single byte */
DMA2D_DATA_BURST_LENGTH_8, /*!< 2D-DMA block size: 8 bytes */
DMA2D_DATA_BURST_LENGTH_16, /*!< 2D-DMA block size: 16 bytes */
DMA2D_DATA_BURST_LENGTH_32, /*!< 2D-DMA block size: 32 bytes */
DMA2D_DATA_BURST_LENGTH_64, /*!< 2D-DMA block size: 64 bytes */
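
After the rename, the smallest configurable burst is 8 bytes. A sketch of how the public call seen earlier might set a channel's transfer ability (`chan` is a hypothetical dma2d_channel_handle_t obtained from the driver; the mb_size value name is an assumption):

dma2d_transfer_ability_t ability = {
    .desc_burst_en = true,                             // burst-read the descriptors
    .data_burst_length = DMA2D_DATA_BURST_LENGTH_128,  // largest block size
    .mb_size = DMA2D_MACRO_BLOCK_SIZE_NONE,            // assumed name: no macro-block splitting
};
ESP_ERROR_CHECK(dma2d_set_transfer_ability(chan, &ability));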

View file

@@ -10,10 +10,10 @@
#define SOC_DMA2D_TRIG_PERIPH_JPEG_RX (0)
#define SOC_DMA2D_TRIG_PERIPH_PPA_SR_RX (1)
#define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_RX (2)
#define SOC_DMA2D_TRIG_PERIPH_M2M_RX (-1) // Any value of 3 ~ 7, but TX and RX needs the same ID for M2M
#define SOC_DMA2D_TRIG_PERIPH_M2M_RX (-1) // Any value of 3 ~ 7; TX and RX do not have to use the same ID value for M2M
#define SOC_DMA2D_TRIG_PERIPH_JPEG_TX (0)
#define SOC_DMA2D_TRIG_PERIPH_PPA_SR_TX (1)
#define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_FG_TX (2)
#define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_BG_TX (3)
#define SOC_DMA2D_TRIG_PERIPH_M2M_TX (-1) // Any value of 4 ~ 7, but TX and RX needs the same ID for M2M
#define SOC_DMA2D_TRIG_PERIPH_M2M_TX (-1) // Any value of 4 ~ 7; TX and RX do not have to use the same ID value for M2M
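
These comment fixes mirror the driver change: TX and RX M2M IDs now come from independent pools, so passing -1 simply lets the driver pick any free dummy ID per direction. A sketch of wiring up an M2M channel pair (`tx_chan`/`rx_chan` are hypothetical handles, e.g. from a transaction's channel_handle_array):

dma2d_trigger_t m2m_trigger = {
    .periph = DMA2D_TRIG_PERIPH_M2M,
    .periph_sel_id = SOC_DMA2D_TRIG_PERIPH_M2M_TX,  // -1: driver picks a free TX dummy ID
};
ESP_ERROR_CHECK(dma2d_connect(tx_chan, &m2m_trigger));

m2m_trigger.periph_sel_id = SOC_DMA2D_TRIG_PERIPH_M2M_RX;  // may differ from the TX ID now
ESP_ERROR_CHECK(dma2d_connect(rx_chan, &m2m_trigger));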

View file

@@ -1,5 +1,5 @@
/**
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -63,7 +63,7 @@ extern "C" {
#define DMA2D_OUT_LOOP_TEST_CH0_V 0x00000001U
#define DMA2D_OUT_LOOP_TEST_CH0_S 5
/** DMA2D_OUT_MEM_BURST_LENGTH_CH0 : R/W; bitpos: [8:6]; default: 0;
* Block size of Tx channel 0. 0: single 1: 16 bytes 2: 32 bytes 3: 64
* Block size of Tx channel 0. 0: 8 bytes 1: 16 bytes 2: 32 bytes 3: 64
* bytes 4: 128 bytes
*/
#define DMA2D_OUT_MEM_BURST_LENGTH_CH0 0x00000007U
@@ -1098,7 +1098,7 @@ extern "C" {
#define DMA2D_OUT_LOOP_TEST_CH1_V 0x00000001U
#define DMA2D_OUT_LOOP_TEST_CH1_S 5
/** DMA2D_OUT_MEM_BURST_LENGTH_CH1 : R/W; bitpos: [8:6]; default: 0;
* Block size of Tx channel 0. 0: single 1: 16 bytes 2: 32 bytes 3: 64
* Block size of Tx channel 1. 0: 8 bytes 1: 16 bytes 2: 32 bytes 3: 64
* bytes 4: 128 bytes
*/
#define DMA2D_OUT_MEM_BURST_LENGTH_CH1 0x00000007U
@@ -2106,7 +2106,7 @@ extern "C" {
#define DMA2D_OUT_LOOP_TEST_CH2_V 0x00000001U
#define DMA2D_OUT_LOOP_TEST_CH2_S 5
/** DMA2D_OUT_MEM_BURST_LENGTH_CH2 : R/W; bitpos: [8:6]; default: 0;
* Block size of Tx channel 0. 0: single 1: 16 bytes 2: 32 bytes 3: 64
* Block size of Tx channel 2. 0: 8 bytes 1: 16 bytes 2: 32 bytes 3: 64
* bytes 4: 128 bytes
*/
#define DMA2D_OUT_MEM_BURST_LENGTH_CH2 0x00000007U
@@ -3105,7 +3105,7 @@ extern "C" {
#define DMA2D_IN_LOOP_TEST_CH0_V 0x00000001U
#define DMA2D_IN_LOOP_TEST_CH0_S 5
/** DMA2D_IN_MEM_BURST_LENGTH_CH0 : R/W; bitpos: [8:6]; default: 0;
* Block size of Rx channel 0. 0: single 1: 16 bytes 2: 32 bytes 3: 64
* Block size of Rx channel 0. 0: 8 bytes 1: 16 bytes 2: 32 bytes 3: 64
* bytes 4: 128 bytes
*/
#define DMA2D_IN_MEM_BURST_LENGTH_CH0 0x00000007U
@@ -3732,8 +3732,8 @@ extern "C" {
*/
#define DMA2D_IN_LINK_CONF_CH0_REG (DR_REG_DMA2D_BASE + 0x51c)
/** DMA2D_INLINK_AUTO_RET_CH0 : R/W; bitpos: [20]; default: 1;
* Set this bit to return to current inlink descriptor's address, when there are some
* errors in current receiving data.
* Configure the value of the owner field written back to the inlink descriptor.
* 1: Write back 1. 0: Write back 0.
*/
#define DMA2D_INLINK_AUTO_RET_CH0 (BIT(20))
#define DMA2D_INLINK_AUTO_RET_CH0_M (DMA2D_INLINK_AUTO_RET_CH0_V << DMA2D_INLINK_AUTO_RET_CH0_S)
@@ -4168,7 +4168,7 @@ extern "C" {
#define DMA2D_IN_LOOP_TEST_CH1_V 0x00000001U
#define DMA2D_IN_LOOP_TEST_CH1_S 5
/** DMA2D_IN_MEM_BURST_LENGTH_CH1 : R/W; bitpos: [8:6]; default: 0;
* Block size of Rx channel 0. 0: single 1: 16 bytes 2: 32 bytes 3: 64
* Block size of Rx channel 1. 0: 8 bytes 1: 16 bytes 2: 32 bytes 3: 64
* bytes 4: 128 bytes
*/
#define DMA2D_IN_MEM_BURST_LENGTH_CH1 0x00000007U
@@ -4795,8 +4795,8 @@ extern "C" {
*/
#define DMA2D_IN_LINK_CONF_CH1_REG (DR_REG_DMA2D_BASE + 0x61c)
/** DMA2D_INLINK_AUTO_RET_CH1 : R/W; bitpos: [20]; default: 1;
* Set this bit to return to current inlink descriptor's address, when there are some
* errors in current receiving data.
* Configure the value of the owner field written back to the inlink descriptor.
* 1: Write back 1. 0: Write back 0.
*/
#define DMA2D_INLINK_AUTO_RET_CH1 (BIT(20))
#define DMA2D_INLINK_AUTO_RET_CH1_M (DMA2D_INLINK_AUTO_RET_CH1_V << DMA2D_INLINK_AUTO_RET_CH1_S)

View file

@@ -46,7 +46,7 @@ typedef union {
*/
uint32_t out_loop_test_chn:1;
/** out_mem_burst_length_chn : R/W; bitpos: [8:6]; default: 0;
* Block size of Tx channel 0. 0: single 1: 16 bytes 2: 32 bytes 3: 64
* Block size of Tx channel n. 0: 8 bytes 1: 16 bytes 2: 32 bytes 3: 64
* bytes 4: 128 bytes
*/
uint32_t out_mem_burst_length_chn:3;
@@ -762,7 +762,7 @@ typedef union {
*/
uint32_t in_loop_test_chn:1;
/** in_mem_burst_length_chn : R/W; bitpos: [8:6]; default: 0;
* Block size of Rx channel 0. 0: single 1: 16 bytes 2: 32 bytes 3: 64
* Block size of Rx channel n. 0: 8 bytes 1: 16 bytes 2: 32 bytes 3: 64
* bytes 4: 128 bytes
*/
uint32_t in_mem_burst_length_chn:3;
@@ -1179,8 +1179,8 @@ typedef union {
struct {
uint32_t reserved_0:20;
/** inlink_auto_ret_chn : R/W; bitpos: [20]; default: 1;
* Set this bit to return to current inlink descriptor's address, when there are some
* errors in current receiving data.
* Configure the value of the owner field written back to the inlink descriptor.
* 1: Write back 1. 0: Write back 0.
*/
uint32_t inlink_auto_ret_chn:1;
/** inlink_stop_chn : R/W/SC; bitpos: [21]; default: 0;