From b303e4b7a6f8d031b506250139cf8908ad14567f Mon Sep 17 00:00:00 2001 From: Armando Date: Fri, 24 Jun 2022 19:01:51 +0800 Subject: [PATCH 1/5] spi_master: new segmented-configure-transfer mode --- .../include/driver/spi_master.h | 104 +++++ .../esp_driver_spi/src/gpspi/spi_master.c | 420 +++++++++++++++--- components/hal/esp32c2/include/hal/spi_ll.h | 276 ++++++++++++ components/hal/esp32c3/include/hal/spi_ll.h | 273 ++++++++++++ components/hal/esp32h2/include/hal/spi_ll.h | 275 ++++++++++++ components/hal/esp32s2/include/hal/spi_ll.h | 297 ++++++++++++- components/hal/esp32s3/include/hal/spi_ll.h | 274 ++++++++++++ components/hal/include/hal/spi_hal.h | 165 +++++++ components/hal/spi_hal.c | 28 ++ components/hal/spi_hal_iram.c | 174 ++++++++ .../esp32c2/include/soc/Kconfig.soc_caps.in | 12 + components/soc/esp32c2/include/soc/soc_caps.h | 5 + .../esp32c3/include/soc/Kconfig.soc_caps.in | 12 + components/soc/esp32c3/include/soc/soc_caps.h | 5 + .../esp32h2/include/soc/Kconfig.soc_caps.in | 12 + components/soc/esp32h2/include/soc/soc_caps.h | 5 + .../esp32s2/include/soc/Kconfig.soc_caps.in | 16 +- components/soc/esp32s2/include/soc/soc_caps.h | 9 +- .../esp32s3/include/soc/Kconfig.soc_caps.in | 12 + components/soc/esp32s3/include/soc/soc_caps.h | 5 + 20 files changed, 2319 insertions(+), 60 deletions(-) diff --git a/components/esp_driver_spi/include/driver/spi_master.h b/components/esp_driver_spi/include/driver/spi_master.h index 660f46283f..43f8ddf647 100644 --- a/components/esp_driver_spi/include/driver/spi_master.h +++ b/components/esp_driver_spi/include/driver/spi_master.h @@ -11,6 +11,7 @@ #include "hal/spi_types.h" //for spi_bus_initialization functions. to be back-compatible #include "driver/spi_common.h" +#include "soc/soc_caps.h" /** * @brief SPI common used frequency (in Hz) @@ -157,6 +158,47 @@ typedef struct { uint8_t dummy_bits; ///< The dummy length in this transaction, in bits. } spi_transaction_ext_t ; +#if SOC_SPI_SCT_SUPPORTED +/** + * @Backgrounds: `SCT Mode` + * Segmented-Configure-Transfer Mode + * + * In this mode, you can pre-configure multiple SPI transactions. + * - The whole set of pre-configured transactions is called one `Segmented-Configure-Transaction`, or one `SCT`. + * - Each of the transactions in one `SCT` is called one `Segment`. + * + * Each segment can have its own SPI phase configuration. + */ + +/** + * SPI SCT Mode transaction flags + */ +#define SPI_SEG_TRANS_PREP_LEN_UPDATED (1<<0) ///< Use `spi_seg_transaction_t: cs_ena_pretrans` in this segment. +#define SPI_SEG_TRANS_CMD_LEN_UPDATED (1<<1) ///< Use `spi_seg_transaction_t: command_bits` in this segment. +#define SPI_SEG_TRANS_ADDR_LEN_UPDATED (1<<2) ///< Use `spi_seg_transaction_t: address_bits` in this segment. +#define SPI_SEG_TRANS_DUMMY_LEN_UPDATED (1<<3) ///< Use `spi_seg_transaction_t: dummy_bits` in this segment. +#define SPI_SEG_TRANS_DONE_LEN_UPDATED (1<<4) ///< Use `spi_seg_transaction_t: cs_ena_posttrans` in this segment. + +/** + * This struct is for SPI SCT (Segmented-Configure-Transfer) Mode. + * + * By default, the length of each SPI phase does not change per segment: each segment uses the phase lengths you set when calling `spi_bus_add_device()`. + * However, you can force a segment to use its own phase lengths by setting the corresponding `SPI_SEG_TRANS_XXX` flags.
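+ * + * For example, setting `SPI_SEG_TRANS_DUMMY_LEN_UPDATED` in `seg_trans_flags` makes this segment use its own `dummy_bits` instead of the dummy length configured for the device.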
+ */ +typedef struct { + struct spi_transaction_t base; ///< Transaction data, so that pointer to spi_transaction_t can be converted into spi_seg_transaction_t + uint8_t cs_ena_pretrans; ///< Amount of SPI bit-cycles the cs should be activated before the transmission + uint8_t cs_ena_posttrans; ///< Amount of SPI bit-cycles the cs should stay active after the transmission + uint8_t command_bits; ///< The command length in this transaction, in bits. + uint8_t address_bits; ///< The address length in this transaction, in bits. + uint8_t dummy_bits; ///< The dummy length in this transaction, in bits. + uint32_t seg_trans_flags; ///< SCT specific flags. See `SPI_SEG_TRANS_XXX` macros. + + /**< Necessary buffer required by HW, don't touch this. >**/ + uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]; +} spi_seg_transaction_t; +#endif //#if SOC_SPI_SCT_SUPPORTED + typedef struct spi_device_t *spi_device_handle_t; ///< Handle for a device on a SPI bus /** * @brief Allocate a device on a SPI bus @@ -258,6 +300,68 @@ esp_err_t spi_device_get_trans_result(spi_device_handle_t handle, spi_transactio */ esp_err_t spi_device_transmit(spi_device_handle_t handle, spi_transaction_t *trans_desc); +#if SOC_SPI_SCT_SUPPORTED +/** + * @brief Enable/Disable Segmented-Configure-Transfer (SCT) mode + * + * Search for `@Backgrounds: `SCT Mode`` in this header file to know what is SCT mode + * + * @note This API isn't thread safe. Besides, after enabling this, current SPI host will be switched into SCT mode. + * Therefore, never call this API when in multiple threads, or when an SPI transaction is ongoing (on this SPI host). + * + * @param handle Device handle obtained using spi_host_add_dev + * @param enable True: to enable SCT mode; False: to disable SCT mode + * + * @return + * - ESP_OK: On success + * - ESP_ERR_INVALID_ARG: Invalid arguments + * - ESP_ERR_INVALID_STATE: Invalid states, e.g.: an SPI polling transaction is ongoing, SPI internal Queue isn't empty, etc. + */ +esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool enable); + + +/** + * @brief Queue an SPI Segmented-Configure-Transaction (SCT) list for interrupt transaction execution. + * + * Search for `@Backgrounds: `SCT Mode`` in this header file to know what is SCT mode + * + * @note After calling this API, call `spi_device_get_segment_trans_result` to get the transaction results. + * + * @param handle Device handle obtained using spi_host_add_dev + * @param seg_trans_desc Pointer to the transaction segments list head (a one-segment-list is also acceptable) + * @param seg_num Segment number + * @param ticks_to_wait Ticks to wait until there's room in the queue; use portMAX_DELAY to never time out. + * + * @return + * - ESP_OK: On success + * - ESP_ERR_INVALID_ARG: Invalid arguments + * - ESP_ERR_INVALID_STATE: Invalid states, e.g.: an SPI polling transaction is ongoing, SCT mode isn't enabled, DMA descriptors not enough, etc. + * - ESP_ERR_TIMEOUT: Timeout, this SCT transaction isn't queued successfully + */ +esp_err_t spi_device_queue_segment_trans(spi_device_handle_t handle, spi_seg_transaction_t *seg_trans_desc, uint32_t seg_num, TickType_t ticks_to_wait); + + +/** + * @brief Get the result of an SPI Segmented-Configure-Transaction (SCT). + * + * Search for `@Backgrounds: `SCT Mode`` in this header file to know what is SCT mode + * + * @note Until this API returns (with `ESP_OK`), you can now recycle the memory used for this SCT list (pointed by `seg_trans_desc`). 
+ * You must maintain the SCT list related memory before this API returns, otherwise the SCT transaction may fail + * + * @param handle Device handle obtained using spi_host_add_dev + * @param[out] seg_trans_desc Pointer to the completed SCT list head (then you can recycle this list of memory). + * @param ticks_to_wait Ticks to wait until there's a returned item; use portMAX_DELAY to never time out. + * + * @return + * - ESP_OK: On success + * - ESP_ERR_INVALID_ARG: Invalid arguments + * - ESP_ERR_INVALID_STATE: Invalid states, e.g.: SCT mode isn't enabled, etc. + * - ESP_ERR_TIMEOUT: Timeout, didn't get a completed SCT transaction + */ +esp_err_t spi_device_get_segment_trans_result(spi_device_handle_t handle, spi_seg_transaction_t **seg_trans_desc, TickType_t ticks_to_wait); +#endif //#if SOC_SPI_SCT_SUPPORTED + /** * @brief Immediately start a polling transaction. * diff --git a/components/esp_driver_spi/src/gpspi/spi_master.c b/components/esp_driver_spi/src/gpspi/spi_master.c index ac2227f739..67f3599caf 100644 --- a/components/esp_driver_spi/src/gpspi/spi_master.c +++ b/components/esp_driver_spi/src/gpspi/spi_master.c @@ -140,23 +140,40 @@ typedef struct { const uint32_t *buffer_to_send; //equals to tx_data, if SPI_TRANS_USE_RXDATA is applied; otherwise if original buffer wasn't in DMA-capable memory, this gets the address of a temporary buffer that is; //otherwise sets to the original buffer or NULL if no buffer is assigned. uint32_t *buffer_to_rcv; // similar to buffer_to_send + uint32_t dummy; //As we create the queue when in init, to use sct mode private descriptor as a queue item (when in sct mode), we need to add a dummy member here to keep the same size with `spi_sct_desc_priv_t`. } spi_trans_priv_t; +#if SOC_SPI_SCT_SUPPORTED +//Type of dma descriptors that used under SPI SCT mode +typedef struct { + lldesc_t *tx_seg_head; + lldesc_t *rx_seg_head; + spi_seg_transaction_t *sct_trans_desc_head; + uint16_t tx_used_desc_num; + uint16_t rx_used_desc_num; +} spi_sct_desc_priv_t; +#endif + typedef struct { int id; spi_device_t* device[DEV_NUM_MAX]; intr_handle_t intr; spi_hal_context_t hal; spi_trans_priv_t cur_trans_buf; +#if SOC_SPI_SCT_SUPPORTED + spi_sct_desc_priv_t cur_sct_trans; +#endif int cur_cs; //current device doing transaction const spi_bus_attr_t* bus_attr; const spi_dma_ctx_t *dma_ctx; + bool sct_mode_enabled; /** * the bus is permanently controlled by a device until `spi_bus_release_bus`` is called. Otherwise * the acquiring of SPI bus will be freed when `spi_device_polling_end` is called. 
*/ spi_device_t* device_acquiring_lock; + portMUX_TYPE spinlock; //debug information bool polling; //in process of a polling, avoid of queue new transactions into ISR @@ -237,6 +254,7 @@ static esp_err_t spi_master_init_driver(spi_host_device_t host_id) .cur_cs = DEV_NUM_MAX, .polling = false, .device_acquiring_lock = NULL, + .spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED, .bus_attr = bus_attr, .dma_ctx = dma_ctx, }; @@ -678,57 +696,61 @@ static void SPI_MASTER_ISR_ATTR s_spi_prepare_data(spi_device_t *dev, const spi_ spi_hal_enable_data_line(hal->hw, (!hal_dev->half_duplex && hal_trans->rcv_buffer) || hal_trans->send_buffer, !!hal_trans->rcv_buffer); } +static void SPI_MASTER_ISR_ATTR spi_format_hal_trans_struct(spi_device_t *dev, spi_trans_priv_t *trans_buf, spi_hal_trans_config_t *hal_trans) +{ + spi_host_t *host = dev->host; + spi_transaction_t *trans = trans_buf->trans; + hal_trans->tx_bitlen = trans->length; + hal_trans->rx_bitlen = trans->rxlength; + hal_trans->rcv_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_rcv; + hal_trans->send_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_send; + hal_trans->cmd = trans->cmd; + hal_trans->addr = trans->addr; + + if (trans->flags & SPI_TRANS_VARIABLE_CMD) { + hal_trans->cmd_bits = ((spi_transaction_ext_t *)trans)->command_bits; + } else { + hal_trans->cmd_bits = dev->cfg.command_bits; + } + if (trans->flags & SPI_TRANS_VARIABLE_ADDR) { + hal_trans->addr_bits = ((spi_transaction_ext_t *)trans)->address_bits; + } else { + hal_trans->addr_bits = dev->cfg.address_bits; + } + if (trans->flags & SPI_TRANS_VARIABLE_DUMMY) { + hal_trans->dummy_bits = ((spi_transaction_ext_t *)trans)->dummy_bits; + } else { + hal_trans->dummy_bits = dev->cfg.dummy_bits; + } + + hal_trans->cs_keep_active = (trans->flags & SPI_TRANS_CS_KEEP_ACTIVE) ? 1 : 0; + //Set up OIO/QIO/DIO if needed + hal_trans->line_mode.data_lines = (trans->flags & SPI_TRANS_MODE_DIO) ? 2 : (trans->flags & SPI_TRANS_MODE_QIO) ? 4 : 1; +#if SOC_SPI_SUPPORT_OCT + if (trans->flags & SPI_TRANS_MODE_OCT) { + hal_trans->line_mode.data_lines = 8; + } +#endif + hal_trans->line_mode.addr_lines = (trans->flags & SPI_TRANS_MULTILINE_ADDR) ? hal_trans->line_mode.data_lines : 1; + hal_trans->line_mode.cmd_lines = (trans->flags & SPI_TRANS_MULTILINE_CMD) ? hal_trans->line_mode.data_lines : 1; +} + // The function is called to send a new transaction, in ISR or in the task. // Setup the transaction-specified registers and linked-list used by the DMA (or FIFO if DMA is not used) static void SPI_MASTER_ISR_ATTR spi_new_trans(spi_device_t *dev, spi_trans_priv_t *trans_buf) { spi_transaction_t *trans = trans_buf->trans; - spi_host_t *host = dev->host; - spi_hal_context_t *hal = &(host->hal); + spi_hal_context_t *hal = &(dev->host->hal); spi_hal_dev_config_t *hal_dev = &(dev->hal_dev); - host->cur_cs = dev->id; + dev->host->cur_cs = dev->id; //Reconfigure according to device settings, the function only has effect when the dev_id is changed. spi_setup_device(dev); //set the transaction specific configuration each time before a transaction setup spi_hal_trans_config_t hal_trans = {}; - hal_trans.tx_bitlen = trans->length; - hal_trans.rx_bitlen = trans->rxlength; - hal_trans.rcv_buffer = (uint8_t*)trans_buf->buffer_to_rcv; - hal_trans.send_buffer = (uint8_t*)trans_buf->buffer_to_send; - hal_trans.cmd = trans->cmd; - hal_trans.addr = trans->addr; - hal_trans.cs_keep_active = (trans->flags & SPI_TRANS_CS_KEEP_ACTIVE) ? 
1 : 0; - - //Set up OIO/QIO/DIO if needed - hal_trans.line_mode.data_lines = (trans->flags & SPI_TRANS_MODE_DIO) ? 2 : - (trans->flags & SPI_TRANS_MODE_QIO) ? 4 : 1; -#if SOC_SPI_SUPPORT_OCT - if (trans->flags & SPI_TRANS_MODE_OCT) { - hal_trans.line_mode.data_lines = 8; - } -#endif - hal_trans.line_mode.addr_lines = (trans->flags & SPI_TRANS_MULTILINE_ADDR) ? hal_trans.line_mode.data_lines : 1; - hal_trans.line_mode.cmd_lines = (trans->flags & SPI_TRANS_MULTILINE_CMD) ? hal_trans.line_mode.data_lines : 1; - - if (trans->flags & SPI_TRANS_VARIABLE_CMD) { - hal_trans.cmd_bits = ((spi_transaction_ext_t *)trans)->command_bits; - } else { - hal_trans.cmd_bits = dev->cfg.command_bits; - } - if (trans->flags & SPI_TRANS_VARIABLE_ADDR) { - hal_trans.addr_bits = ((spi_transaction_ext_t *)trans)->address_bits; - } else { - hal_trans.addr_bits = dev->cfg.address_bits; - } - if (trans->flags & SPI_TRANS_VARIABLE_DUMMY) { - hal_trans.dummy_bits = ((spi_transaction_ext_t *)trans)->dummy_bits; - } else { - hal_trans.dummy_bits = dev->cfg.dummy_bits; - } - + spi_format_hal_trans_struct(dev, trans_buf, &hal_trans); spi_hal_setup_trans(hal, hal_dev, &hal_trans); s_spi_prepare_data(dev, &hal_trans); @@ -758,6 +780,41 @@ static void SPI_MASTER_ISR_ATTR spi_post_trans(spi_host_t *host) host->cur_cs = DEV_NUM_MAX; } +#if SOC_SPI_SCT_SUPPORTED +static void SPI_MASTER_ISR_ATTR spi_new_sct_trans(spi_device_t *dev, spi_sct_desc_priv_t *cur_sct_trans) +{ + dev->host->cur_cs = dev->id; + + //Reconfigure according to device settings, the function only has effect when the dev_id is changed. + spi_setup_device(dev); + + spi_hal_sct_load_dma_link(&dev->host->hal, cur_sct_trans->rx_seg_head, cur_sct_trans->tx_seg_head); + if (dev->cfg.pre_cb) { + dev->cfg.pre_cb((spi_transaction_t *)cur_sct_trans->sct_trans_desc_head); + } + + //Kick off transfer + spi_hal_user_start(&dev->host->hal); +} + +static void SPI_MASTER_ISR_ATTR spi_post_sct_trans(spi_host_t *host) +{ + if (host->cur_sct_trans.rx_seg_head == NULL) { + assert(host->cur_sct_trans.rx_used_desc_num == 0); + } + + portENTER_CRITICAL_ISR(&host->spinlock); + spi_hal_sct_tx_dma_desc_recycle(&host->hal, host->cur_sct_trans.tx_used_desc_num); + spi_hal_sct_rx_dma_desc_recycle(&host->hal, host->cur_sct_trans.rx_used_desc_num); + portEXIT_CRITICAL_ISR(&host->spinlock); + if (host->device[host->cur_cs]->cfg.post_cb) { + host->device[host->cur_cs]->cfg.post_cb((spi_transaction_t *)host->cur_sct_trans.sct_trans_desc_head); + } + + host->cur_cs = DEV_NUM_MAX; +} +#endif //#if SOC_SPI_SCT_SUPPORTED + // This is run in interrupt context. static void SPI_MASTER_ISR_ATTR spi_intr(void *arg) { @@ -769,7 +826,11 @@ static void SPI_MASTER_ISR_ATTR spi_intr(void *arg) const spi_dma_ctx_t *dma_ctx = host->dma_ctx; #endif +#if SOC_SPI_SCT_SUPPORTED + assert(spi_hal_usr_is_done(&host->hal) || spi_ll_get_intr(host->hal.hw, SPI_LL_INTR_SEG_DONE)); +#else assert(spi_hal_usr_is_done(&host->hal)); +#endif /* * Help to skip the handling of in-flight transaction, and disable of the interrupt. @@ -804,15 +865,21 @@ static void SPI_MASTER_ISR_ATTR spi_intr(void *arg) #endif } - //cur_cs is changed to DEV_NUM_MAX here - spi_post_trans(host); - - if (!(host->device[cs]->cfg.flags & SPI_DEVICE_NO_RETURN_RESULT)) { - //Return transaction descriptor. 
- xQueueSendFromISR(host->device[cs]->ret_queue, &host->cur_trans_buf, &do_yield); +#if SOC_SPI_SCT_SUPPORTED + if (host->sct_mode_enabled) { + //cur_cs is changed to DEV_NUM_MAX here + spi_post_sct_trans(host); + xQueueSendFromISR(host->device[cs]->ret_queue, &host->cur_sct_trans, &do_yield); + } else +#endif //#if SOC_SPI_SCT_SUPPORTED + { + //cur_cs is changed to DEV_NUM_MAX here + spi_post_trans(host); + if (!(host->device[cs]->cfg.flags & SPI_DEVICE_NO_RETURN_RESULT)) { + //Return transaction descriptor. + xQueueSendFromISR(host->device[cs]->ret_queue, &host->cur_trans_buf, &do_yield); + } } - - // spi_bus_lock_bg_pause(bus_attr->lock); #ifdef CONFIG_PM_ENABLE //Release APB frequency lock esp_pm_lock_release(bus_attr->pm_lock); @@ -849,7 +916,14 @@ static void SPI_MASTER_ISR_ATTR spi_intr(void *arg) bool dev_has_req = spi_bus_lock_bg_check_dev_req(desired_dev); if (dev_has_req) { device_to_send = host->device[spi_bus_lock_get_dev_id(desired_dev)]; - trans_found = xQueueReceiveFromISR(device_to_send->trans_queue, &host->cur_trans_buf, &do_yield); +#if SOC_SPI_SCT_SUPPORTED + if (host->sct_mode_enabled) { + trans_found = xQueueReceiveFromISR(device_to_send->trans_queue, &host->cur_sct_trans, &do_yield); + } else +#endif //#if SOC_SPI_SCT_SUPPORTED + { + trans_found = xQueueReceiveFromISR(device_to_send->trans_queue, &host->cur_trans_buf, &do_yield); + } if (!trans_found) { spi_bus_lock_bg_clear_req(desired_dev); } @@ -857,16 +931,24 @@ static void SPI_MASTER_ISR_ATTR spi_intr(void *arg) } if (trans_found) { - spi_trans_priv_t *const cur_trans_buf = &host->cur_trans_buf; +#if SOC_SPI_SCT_SUPPORTED + if (host->sct_mode_enabled) { + spi_new_sct_trans(device_to_send, &host->cur_sct_trans); + } else +#endif //#if SOC_SPI_SCT_SUPPORTED + { + spi_trans_priv_t *const cur_trans_buf = &host->cur_trans_buf; #if CONFIG_IDF_TARGET_ESP32 - if (bus_attr->dma_enabled && (cur_trans_buf->buffer_to_rcv || cur_trans_buf->buffer_to_send)) { - //mark channel as active, so that the DMA will not be reset by the slave - //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same - spicommon_dmaworkaround_transfer_active(dma_ctx->tx_dma_chan.chan_id); - } + if (bus_attr->dma_enabled && (cur_trans_buf->buffer_to_rcv || cur_trans_buf->buffer_to_send)) { + //mark channel as active, so that the DMA will not be reset by the slave + //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same + spicommon_dmaworkaround_transfer_active(dma_ctx->tx_dma_chan.chan_id); + } #endif //#if CONFIG_IDF_TARGET_ESP32 - spi_new_trans(device_to_send, cur_trans_buf); + spi_new_trans(device_to_send, cur_trans_buf); + } } + // Exit of the ISR, handle interrupt re-enable (if sending transaction), retry (if there's coming BG), // or resume acquiring device task (if quit due to bus acquiring). } while (!spi_bus_lock_bg_exit(lock, trans_found, &do_yield)); @@ -1309,3 +1391,233 @@ esp_err_t spi_bus_get_max_transaction_len(spi_host_device_t host_id, size_t *max return ESP_OK; } + +#if SOC_SPI_SCT_SUPPORTED +/** + * This function will turn this host into SCT (segmented-configure-transfer) mode. 
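+ * When enabling, the driver first initialises the SPI registers with a dummy transaction descriptor (which is never actually started) and then sets up the hardware conf-state via `spi_hal_sct_init()`; disabling calls `spi_hal_sct_deinit()`.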
+ * + * No concurrency guarantee, if a transaction is ongoing, calling this will lead to wrong transaction + */ +esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool enable) +{ + SPI_CHECK(handle, "Invalid arguments.", ESP_ERR_INVALID_ARG); + SPI_CHECK(SOC_SPI_SCT_SUPPORTED_PERIPH(handle->host->id), "Invalid arguments", ESP_ERR_INVALID_ARG); + SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot queue new transaction while previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE); + SPI_CHECK(uxQueueMessagesWaiting(handle->trans_queue) == 0, "Cannot enable SCT mode when internal Queue still has items", ESP_ERR_INVALID_STATE); + + esp_err_t ret = ESP_OK; + if (enable) { + /** + * This `fake_trans` transaction descriptor is only used to initialise the SPI registers + * This transaction won't be triggered. + */ + spi_transaction_t fake_trans = { + .flags = SPI_TRANS_USE_RXDATA | SPI_TRANS_USE_TXDATA, + .length = 8, + .tx_data = {0xff}, + }; + + spi_host_t *host = handle->host; + spi_trans_priv_t trans_buf; + spi_hal_context_t *hal = &handle->host->hal; + spi_hal_dev_config_t *hal_dev = &handle->hal_dev; + //As we know the `fake_trans` are internal, so no need to `uninstall_priv_desc` + ret = setup_priv_desc(&fake_trans, &trans_buf, (host->bus_attr->dma_enabled)); + if (ret != ESP_OK) { + return ret; + } + + //init SPI registers + spi_hal_setup_device(hal, hal_dev); + spi_hal_trans_config_t hal_trans = {}; + spi_format_hal_trans_struct(handle, &trans_buf, &hal_trans); + spi_hal_setup_trans(hal, hal_dev, &hal_trans); + + spi_hal_sct_init(&handle->host->hal); + } else { + spi_hal_sct_deinit(&handle->host->hal); + } + + handle->host->sct_mode_enabled = enable; + + return ESP_OK; +} + +static void SPI_MASTER_ATTR s_sct_init_conf_buffer(spi_hal_context_t *hal, spi_seg_transaction_t *seg_trans_desc, uint32_t seg_num) +{ + for (int i = 0; i < seg_num; i++) { + spi_hal_sct_init_conf_buffer(hal, seg_trans_desc[i].conf_buffer); + } +} + +static void SPI_MASTER_ATTR s_sct_format_conf_buffer(spi_device_handle_t handle, spi_seg_transaction_t *seg_trans_desc, bool seg_end) +{ + spi_hal_context_t *hal = &handle->host->hal; + spi_hal_dev_config_t *hal_dev = &handle->hal_dev; + spi_hal_seg_config_t seg_config = {}; + + //prep + if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_PREP_LEN_UPDATED) { + seg_config.cs_setup = seg_trans_desc->cs_ena_pretrans; + } else { + seg_config.cs_setup = handle->cfg.cs_ena_pretrans; + } + + //cmd + seg_config.cmd = seg_trans_desc->base.cmd; + if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_CMD_LEN_UPDATED) { + seg_config.cmd_bits = seg_trans_desc->command_bits; + } else { + seg_config.cmd_bits = handle->cfg.command_bits; + } + + //addr + seg_config.addr = seg_trans_desc->base.addr; + if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_ADDR_LEN_UPDATED) { + seg_config.addr_bits = seg_trans_desc->address_bits; + } else { + seg_config.addr_bits = handle->cfg.address_bits; + } + + //dummy + if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_DUMMY_LEN_UPDATED) { + seg_config.dummy_bits = seg_trans_desc->dummy_bits; + } else { + seg_config.dummy_bits = handle->cfg.dummy_bits; + } + + //dout + seg_config.tx_bitlen = seg_trans_desc->base.length; + + //din + seg_config.rx_bitlen = seg_trans_desc->base.rxlength; + + //done + if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_DONE_LEN_UPDATED) { + seg_config.cs_hold = seg_trans_desc->cs_ena_posttrans; + } else { + seg_config.cs_hold = handle->cfg.cs_ena_posttrans; + } + + //conf + if 
(seg_end) { + seg_config.seg_end = true; + } + + spi_hal_sct_format_conf_buffer(hal, &seg_config, hal_dev, seg_trans_desc->conf_buffer); +} + +esp_err_t SPI_MASTER_ATTR spi_device_queue_segment_trans(spi_device_handle_t handle, spi_seg_transaction_t *seg_trans_desc, uint32_t seg_num, TickType_t ticks_to_wait) +{ + SPI_CHECK(handle, "Invalid arguments.", ESP_ERR_INVALID_ARG); + SPI_CHECK(SOC_SPI_SCT_SUPPORTED_PERIPH(handle->host->id), "Invalid arguments", ESP_ERR_INVALID_ARG); + SPI_CHECK(handle->host->sct_mode_enabled == 1, "SCT mode isn't enabled", ESP_ERR_INVALID_STATE); + esp_err_t ret = ESP_OK; + + for (int i = 0; i < seg_num; i++) { + ret = check_trans_valid(handle, (spi_transaction_t *)&seg_trans_desc[i]); + if (ret != ESP_OK) { + return ret; + } + } + SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot queue new transaction while previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE); + + spi_hal_context_t *hal = &handle->host->hal; + s_sct_init_conf_buffer(hal, seg_trans_desc, seg_num); + + spi_hal_dma_desc_status_t dma_desc_status = SPI_HAL_DMA_DESC_NULL; + lldesc_t *tx_seg_head = NULL; + uint32_t tx_used_dma_desc_num = 0; + uint32_t tx_buf_len = 0; + lldesc_t *rx_seg_head = NULL; + uint32_t rx_used_dma_desc_num = 0; + uint32_t rx_buf_len = 0; + + /*--------------Get segment head--------------*/ + s_sct_format_conf_buffer(handle, &seg_trans_desc[0], (seg_num == 1)); + + //TX + tx_buf_len = (seg_trans_desc[0].base.length + 8 - 1) / 8; + portENTER_CRITICAL(&handle->host->spinlock); + dma_desc_status = spi_hal_sct_new_tx_dma_desc_head(hal, seg_trans_desc[0].conf_buffer, seg_trans_desc[0].base.tx_buffer, tx_buf_len, &tx_seg_head, &tx_used_dma_desc_num); + portEXIT_CRITICAL(&handle->host->spinlock); + SPI_CHECK(dma_desc_status == SPI_HAL_DMA_DESC_LINKED, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE); + + //RX + //This is modified to the same lenght as tx length, when in fd mode, else it's `rxlength` + rx_buf_len = (seg_trans_desc[0].base.rxlength + 8 - 1) / 8; + if (seg_trans_desc[0].base.rx_buffer) { + portENTER_CRITICAL(&handle->host->spinlock); + dma_desc_status = spi_hal_sct_new_rx_dma_desc_head(hal, seg_trans_desc[0].base.rx_buffer, rx_buf_len, &rx_seg_head, &rx_used_dma_desc_num); + portEXIT_CRITICAL(&handle->host->spinlock); + SPI_CHECK(dma_desc_status == SPI_HAL_DMA_DESC_LINKED, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE); + } + + /*--------------Prepare other segments--------------*/ + for (int i = 1; i < seg_num; i++) { + s_sct_format_conf_buffer(handle, &seg_trans_desc[i], (i == (seg_num - 1))); + + //TX + tx_buf_len = (seg_trans_desc[i].base.length + 8 - 1) / 8; + portENTER_CRITICAL(&handle->host->spinlock); + dma_desc_status = spi_hal_sct_link_tx_seg_dma_desc(hal, seg_trans_desc[i].conf_buffer, seg_trans_desc[i].base.tx_buffer, tx_buf_len, &tx_used_dma_desc_num); + portEXIT_CRITICAL(&handle->host->spinlock); + SPI_CHECK(dma_desc_status == SPI_HAL_DMA_DESC_LINKED, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE); + + //RX + if (seg_trans_desc[i].base.rx_buffer) { + //This is modified to the same lenght as tx length, when in fd mode, else it's `rxlength` + rx_buf_len = (seg_trans_desc[i].base.rxlength + 8 - 1) / 8; + portENTER_CRITICAL(&handle->host->spinlock); + dma_desc_status = spi_hal_sct_link_rx_seg_dma_desc(hal, 
seg_trans_desc[i].base.rx_buffer, rx_buf_len, &rx_used_dma_desc_num); + portEXIT_CRITICAL(&handle->host->spinlock); + } + } + +#ifdef CONFIG_PM_ENABLE + esp_pm_lock_acquire(handle->host->bus_attr->pm_lock); +#endif + + spi_sct_desc_priv_t sct_desc = { + .tx_seg_head = tx_seg_head, + .rx_seg_head = rx_seg_head, + .sct_trans_desc_head = seg_trans_desc, + .tx_used_desc_num = tx_used_dma_desc_num, + .rx_used_desc_num = rx_used_dma_desc_num, + }; + + BaseType_t r = xQueueSend(handle->trans_queue, (void *)&sct_desc, ticks_to_wait); + if (!r) { +#ifdef CONFIG_PM_ENABLE + //Release APB frequency lock + esp_pm_lock_release(handle->host->bus_attr->pm_lock); +#endif + return ESP_ERR_TIMEOUT; + } + + // The ISR will be invoked at correct time by the lock with `spi_bus_intr_enable`. + ret = spi_bus_lock_bg_request(handle->dev_lock); + if (ret != ESP_OK) { + return ret; + } + + return ESP_OK; +} + +esp_err_t SPI_MASTER_ATTR spi_device_get_segment_trans_result(spi_device_handle_t handle, spi_seg_transaction_t **seg_trans_desc, TickType_t ticks_to_wait) +{ + SPI_CHECK(handle, "Invalid arguments.", ESP_ERR_INVALID_ARG); + SPI_CHECK(SOC_SPI_SCT_SUPPORTED_PERIPH(handle->host->id), "Invalid arguments", ESP_ERR_INVALID_ARG); + SPI_CHECK(handle->host->sct_mode_enabled == 1, "SCT mode isn't enabled", ESP_ERR_INVALID_STATE); + spi_sct_desc_priv_t sct_desc = {}; + + BaseType_t r = xQueueReceive(handle->ret_queue, (void *)&sct_desc, ticks_to_wait); + if (!r) { + return ESP_ERR_TIMEOUT; + } + + *seg_trans_desc = sct_desc.sct_trans_desc_head; + + return ESP_OK; +} +#endif //#if SOC_SPI_SCT_SUPPORTED diff --git a/components/hal/esp32c2/include/hal/spi_ll.h b/components/hal/esp32c2/include/hal/spi_ll.h index ef3c941a68..8acbf98906 100644 --- a/components/hal/esp32c2/include/hal/spi_ll.h +++ b/components/hal/esp32c2/include/hal/spi_ll.h @@ -1260,6 +1260,282 @@ static inline int spi_ll_get_slave_hd_dummy_bits(spi_line_mode_t line_mode) return 8; } + + +/*------------------------------------------------------------------------------ + * Segmented-Configure-Transfer + *----------------------------------------------------------------------------*/ +#define SPI_LL_CONF_BUF_SET_BIT(_w, _m) ({ \ + (_w) |= (_m); \ + }) +#define SPI_LL_CONF_BUF_CLR_BIT(_w, _m) ({ \ + (_w) &= ~(_m); \ + }) + +#define SPI_LL_CONF_BUF_SET_FIELD(_w, _f, val) ({ \ + ((_w) = (((_w) & ~((_f##_V) << (_f##_S))) | (((val) & (_f##_V))<<(_f##_S)))); \ + }) + +#define SPI_LL_CONF_BUF_GET_FIELD(_w, _f) ({ \ + (((_w) >> (_f##_S)) & (_f##_V)); \ + }) + +//This offset is 1, for bitmap +#define SPI_LL_CONF_BUFFER_OFFSET (1) +//bitmap must be the first +#define SPI_LL_CONF_BITMAP_POS (0) + +#define SPI_LL_ADDR_REG_POS (0) +#define SPI_LL_CTRL_REG_POS (1) +#define SPI_LL_CLOCK_REG_POS (2) +#define SPI_LL_USER_REG_POS (3) +#define SPI_LL_USER1_REG_POS (4) +#define SPI_LL_USER2_REG_POS (5) +#define SPI_LL_MS_DLEN_REG_POS (6) +#define SPI_LL_MISC_REG_POS (7) +#define SPI_LL_DIN_MODE_REG_POS (8) +#define SPI_LL_DIN_NUM_REG_POS (9) +#define SPI_LL_DOUT_MODE_REG_POS (10) +#define SPI_LL_DMA_CONF_REG_POS (11) +#define SPI_LL_DMA_INT_ENA_REG_POS (12) +#define SPI_LL_DMA_INT_CLR_REG_POS (13) + +#define SPI_LL_SCT_MAGIC_NUMBER (0x2) + +/** + * Update the conf buffer for conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param conf_buffer Conf buffer to be updated. 
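+ * @param is_end True if this segment is the last one: the `usr_conf_nxt` bit is cleared so the CONF chain ends after this segment; otherwise the bit is set to chain into the next CONF buffer.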
+ */ +static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], bool is_end) +{ + //user reg: usr_conf_nxt + if (is_end) { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_CONF_NXT_M); + } else { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_CONF_NXT_M); + } +} + +/** + * Update the conf buffer for prep phase + * + * @param hw Beginning address of the peripheral registers. + * @param setup CS setup time + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_prep_phase_conf_buffer(spi_dev_t *hw, uint8_t setup, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: cs_setup + if(setup) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_M); + } + + //user1 reg: cs_setup_time + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_TIME, setup - 1); +} + +/** + * Update the conf buffer for cmd phase + * + * @param hw Beginning address of the peripheral registers. + * @param cmd Command value + * @param cmdlen Length of the cmd phase + * @param lsbfirst Whether LSB first + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_cmd_phase_conf_buffer(spi_dev_t *hw, uint16_t cmd, int cmdlen, bool lsbfirst, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_command + if (cmdlen) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_M); + } + + //user2 reg: usr_command_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_BITLEN, cmdlen - 1); + + //user2 reg: usr_command_value + if (lsbfirst) { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_VALUE, cmd); + } else { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_VALUE, HAL_SPI_SWAP_DATA_TX(cmd, cmdlen)); + } +} + +/** + * Update the conf buffer for addr phase + * + * @param hw Beginning address of the peripheral registers. + * @param addr Address to set + * @param addrlen Length of the address phase + * @param lsbfirst whether the LSB first feature is enabled. + * @param conf_buffer Conf buffer to be updated. 
+ */ +static inline void spi_ll_format_addr_phase_conf_buffer(spi_dev_t *hw, uint64_t addr, int addrlen, bool lsbfirst, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_addr + if (addrlen) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_M); + } + + //user1 reg: usr_addr_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_BITLEN, addrlen - 1); + + //addr reg: addr + if (lsbfirst) { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_VALUE, HAL_SWAP32(addr)); + } else { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_VALUE, (addr << (32 - addrlen))); + } +} + +/** + * Update the conf buffer for dummy phase + * + * @param hw Beginning address of the peripheral registers. + * @param dummy_n Dummy cycles used. 0 to disable the dummy phase. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_dummy_phase_conf_buffer(spi_dev_t *hw, int dummy_n, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_dummy + if (dummy_n) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_M); + } + + //user1 reg: usr_dummy_cyclelen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_CYCLELEN, dummy_n - 1); +} + +/** + * Update the conf buffer for dout phase + * + * @param hw Beginning address of the peripheral registers. + * @param bitlen output length, in bits. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_dout_phase_conf_buffer(spi_dev_t *hw, int bitlen, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + if (bitlen) { + //user reg: usr_mosi + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MOSI_M); + //dma_conf reg: dma_tx_ena + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_TX_ENA_M); + //ms_dlen reg: ms_data_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_MS_DATA_BITLEN, bitlen - 1); + } else { + //user reg: usr_mosi + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MOSI_M); + //dma_conf reg: dma_tx_ena + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_TX_ENA_M); + } +} + +/** + * Update the conf buffer for din phase + * + * @param hw Beginning address of the peripheral registers. + * @param bitlen input length, in bits. + * @param conf_buffer Conf buffer to be updated. 
+ */ +static inline void spi_ll_format_din_phase_conf_buffer(spi_dev_t *hw, int bitlen, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + if (bitlen) { + //user reg: usr_miso + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MISO_M); + //dma_conf reg: dma_rx_ena + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_RX_ENA_M); + //ms_dlen reg: ms_data_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_MS_DATA_BITLEN, bitlen - 1); + } else { + //user reg: usr_miso + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MISO_M); + //dma_conf reg: dma_rx_ena + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_RX_ENA_M); + } +} + +/** + * Update the conf buffer for done phase + * + * @param hw Beginning address of the peripheral registers. + * @param setup CS hold time + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_done_phase_conf_buffer(spi_dev_t *hw, int hold, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: cs_hold + if(hold) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_M); + } + + //user1 reg: cs_hold_time + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_TIME, hold); +} + +/** + * Initialize the conf buffer: + * + * - init bitmap + * - save all register values into the rest of the conf buffer words + * + * @param hw Beginning address of the peripheral registers. + * @param conf_buffer Conf buffer to be updated. + */ +__attribute__((always_inline)) +static inline void spi_ll_init_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + conf_buffer[SPI_LL_CONF_BITMAP_POS] = 0x7FFF | (SPI_LL_SCT_MAGIC_NUMBER << 28); + conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->addr; + conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->ctrl.val; + conf_buffer[SPI_LL_CLOCK_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->clock.val; + conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user.val; + conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user1.val; + conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user2.val; + conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->ms_dlen.val; + conf_buffer[SPI_LL_MISC_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->misc.val; + conf_buffer[SPI_LL_DIN_MODE_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->din_mode.val; + conf_buffer[SPI_LL_DIN_NUM_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->din_num.val; + conf_buffer[SPI_LL_DOUT_MODE_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dout_mode.val; + conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_conf.val; + conf_buffer[SPI_LL_DMA_INT_ENA_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_int_ena.val; + conf_buffer[SPI_LL_DMA_INT_CLR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_int_clr.val; +} + +/** + * Enable/Disable the conf phase + * + * @param hw Beginning address of the peripheral registers. 
+ * @param enable True: enable; False: disable + */ +static inline void spi_ll_conf_state_enable(spi_dev_t *hw, bool enable) +{ + hw->slave.usr_conf = enable; +} + +/** + * Set Segmented-Configure-Transfer required magic value + * + * @param hw Beginning address of the peripheral registers. + * @param magic_value magic value + */ +static inline void spi_ll_set_magic_number(spi_dev_t *hw, uint8_t magic_value) +{ + hw->slave.dma_seg_magic_value = magic_value; +} + + #undef SPI_LL_RST_MASK #undef SPI_LL_UNUSED_INT_MASK diff --git a/components/hal/esp32c3/include/hal/spi_ll.h b/components/hal/esp32c3/include/hal/spi_ll.h index 41c36294d0..97bfad5b50 100644 --- a/components/hal/esp32c3/include/hal/spi_ll.h +++ b/components/hal/esp32c3/include/hal/spi_ll.h @@ -1175,6 +1175,279 @@ static inline uint32_t spi_ll_slave_hd_get_last_addr(spi_dev_t *hw) return hw->slave1.last_addr; } +/*------------------------------------------------------------------------------ + * Segmented-Configure-Transfer + *----------------------------------------------------------------------------*/ +#define SPI_LL_CONF_BUF_SET_BIT(_w, _m) ({ \ + (_w) |= (_m); \ + }) +#define SPI_LL_CONF_BUF_CLR_BIT(_w, _m) ({ \ + (_w) &= ~(_m); \ + }) + +#define SPI_LL_CONF_BUF_SET_FIELD(_w, _f, val) ({ \ + ((_w) = (((_w) & ~((_f##_V) << (_f##_S))) | (((val) & (_f##_V))<<(_f##_S)))); \ + }) + +#define SPI_LL_CONF_BUF_GET_FIELD(_w, _f) ({ \ + (((_w) >> (_f##_S)) & (_f##_V)); \ + }) + +//This offset is 1, for bitmap +#define SPI_LL_CONF_BUFFER_OFFSET (1) +//bitmap must be the first +#define SPI_LL_CONF_BITMAP_POS (0) + +#define SPI_LL_ADDR_REG_POS (0) +#define SPI_LL_CTRL_REG_POS (1) +#define SPI_LL_CLOCK_REG_POS (2) +#define SPI_LL_USER_REG_POS (3) +#define SPI_LL_USER1_REG_POS (4) +#define SPI_LL_USER2_REG_POS (5) +#define SPI_LL_MS_DLEN_REG_POS (6) +#define SPI_LL_MISC_REG_POS (7) +#define SPI_LL_DIN_MODE_REG_POS (8) +#define SPI_LL_DIN_NUM_REG_POS (9) +#define SPI_LL_DOUT_MODE_REG_POS (10) +#define SPI_LL_DMA_CONF_REG_POS (11) +#define SPI_LL_DMA_INT_ENA_REG_POS (12) +#define SPI_LL_DMA_INT_CLR_REG_POS (13) + +#define SPI_LL_SCT_MAGIC_NUMBER (0x2) + +/** + * Update the conf buffer for conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], bool is_end) +{ + //user reg: usr_conf_nxt + if (is_end) { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_CONF_NXT_M); + } else { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_CONF_NXT_M); + } +} + +/** + * Update the conf buffer for prep phase + * + * @param hw Beginning address of the peripheral registers. + * @param setup CS setup time + * @param conf_buffer Conf buffer to be updated. 
+ */ +static inline void spi_ll_format_prep_phase_conf_buffer(spi_dev_t *hw, uint8_t setup, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: cs_setup + if(setup) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_M); + } + + //user1 reg: cs_setup_time + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_TIME, setup - 1); +} + +/** + * Update the conf buffer for cmd phase + * + * @param hw Beginning address of the peripheral registers. + * @param cmd Command value + * @param cmdlen Length of the cmd phase + * @param lsbfirst Whether LSB first + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_cmd_phase_conf_buffer(spi_dev_t *hw, uint16_t cmd, int cmdlen, bool lsbfirst, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_command + if (cmdlen) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_M); + } + + //user2 reg: usr_command_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_BITLEN, cmdlen - 1); + + //user2 reg: usr_command_value + if (lsbfirst) { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_VALUE, cmd); + } else { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_VALUE, HAL_SPI_SWAP_DATA_TX(cmd, cmdlen)); + } +} + +/** + * Update the conf buffer for addr phase + * + * @param hw Beginning address of the peripheral registers. + * @param addr Address to set + * @param addrlen Length of the address phase + * @param lsbfirst whether the LSB first feature is enabled. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_addr_phase_conf_buffer(spi_dev_t *hw, uint64_t addr, int addrlen, bool lsbfirst, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_addr + if (addrlen) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_M); + } + + //user1 reg: usr_addr_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_BITLEN, addrlen - 1); + + //addr reg: addr + if (lsbfirst) { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_VALUE, HAL_SWAP32(addr)); + } else { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_VALUE, (addr << (32 - addrlen))); + } +} + +/** + * Update the conf buffer for dummy phase + * + * @param hw Beginning address of the peripheral registers. + * @param dummy_n Dummy cycles used. 0 to disable the dummy phase. + * @param conf_buffer Conf buffer to be updated. 
+ */ +static inline void spi_ll_format_dummy_phase_conf_buffer(spi_dev_t *hw, int dummy_n, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_dummy + if (dummy_n) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_M); + } + + //user1 reg: usr_dummy_cyclelen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_CYCLELEN, dummy_n - 1); +} + +/** + * Update the conf buffer for dout phase + * + * @param hw Beginning address of the peripheral registers. + * @param bitlen output length, in bits. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_dout_phase_conf_buffer(spi_dev_t *hw, int bitlen, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + if (bitlen) { + //user reg: usr_mosi + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MOSI_M); + //dma_conf reg: dma_tx_ena + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_TX_ENA_M); + //ms_dlen reg: ms_data_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_MS_DATA_BITLEN, bitlen - 1); + } else { + //user reg: usr_mosi + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MOSI_M); + //dma_conf reg: dma_tx_ena + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_TX_ENA_M); + } +} + +/** + * Update the conf buffer for din phase + * + * @param hw Beginning address of the peripheral registers. + * @param bitlen input length, in bits. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_din_phase_conf_buffer(spi_dev_t *hw, int bitlen, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + if (bitlen) { + //user reg: usr_miso + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MISO_M); + //dma_conf reg: dma_rx_ena + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_RX_ENA_M); + //ms_dlen reg: ms_data_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_MS_DATA_BITLEN, bitlen - 1); + } else { + //user reg: usr_miso + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MISO_M); + //dma_conf reg: dma_rx_ena + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_RX_ENA_M); + } +} + +/** + * Update the conf buffer for done phase + * + * @param hw Beginning address of the peripheral registers. + * @param setup CS hold time + * @param conf_buffer Conf buffer to be updated. 
+ */ +static inline void spi_ll_format_done_phase_conf_buffer(spi_dev_t *hw, int hold, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: cs_hold + if(hold) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_M); + } + + //user1 reg: cs_hold_time + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_TIME, hold); +} + +/** + * Initialize the conf buffer: + * + * - init bitmap + * - save all register values into the rest of the conf buffer words + * + * @param hw Beginning address of the peripheral registers. + * @param conf_buffer Conf buffer to be updated. + */ +__attribute__((always_inline)) +static inline void spi_ll_init_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + conf_buffer[SPI_LL_CONF_BITMAP_POS] = 0x7FFF | (SPI_LL_SCT_MAGIC_NUMBER << 28); + conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->addr; + conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->ctrl.val; + conf_buffer[SPI_LL_CLOCK_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->clock.val; + conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user.val; + conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user1.val; + conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user2.val; + conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->ms_dlen.val; + conf_buffer[SPI_LL_MISC_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->misc.val; + conf_buffer[SPI_LL_DIN_MODE_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->din_mode.val; + conf_buffer[SPI_LL_DIN_NUM_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->din_num.val; + conf_buffer[SPI_LL_DOUT_MODE_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dout_mode.val; + conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_conf.val; + conf_buffer[SPI_LL_DMA_INT_ENA_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_int_ena.val; + conf_buffer[SPI_LL_DMA_INT_CLR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_int_clr.val; +} + +/** + * Enable/Disable the conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param enable True: enable; False: disable + */ +static inline void spi_ll_conf_state_enable(spi_dev_t *hw, bool enable) +{ + hw->slave.usr_conf = enable; +} + +/** + * Set Segmented-Configure-Transfer required magic value + * + * @param hw Beginning address of the peripheral registers. 
+ * @param magic_value magic value + */ +static inline void spi_ll_set_magic_number(spi_dev_t *hw, uint8_t magic_value) +{ + hw->slave.dma_seg_magic_value = magic_value; +} + #undef SPI_LL_RST_MASK #undef SPI_LL_UNUSED_INT_MASK diff --git a/components/hal/esp32h2/include/hal/spi_ll.h b/components/hal/esp32h2/include/hal/spi_ll.h index baffa95787..68b6434406 100644 --- a/components/hal/esp32h2/include/hal/spi_ll.h +++ b/components/hal/esp32h2/include/hal/spi_ll.h @@ -1166,6 +1166,281 @@ static inline uint32_t spi_ll_slave_hd_get_last_addr(spi_dev_t *hw) return hw->slave1.slv_last_addr; } + +/*------------------------------------------------------------------------------ + * Segmented-Configure-Transfer + *----------------------------------------------------------------------------*/ +#define SPI_LL_CONF_BUF_SET_BIT(_w, _m) ({ \ + (_w) |= (_m); \ + }) +#define SPI_LL_CONF_BUF_CLR_BIT(_w, _m) ({ \ + (_w) &= ~(_m); \ + }) + +#define SPI_LL_CONF_BUF_SET_FIELD(_w, _f, val) ({ \ + ((_w) = (((_w) & ~((_f##_V) << (_f##_S))) | (((val) & (_f##_V))<<(_f##_S)))); \ + }) + +#define SPI_LL_CONF_BUF_GET_FIELD(_w, _f) ({ \ + (((_w) >> (_f##_S)) & (_f##_V)); \ + }) + +//This offset is 1, for bitmap +#define SPI_LL_CONF_BUFFER_OFFSET (1) +//bitmap must be the first +#define SPI_LL_CONF_BITMAP_POS (0) + +#define SPI_LL_ADDR_REG_POS (0) +#define SPI_LL_CTRL_REG_POS (1) +#define SPI_LL_CLOCK_REG_POS (2) +#define SPI_LL_USER_REG_POS (3) +#define SPI_LL_USER1_REG_POS (4) +#define SPI_LL_USER2_REG_POS (5) +#define SPI_LL_MS_DLEN_REG_POS (6) +#define SPI_LL_MISC_REG_POS (7) +#define SPI_LL_DIN_MODE_REG_POS (8) +#define SPI_LL_DIN_NUM_REG_POS (9) +#define SPI_LL_DOUT_MODE_REG_POS (10) +#define SPI_LL_DMA_CONF_REG_POS (11) +#define SPI_LL_DMA_INT_ENA_REG_POS (12) +#define SPI_LL_DMA_INT_CLR_REG_POS (13) + +#define SPI_LL_SCT_MAGIC_NUMBER (0x2) + +/** + * Update the conf buffer for conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], bool is_end) +{ + //user reg: usr_conf_nxt + if (is_end) { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_CONF_NXT_M); + } else { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_CONF_NXT_M); + } +} + +/** + * Update the conf buffer for prep phase + * + * @param hw Beginning address of the peripheral registers. + * @param setup CS setup time + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_prep_phase_conf_buffer(spi_dev_t *hw, uint8_t setup, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: cs_setup + if(setup) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_M); + } + + //user1 reg: cs_setup_time + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_TIME, setup - 1); +} + +/** + * Update the conf buffer for cmd phase + * + * @param hw Beginning address of the peripheral registers. + * @param cmd Command value + * @param cmdlen Length of the cmd phase + * @param lsbfirst Whether LSB first + * @param conf_buffer Conf buffer to be updated. 
+ */ +static inline void spi_ll_format_cmd_phase_conf_buffer(spi_dev_t *hw, uint16_t cmd, int cmdlen, bool lsbfirst, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_command + if (cmdlen) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_M); + } + + //user2 reg: usr_command_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_BITLEN, cmdlen - 1); + + //user2 reg: usr_command_value + if (lsbfirst) { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_VALUE, cmd); + } else { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_VALUE, HAL_SPI_SWAP_DATA_TX(cmd, cmdlen)); + } +} + +/** + * Update the conf buffer for addr phase + * + * @param hw Beginning address of the peripheral registers. + * @param addr Address to set + * @param addrlen Length of the address phase + * @param lsbfirst whether the LSB first feature is enabled. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_addr_phase_conf_buffer(spi_dev_t *hw, uint64_t addr, int addrlen, bool lsbfirst, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_addr + if (addrlen) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_M); + } + + //user1 reg: usr_addr_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_BITLEN, addrlen - 1); + + //addr reg: addr + if (lsbfirst) { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_VALUE, HAL_SWAP32(addr)); + } else { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_VALUE, (addr << (32 - addrlen))); + } +} + +/** + * Update the conf buffer for dummy phase + * + * @param hw Beginning address of the peripheral registers. + * @param dummy_n Dummy cycles used. 0 to disable the dummy phase. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_dummy_phase_conf_buffer(spi_dev_t *hw, int dummy_n, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_dummy + if (dummy_n) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_M); + } + + //user1 reg: usr_dummy_cyclelen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_CYCLELEN, dummy_n - 1); +} + +/** + * Update the conf buffer for dout phase + * + * @param hw Beginning address of the peripheral registers. + * @param bitlen output length, in bits. + * @param conf_buffer Conf buffer to be updated. 
+ */ +static inline void spi_ll_format_dout_phase_conf_buffer(spi_dev_t *hw, int bitlen, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + if (bitlen) { + //user reg: usr_mosi + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MOSI_M); + //dma_conf reg: dma_tx_ena + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_TX_ENA_M); + //ms_dlen reg: ms_data_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_MS_DATA_BITLEN, bitlen - 1); + } else { + //user reg: usr_mosi + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MOSI_M); + //dma_conf reg: dma_tx_ena + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_TX_ENA_M); + } +} + +/** + * Update the conf buffer for din phase + * + * @param hw Beginning address of the peripheral registers. + * @param bitlen input length, in bits. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_din_phase_conf_buffer(spi_dev_t *hw, int bitlen, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + if (bitlen) { + //user reg: usr_miso + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MISO_M); + //dma_conf reg: dma_rx_ena + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_RX_ENA_M); + //ms_dlen reg: ms_data_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_MS_DATA_BITLEN, bitlen - 1); + } else { + //user reg: usr_miso + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MISO_M); + //dma_conf reg: dma_rx_ena + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_RX_ENA_M); + } +} + +/** + * Update the conf buffer for done phase + * + * @param hw Beginning address of the peripheral registers. + * @param setup CS hold time + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_done_phase_conf_buffer(spi_dev_t *hw, int hold, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: cs_hold + if(hold) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_M); + } + + //user1 reg: cs_hold_time + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_TIME, hold); +} + +/** + * Initialize the conf buffer: + * + * - init bitmap + * - save all register values into the rest of the conf buffer words + * + * @param hw Beginning address of the peripheral registers. + * @param conf_buffer Conf buffer to be updated. 
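+ *
+ * Typical flow (sketch only; the per-phase values are placeholders):
+ * @code
+ * uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX];
+ * spi_ll_init_conf_buffer(hw, conf_buffer);
+ * // then adjust individual phases for this segment, e.g.:
+ * spi_ll_format_dummy_phase_conf_buffer(hw, 8, conf_buffer);
+ * spi_ll_format_conf_phase_conf_buffer(hw, conf_buffer, true);   // this is the last segment
+ * @endcode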
+ */ +__attribute__((always_inline)) +static inline void spi_ll_init_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + conf_buffer[SPI_LL_CONF_BITMAP_POS] = 0x7FFF | (SPI_LL_SCT_MAGIC_NUMBER << 28); + conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->addr; + conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->ctrl.val; + conf_buffer[SPI_LL_CLOCK_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->clock.val; + conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user.val; + conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user1.val; + conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user2.val; + conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->ms_dlen.val; + conf_buffer[SPI_LL_MISC_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->misc.val; + conf_buffer[SPI_LL_DIN_MODE_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->din_mode.val; + conf_buffer[SPI_LL_DIN_NUM_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->din_num.val; + conf_buffer[SPI_LL_DOUT_MODE_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dout_mode.val; + conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_conf.val; + conf_buffer[SPI_LL_DMA_INT_ENA_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_int_ena.val; + conf_buffer[SPI_LL_DMA_INT_CLR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_int_clr.val; +} + +/** + * Enable/Disable the conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param enable True: enable; False: disable + */ +static inline void spi_ll_conf_state_enable(spi_dev_t *hw, bool enable) +{ + hw->slave.usr_conf = enable; +} + +/** + * Set Segmented-Configure-Transfer required magic value + * + * @param hw Beginning address of the peripheral registers. + * @param magic_value magic value + */ +static inline void spi_ll_set_magic_number(spi_dev_t *hw, uint8_t magic_value) +{ + hw->slave.dma_seg_magic_value = magic_value; +} + + #undef SPI_LL_RST_MASK #undef SPI_LL_UNUSED_INT_MASK diff --git a/components/hal/esp32s2/include/hal/spi_ll.h b/components/hal/esp32s2/include/hal/spi_ll.h index 9dc462cfd5..2a0d6f925e 100644 --- a/components/hal/esp32s2/include/hal/spi_ll.h +++ b/components/hal/esp32s2/include/hal/spi_ll.h @@ -18,10 +18,13 @@ #include #include "esp_types.h" #include "esp_attr.h" +#include "esp_bit_defs.h" #include "soc/spi_periph.h" #include "soc/spi_struct.h" +#include "soc/spi_reg.h" #include "soc/dport_reg.h" #include "soc/lldesc.h" +#include "soc/soc_caps.h" #include "hal/assert.h" #include "hal/misc.h" #include "hal/spi_types.h" @@ -308,7 +311,7 @@ static inline void spi_ll_user_start(spi_dev_t *hw) */ static inline uint32_t spi_ll_get_running_cmd(spi_dev_t *hw) { - return hw->cmd.val; + return hw->cmd.usr; } /** @@ -1017,7 +1020,6 @@ static inline void spi_ll_set_command(spi_dev_t *hw, uint16_t cmd, int cmdlen, b * more straightly. 
*/ HAL_FORCE_MODIFY_U32_REG_FIELD(hw->user2, usr_command_value, HAL_SPI_SWAP_DATA_TX(cmd, cmdlen)); - } } @@ -1463,6 +1465,297 @@ static inline bool spi_ll_tx_get_empty_err(spi_dev_t *hw) return hw->dma_int_raw.outfifo_empty_err; } +/*------------------------------------------------------------------------------ + * Segmented-Configure-Transfer + *----------------------------------------------------------------------------*/ +#define SPI_LL_CONF_BUF_SET_BIT(_w, _m) ({ \ + (_w) |= (_m); \ + }) +#define SPI_LL_CONF_BUF_CLR_BIT(_w, _m) ({ \ + (_w) &= ~(_m); \ + }) + +#define SPI_LL_CONF_BUF_SET_FIELD(_w, _f, val) ({ \ + ((_w) = (((_w) & ~((_f##_V) << (_f##_S))) | (((val) & (_f##_V))<<(_f##_S)))); \ + }) + +#define SPI_LL_CONF_BUF_GET_FIELD(_w, _f) ({ \ + (((_w) >> (_f##_S)) & (_f##_V)); \ + }) + +//This offset is 1, for bitmap +#define SPI_LL_CONF_BUFFER_OFFSET (1) +//bitmap must be the first +#define SPI_LL_CONF_BITMAP_POS (0) + +#define SPI_LL_CMD_REG_POS (0) +#define SPI_LL_ADDR_REG_POS (1) +#define SPI_LL_CTRL_REG_POS (2) +#define SPI_LL_CTRL1_REG_POS (3) +#define SPI_LL_CTRL2_REG_POS (4) +#define SPI_LL_CLOCK_REG_POS (5) +#define SPI_LL_USER_REG_POS (6) +#define SPI_LL_USER1_REG_POS (7) +#define SPI_LL_USER2_REG_POS (8) +#define SPI_LL_MOSI_DLEN_REG_POS (9) +#define SPI_LL_MISO_DLEN_REG_POS (10) +#define SPI_LL_MISC_REG_POS (11) +#define SPI_LL_SLAVE_REG_POS (12) +#define SPI_LL_FSM_REG_POS (13) +#define SPI_LL_HOLD_REG_POS (14) +#define SPI_LL_DMA_INT_ENA_REG_POS (15) +#define SPI_LL_DMA_INT_RAW_REG_POS (16) +#define SPI_LL_DMA_INT_CLR_REG_POS (17) +#define SPI_LL_DIN_MODE_REG_POS (18) +#define SPI_LL_DIN_NUM_REG_POS (19) +#define SPI_LL_DOUT_MODE_REG_POS (20) +#define SPI_LL_DOUT_NUM_REG_POS (21) +#define SPI_LL_LCD_CTRL_REG_POS (22) +#define SPI_LL_LCD_CTRL1_REG_POS (23) +#define SPI_LL_LCD_CTRL2_REG_POS (24) +#define SPI_LL_LCD_D_MODE_REG_POS (25) +#define SPI_LL_LCD_D_NUM_REG_POS (26) + +#define SPI_LL_SCT_MAGIC_NUMBER (0x2) + +/** + * Update the conf buffer for conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], bool is_end) +{ + //user reg: usr_conf_nxt + if (is_end) { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_CONF_NXT_M); + } else { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_CONF_NXT_M); + } +} + +/** + * Update the conf buffer for prep phase + * + * @param hw Beginning address of the peripheral registers. + * @param setup CS setup time + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_prep_phase_conf_buffer(spi_dev_t *hw, uint8_t setup, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: cs_setup + if(setup) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_M); + } + + //ctrl2 reg: cs_setup_time + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_CTRL2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_TIME, setup - 1); +} + +/** + * Update the conf buffer for cmd phase + * + * @param hw Beginning address of the peripheral registers. 
+ * @param cmd Command value + * @param cmdlen Length of the cmd phase + * @param lsbfirst Whether LSB first + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_cmd_phase_conf_buffer(spi_dev_t *hw, uint16_t cmd, int cmdlen, bool lsbfirst, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_command + if (cmdlen) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_M); + } + + //user2 reg: usr_command_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_BITLEN, cmdlen - 1); + + //user2 reg: usr_command_value + if (lsbfirst) { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_VALUE, cmd); + } else { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_VALUE, HAL_SPI_SWAP_DATA_TX(cmd, cmdlen)); + } +} + +/** + * Update the conf buffer for addr phase + * + * @param hw Beginning address of the peripheral registers. + * @param addr Address to set + * @param addrlen Length of the address phase + * @param lsbfirst whether the LSB first feature is enabled. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_addr_phase_conf_buffer(spi_dev_t *hw, uint64_t addr, int addrlen, bool lsbfirst, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_addr + if (addrlen) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_M); + } + + //user1 reg: usr_addr_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_BITLEN, addrlen - 1); + + //addr reg: addr + if (lsbfirst) { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_VALUE, HAL_SWAP32(addr)); + } else { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_VALUE, (addr << (32 - addrlen))); + } +} + +/** + * Update the conf buffer for dummy phase + * + * @param hw Beginning address of the peripheral registers. + * @param dummy_n Dummy cycles used. 0 to disable the dummy phase. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_dummy_phase_conf_buffer(spi_dev_t *hw, int dummy_n, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_dummy + if (dummy_n) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_M); + } + + //user1 reg: usr_dummy_cyclelen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_CYCLELEN, dummy_n - 1); +} + +/** + * Update the conf buffer for dout phase + * + * @param hw Beginning address of the peripheral registers. + * @param bitlen output length, in bits. + * @param conf_buffer Conf buffer to be updated. 
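+ *
+ * Illustrative usage (sketch; `len_bytes` is a placeholder). On this target the TX bit length
+ * is written to the MOSI_DLEN word of the conf buffer, separately from the RX (MISO_DLEN) length:
+ * @code
+ * spi_ll_format_dout_phase_conf_buffer(hw, len_bytes * 8, conf_buffer);
+ * @endcode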
+ */ +static inline void spi_ll_format_dout_phase_conf_buffer(spi_dev_t *hw, int bitlen, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_mosi + if (bitlen) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MOSI_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MOSI_M); + } + + //mosi_dlen reg: usr_mosi_bit_len + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_MOSI_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MOSI_DBITLEN, bitlen - 1); +} + +/** + * Update the conf buffer for din phase + * + * @param hw Beginning address of the peripheral registers. + * @param bitlen input length, in bits. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_din_phase_conf_buffer(spi_dev_t *hw, int bitlen, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_miso + if (bitlen) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MISO_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MISO_M); + } + + //miso_dlen reg: usr_miso_bit_len + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_MISO_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MISO_DBITLEN, bitlen - 1); +} + +/** + * Update the conf buffer for done phase + * + * @param hw Beginning address of the peripheral registers. + * @param setup CS hold time + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_done_phase_conf_buffer(spi_dev_t *hw, int hold, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: cs_hold + if(hold) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_M); + } + + //ctrl2 reg: cs_hold_time + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_CTRL2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_TIME, hold); +} + +/** + * Initialize the conf buffer: + * + * - init bitmap + * - save all register values into the rest of the conf buffer words + * + * @param hw Beginning address of the peripheral registers. + * @param conf_buffer Conf buffer to be updated. 
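+ *
+ * Typical per-segment flow (sketch only; the CS setup/hold values are placeholders):
+ * @code
+ * uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX];
+ * spi_ll_init_conf_buffer(hw, conf_buffer);
+ * spi_ll_format_prep_phase_conf_buffer(hw, 1, conf_buffer);
+ * spi_ll_format_done_phase_conf_buffer(hw, 1, conf_buffer);
+ * spi_ll_format_conf_phase_conf_buffer(hw, conf_buffer, false);  // more segments follow
+ * @endcode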
+ */ +__attribute__((always_inline)) +static inline void spi_ll_init_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + conf_buffer[SPI_LL_CONF_BITMAP_POS] = 0x7FFFFFF | (SPI_LL_SCT_MAGIC_NUMBER << 28); + conf_buffer[SPI_LL_CMD_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->cmd.val; + conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->addr; + conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->ctrl.val; + conf_buffer[SPI_LL_CTRL1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->ctrl1.val; + conf_buffer[SPI_LL_CTRL2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->ctrl2.val; + conf_buffer[SPI_LL_CLOCK_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->clock.val; + conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user.val; + conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user1.val; + conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user2.val; + conf_buffer[SPI_LL_MOSI_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->mosi_dlen.val; + conf_buffer[SPI_LL_MISO_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->miso_dlen.val; + conf_buffer[SPI_LL_MISC_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->misc.val; + conf_buffer[SPI_LL_SLAVE_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->slave.val; + conf_buffer[SPI_LL_FSM_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->fsm.val; + conf_buffer[SPI_LL_HOLD_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->hold.val; + conf_buffer[SPI_LL_DMA_INT_ENA_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_int_ena.val; + conf_buffer[SPI_LL_DMA_INT_RAW_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_int_raw.val; + conf_buffer[SPI_LL_DMA_INT_CLR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_int_clr.val; + conf_buffer[SPI_LL_DIN_MODE_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->din_mode.val; + conf_buffer[SPI_LL_DIN_NUM_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->din_num.val; + conf_buffer[SPI_LL_DOUT_MODE_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dout_mode.val; + conf_buffer[SPI_LL_DOUT_NUM_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dout_num.val; + conf_buffer[SPI_LL_LCD_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->lcd_ctrl.val; + conf_buffer[SPI_LL_LCD_CTRL1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->lcd_ctrl1.val; + conf_buffer[SPI_LL_LCD_CTRL2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->lcd_ctrl2.val; + conf_buffer[SPI_LL_LCD_D_MODE_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->lcd_d_mode.val; + conf_buffer[SPI_LL_LCD_D_NUM_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->lcd_d_num.val; +} + +/** + * Enable/Disable the conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param enable True: enable; False: disable + */ +static inline void spi_ll_conf_state_enable(spi_dev_t *hw, bool enable) +{ + hw->slv_rd_byte.usr_conf = enable; +} + +/** + * Set Segmented-Configure-Transfer required magic value + * + * @param hw Beginning address of the peripheral registers. 
+ * @param magic_value magic value + */ +static inline void spi_ll_set_magic_number(spi_dev_t *hw, uint8_t magic_value) +{ + hw->slv_rd_byte.dma_seg_magic_value = magic_value; +} + #undef SPI_LL_RST_MASK #undef SPI_LL_UNUSED_INT_MASK diff --git a/components/hal/esp32s3/include/hal/spi_ll.h b/components/hal/esp32s3/include/hal/spi_ll.h index 2670ed0d68..3c77a79358 100644 --- a/components/hal/esp32s3/include/hal/spi_ll.h +++ b/components/hal/esp32s3/include/hal/spi_ll.h @@ -1195,6 +1195,280 @@ static inline uint32_t spi_ll_slave_hd_get_last_addr(spi_dev_t *hw) return hw->slave1.last_addr; } + +/*------------------------------------------------------------------------------ + * Segmented-Configure-Transfer + *----------------------------------------------------------------------------*/ +#define SPI_LL_CONF_BUF_SET_BIT(_w, _m) ({ \ + (_w) |= (_m); \ + }) +#define SPI_LL_CONF_BUF_CLR_BIT(_w, _m) ({ \ + (_w) &= ~(_m); \ + }) + +#define SPI_LL_CONF_BUF_SET_FIELD(_w, _f, val) ({ \ + ((_w) = (((_w) & ~((_f##_V) << (_f##_S))) | (((val) & (_f##_V))<<(_f##_S)))); \ + }) + +#define SPI_LL_CONF_BUF_GET_FIELD(_w, _f) ({ \ + (((_w) >> (_f##_S)) & (_f##_V)); \ + }) + +//This offset is 1, for bitmap +#define SPI_LL_CONF_BUFFER_OFFSET (1) +//bitmap must be the first +#define SPI_LL_CONF_BITMAP_POS (0) + +#define SPI_LL_ADDR_REG_POS (0) +#define SPI_LL_CTRL_REG_POS (1) +#define SPI_LL_CLOCK_REG_POS (2) +#define SPI_LL_USER_REG_POS (3) +#define SPI_LL_USER1_REG_POS (4) +#define SPI_LL_USER2_REG_POS (5) +#define SPI_LL_MS_DLEN_REG_POS (6) +#define SPI_LL_MISC_REG_POS (7) +#define SPI_LL_DIN_MODE_REG_POS (8) +#define SPI_LL_DIN_NUM_REG_POS (9) +#define SPI_LL_DOUT_MODE_REG_POS (10) +#define SPI_LL_DMA_CONF_REG_POS (11) +#define SPI_LL_DMA_INT_ENA_REG_POS (12) +#define SPI_LL_DMA_INT_CLR_REG_POS (13) + +#define SPI_LL_SCT_MAGIC_NUMBER (0x2) + +/** + * Update the conf buffer for conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], bool is_end) +{ + //user reg: usr_conf_nxt + if (is_end) { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_CONF_NXT_M); + } else { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_CONF_NXT_M); + } +} + +/** + * Update the conf buffer for prep phase + * + * @param hw Beginning address of the peripheral registers. + * @param setup CS setup time + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_prep_phase_conf_buffer(spi_dev_t *hw, uint8_t setup, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: cs_setup + if(setup) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_M); + } + + //user1 reg: cs_setup_time + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_TIME, setup - 1); +} + +/** + * Update the conf buffer for cmd phase + * + * @param hw Beginning address of the peripheral registers. + * @param cmd Command value + * @param cmdlen Length of the cmd phase + * @param lsbfirst Whether LSB first + * @param conf_buffer Conf buffer to be updated. 
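+ *
+ * Illustrative usage (sketch; the command value and length are placeholders):
+ * @code
+ * // 16-bit command, MSB first (lsbfirst == false swaps the value into wire order)
+ * spi_ll_format_cmd_phase_conf_buffer(hw, 0x00AB, 16, false, conf_buffer);
+ * @endcode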
+ */ +static inline void spi_ll_format_cmd_phase_conf_buffer(spi_dev_t *hw, uint16_t cmd, int cmdlen, bool lsbfirst, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_command + if (cmdlen) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_M); + } + + //user2 reg: usr_command_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_BITLEN, cmdlen - 1); + + //user2 reg: usr_command_value + if (lsbfirst) { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_VALUE, cmd); + } else { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_VALUE, HAL_SPI_SWAP_DATA_TX(cmd, cmdlen)); + } +} + +/** + * Update the conf buffer for addr phase + * + * @param hw Beginning address of the peripheral registers. + * @param addr Address to set + * @param addrlen Length of the address phase + * @param lsbfirst whether the LSB first feature is enabled. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_addr_phase_conf_buffer(spi_dev_t *hw, uint64_t addr, int addrlen, bool lsbfirst, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_addr + if (addrlen) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_M); + } + + //user1 reg: usr_addr_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_BITLEN, addrlen - 1); + + //addr reg: addr + if (lsbfirst) { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_VALUE, HAL_SWAP32(addr)); + } else { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_VALUE, (addr << (32 - addrlen))); + } +} + +/** + * Update the conf buffer for dummy phase + * + * @param hw Beginning address of the peripheral registers. + * @param dummy_n Dummy cycles used. 0 to disable the dummy phase. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_dummy_phase_conf_buffer(spi_dev_t *hw, int dummy_n, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_dummy + if (dummy_n) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_M); + } + + //user1 reg: usr_dummy_cyclelen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_CYCLELEN, dummy_n - 1); +} + +/** + * Update the conf buffer for dout phase + * + * @param hw Beginning address of the peripheral registers. + * @param bitlen output length, in bits. + * @param conf_buffer Conf buffer to be updated. 
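+ *
+ * Illustrative usage (sketch; 64 is a placeholder byte count):
+ * @code
+ * spi_ll_format_dout_phase_conf_buffer(hw, 64 * 8, conf_buffer);  // also sets dma_tx_ena and ms_data_bitlen
+ * @endcode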
+ */ +static inline void spi_ll_format_dout_phase_conf_buffer(spi_dev_t *hw, int bitlen, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + if (bitlen) { + //user reg: usr_mosi + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MOSI_M); + //dma_conf reg: dma_tx_ena + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_TX_ENA_M); + //ms_dlen reg: ms_data_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_MS_DATA_BITLEN, bitlen - 1); + } else { + //user reg: usr_mosi + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MOSI_M); + //dma_conf reg: dma_tx_ena + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_TX_ENA_M); + } +} + +/** + * Update the conf buffer for din phase + * + * @param hw Beginning address of the peripheral registers. + * @param bitlen input length, in bits. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_din_phase_conf_buffer(spi_dev_t *hw, int bitlen, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + if (bitlen) { + //user reg: usr_miso + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MISO_M); + //dma_conf reg: dma_rx_ena + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_RX_ENA_M); + //ms_dlen reg: ms_data_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_MS_DATA_BITLEN, bitlen - 1); + } else { + //user reg: usr_miso + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MISO_M); + //dma_conf reg: dma_rx_ena + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_RX_ENA_M); + } +} + +/** + * Update the conf buffer for done phase + * + * @param hw Beginning address of the peripheral registers. + * @param setup CS hold time + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_done_phase_conf_buffer(spi_dev_t *hw, int hold, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: cs_hold + if(hold) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_M); + } + + //user1 reg: cs_hold_time + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_TIME, hold); +} + +/** + * Initialize the conf buffer: + * + * - init bitmap + * - save all register values into the rest of the conf buffer words + * + * @param hw Beginning address of the peripheral registers. + * @param conf_buffer Conf buffer to be updated. 
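+ *
+ * Typical flow when bringing up SCT mode (sketch only; mirrors how the HAL layer in this patch uses these helpers):
+ * @code
+ * spi_ll_conf_state_enable(hw, true);
+ * spi_ll_set_magic_number(hw, SPI_LL_SCT_MAGIC_NUMBER);
+ * uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX];
+ * spi_ll_init_conf_buffer(hw, conf_buffer);
+ * @endcode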
+ */ +__attribute__((always_inline)) +static inline void spi_ll_init_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + conf_buffer[SPI_LL_CONF_BITMAP_POS] = 0x7FFF | (SPI_LL_SCT_MAGIC_NUMBER << 28); + conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->addr; + conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->ctrl.val; + conf_buffer[SPI_LL_CLOCK_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->clock.val; + conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user.val; + conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user1.val; + conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user2.val; + conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->ms_dlen.val; + conf_buffer[SPI_LL_MISC_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->misc.val; + conf_buffer[SPI_LL_DIN_MODE_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->din_mode.val; + conf_buffer[SPI_LL_DIN_NUM_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->din_num.val; + conf_buffer[SPI_LL_DOUT_MODE_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dout_mode.val; + conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_conf.val; + conf_buffer[SPI_LL_DMA_INT_ENA_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_int_ena.val; + conf_buffer[SPI_LL_DMA_INT_CLR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_int_clr.val; +} + +/** + * Enable/Disable the conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param enable True: enable; False: disable + */ +static inline void spi_ll_conf_state_enable(spi_dev_t *hw, bool enable) +{ + hw->slave.usr_conf = enable; +} + +/** + * Set Segmented-Configure-Transfer required magic value + * + * @param hw Beginning address of the peripheral registers. + * @param magic_value magic value + */ +static inline void spi_ll_set_magic_number(spi_dev_t *hw, uint8_t magic_value) +{ + hw->slave.dma_seg_magic_value = magic_value; +} + #undef SPI_LL_RST_MASK #undef SPI_LL_UNUSED_INT_MASK diff --git a/components/hal/include/hal/spi_hal.h b/components/hal/include/hal/spi_hal.h index ca21ad8a4d..d7b1e15fd8 100644 --- a/components/hal/include/hal/spi_hal.h +++ b/components/hal/include/hal/spi_hal.h @@ -48,6 +48,15 @@ typedef dma_descriptor_align4_t spi_dma_desc_t; typedef dma_descriptor_align8_t spi_dma_desc_t; #endif +/** + * @brief Enum for DMA descriptor status + */ +typedef enum spi_hal_dma_desc_status_t { + SPI_HAL_DMA_DESC_NULL = 0, ///< Null descriptos + SPI_HAL_DMA_DESC_RUN_OUT = 1, ///< DMA descriptors are not enough for data + SPI_HAL_DMA_DESC_LINKED = 2, ///< DMA descriptors are linked successfully +} spi_hal_dma_desc_status_t; + /** * Input parameters to the ``spi_hal_cal_clock_conf`` to calculate the timing configuration */ @@ -103,6 +112,17 @@ typedef struct { /* Configured by driver at initialization, don't touch */ spi_dev_t *hw; ///< Beginning address of the peripheral registers. bool dma_enabled; ///< Whether the DMA is enabled, do not update after initialization + +#if SOC_SPI_SCT_SUPPORTED + /* Segmented-Configure-Transfer required, configured by driver, don't touch */ + uint32_t tx_free_desc_num; + uint32_t rx_free_desc_num; + lldesc_t *cur_tx_seg_link; ///< Current TX DMA descriptor used for sct mode. + lldesc_t *cur_rx_seg_link; ///< Current RX DMA descriptor used for sct mode. 
+ lldesc_t *tx_seg_link_tail; ///< Tail of the TX DMA descriptor link + lldesc_t *rx_seg_link_tail; ///< Tail of the RX DMA descriptor link +#endif //#if SOC_SPI_SCT_SUPPORTED + /* Internal parameters, don't touch */ spi_hal_trans_config_t trans_config; ///< Transaction configuration } spi_hal_context_t; @@ -133,6 +153,32 @@ typedef struct { };//boolean configurations } spi_hal_dev_config_t; +#if SOC_SPI_SCT_SUPPORTED +/** + * SCT mode required configurations, per segment + */ +typedef struct { + /* CONF State */ + bool seg_end; ///< True: this segment is the end; False: this segment isn't the end; + /* PREP State */ + int cs_setup; ///< Setup time of CS active edge before the first SPI clock + /* CMD State */ + uint16_t cmd; ///< Command value to be sent + int cmd_bits; ///< Length (in bits) of the command phase + /* ADDR State */ + uint64_t addr; ///< Address value to be sent + int addr_bits; ///< Length (in bits) of the address phase + /* DUMMY State */ + int dummy_bits; ///< Base length (in bits) of the dummy phase. + /* DOUT State */ + int tx_bitlen; ///< TX length, in bits + /* DIN State */ + int rx_bitlen; ///< RX length, in bits + /* DONE State */ + int cs_hold; ///< Hold time of CS inactive edge after the last SPI clock +} spi_hal_seg_config_t; +#endif //#if SOC_SPI_SCT_SUPPORTED + /** * Init the peripheral and the context. * @@ -266,6 +312,125 @@ void spi_hal_cal_timing(int source_freq_hz, int eff_clk, bool gpio_is_used, int */ int spi_hal_get_freq_limit(bool gpio_is_used, int input_delay_ns); +#if SOC_SPI_SCT_SUPPORTED +/*---------------------------------------------------------- + * Segmented-Configure-Transfer (SCT) Mode + * ---------------------------------------------------------*/ +/** + * Initialise SCT mode required registers and hal states + * + * @param hal Context of the HAL layer. + */ +void spi_hal_sct_init(spi_hal_context_t *hal); + +/** + * Initialise conf buffer, give it an initial value + * + * @param hal Context of the HAL layer. + */ +void spi_hal_sct_init_conf_buffer(spi_hal_context_t *hal, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]); + +/** + * Format the conf buffer + * According to the `spi_hal_seg_config_t`, update the conf buffer + * + * @param hal Context of the HAL layer. + * @param config Conf buffer configuration, per segment. See `spi_hal_seg_config_t` to know what can be configured + * @param conf_buffer Conf buffer + */ +void spi_hal_sct_format_conf_buffer(spi_hal_context_t *hal, const spi_hal_seg_config_t *config, const spi_hal_dev_config_t *dev, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]); + +/** + * Format tx dma descriptor(s) for a SCT head + * + * @param hal Context of the HAL layer. 
+ * @param conf_buffer Conf buffer + * @param send_buffer TX buffer + * @param buf_len_bytes TX buffer length, in bytes + * @param[out] trans_head SCT dma descriptor head + * @param[out] used_desc_num After formatting, `used_desc_num` number of descriptors are used + * + * @return + * - SPI_HAL_DMA_DESC_LINKED: Successfully format these dma descriptors, and link together + * - SPI_HAL_DMA_DESC_RUN_OUT: Run out of dma descriptors, should alloc more, or wait until enough number of descriptors are recycled (by `spi_hal_sct_tx_dma_desc_recycle`) + */ +spi_hal_dma_desc_status_t spi_hal_sct_new_tx_dma_desc_head(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head, uint32_t *used_desc_num); + +/** + * Format tx dma descriptor(s) for a segment, and linked it to its previous segment + * + * @param hal Context of the HAL layer. + * @param conf_buffer Conf buffer + * @param send_buffer TX buffer + * @param buf_len_bytes TX buffer length, in bytes + * @param[out] used_desc_num After formatting, `used_desc_num` number of descriptors are used + * + * @return + * - SPI_HAL_DMA_DESC_LINKED: Successfully format these dma descriptors, and link together + * - SPI_HAL_DMA_DESC_RUN_OUT: Run out of dma descriptors, should alloc more, or wait until enough number of descriptors are recycled (by `spi_hal_sct_tx_dma_desc_recycle`) + */ +spi_hal_dma_desc_status_t spi_hal_sct_link_tx_seg_dma_desc(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num); + +/** + * Recycle used tx dma descriptors (back to available state, NOT a memory free) + * + * @param hal Context of the HAL layer. + * @param recycle_num Number of the to-be-recycled descriptors + */ +void spi_hal_sct_tx_dma_desc_recycle(spi_hal_context_t *hal, uint32_t recycle_num); + +/** + * Format rx dma descriptor(s) for a SCT head + * + * @param hal Context of the HAL layer. + * @param recv_buffer RX buffer + * @param buf_len_bytes RX buffer length, in bytes + * @param[out] trans_head SCT dma descriptor head + * @param[out] used_desc_num After formatting, `used_desc_num` number of descriptors are used + * + * @return + * - SPI_HAL_DMA_DESC_LINKED: Successfully format these dma descriptors, and link together + * - SPI_HAL_DMA_DESC_RUN_OUT: Run out of dma descriptors, should alloc more, or wait until enough number of descriptors are recycled (by `spi_hal_sct_tx_dma_desc_recycle`) + */ +spi_hal_dma_desc_status_t spi_hal_sct_new_rx_dma_desc_head(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head, uint32_t *used_desc_num); + +/** + * Format rx dma descriptor(s) for a segment, and linked it to its previous segment + * + * @param hal Context of the HAL layer. 
+ * @param send_buffer RX buffer + * @param buf_len_bytes RX buffer length, in bytes + * @param[out] used_desc_num After formatting, `used_desc_num` number of descriptors are used + * + * @return + * - SPI_HAL_DMA_DESC_LINKED: Successfully format these dma descriptors, and link together + * - SPI_HAL_DMA_DESC_RUN_OUT: Run out of dma descriptors, should alloc more, or wait until enough number of descriptors are recycled (by `spi_hal_sct_tx_dma_desc_recycle`) + */ +spi_hal_dma_desc_status_t spi_hal_sct_link_rx_seg_dma_desc(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num); + +/** + * Recycle used rx dma descriptors (back to available state, NOT a memory free) + * + * @param hal Context of the HAL layer. + * @param recycle_num Number of the to-be-recycled descriptors + */ +void spi_hal_sct_rx_dma_desc_recycle(spi_hal_context_t *hal, uint32_t recycle_num); + +/** + * Load dma descriptors to dma + * Will do nothing to TX or RX dma, when `tx_seg_head` or `rx_seg_head` is NULL + * + * @param hal Context of the HAL layer. + * @param rx_seg_head Head of the SCT RX dma descriptors + * @param tx_seg_head Head of the SCT TX dma descriptors + */ +void spi_hal_sct_load_dma_link(spi_hal_context_t *hal, lldesc_t *rx_seg_head, lldesc_t *tx_seg_head); + +/** + * Deinit SCT mode related registers and hal states + */ +void spi_hal_sct_deinit(spi_hal_context_t *hal); +#endif //#if SOC_SPI_SCT_SUPPORTED #endif //#if SOC_GPSPI_SUPPORTED #ifdef __cplusplus diff --git a/components/hal/spi_hal.c b/components/hal/spi_hal.c index 723e1c0520..cc51fa0b7a 100644 --- a/components/hal/spi_hal.c +++ b/components/hal/spi_hal.c @@ -52,6 +52,34 @@ void spi_hal_deinit(spi_hal_context_t *hal) } } +#if SOC_SPI_SCT_SUPPORTED +static void s_sct_reset_dma_link(spi_hal_context_t *hal) +{ + hal->tx_free_desc_num = hal->dmadesc_n; + hal->rx_free_desc_num = hal->dmadesc_n; + hal->cur_tx_seg_link = hal->dmadesc_tx; + hal->cur_rx_seg_link = hal->dmadesc_rx; + hal->tx_seg_link_tail = NULL; + hal->rx_seg_link_tail = NULL; +} + +void spi_hal_sct_init(spi_hal_context_t *hal) +{ + s_sct_reset_dma_link(hal); + spi_ll_conf_state_enable(hal->hw, true); + spi_ll_set_magic_number(hal->hw, SPI_LL_SCT_MAGIC_NUMBER); + spi_ll_enable_intr(hal->hw, SPI_LL_INTR_SEG_DONE); + spi_ll_set_intr(hal->hw, SPI_LL_INTR_SEG_DONE); +} + +void spi_hal_sct_deinit(spi_hal_context_t *hal) +{ + spi_ll_conf_state_enable(hal->hw, false); + spi_ll_disable_intr(hal->hw, SPI_LL_INTR_SEG_DONE); + spi_ll_clear_intr(hal->hw, SPI_LL_INTR_SEG_DONE); +} +#endif //#if SOC_SPI_SCT_SUPPORTED + esp_err_t spi_hal_cal_clock_conf(const spi_hal_timing_param_t *timing_param, spi_hal_timing_conf_t *timing_conf) { spi_hal_timing_conf_t temp_conf = {}; diff --git a/components/hal/spi_hal_iram.c b/components/hal/spi_hal_iram.c index f7b3526bf8..3db2f89638 100644 --- a/components/hal/spi_hal_iram.c +++ b/components/hal/spi_hal_iram.c @@ -162,3 +162,177 @@ void spi_hal_fetch_result(const spi_hal_context_t *hal) spi_ll_read_buffer(hal->hw, trans->rcv_buffer, trans->rx_bitlen); } } + +#if SOC_SPI_SCT_SUPPORTED +/*------------------------------------------------------------------------------ + * Segmented-Configure-Transfer + *----------------------------------------------------------------------------*/ +void spi_hal_sct_init_conf_buffer(spi_hal_context_t *hal, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + spi_ll_init_conf_buffer(hal->hw, conf_buffer); +} + +void spi_hal_sct_format_conf_buffer(spi_hal_context_t *hal, const 
spi_hal_seg_config_t *config, const spi_hal_dev_config_t *dev, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + spi_ll_format_prep_phase_conf_buffer(hal->hw, config->cs_setup, conf_buffer); + spi_ll_format_cmd_phase_conf_buffer(hal->hw, config->cmd, config->cmd_bits, dev->tx_lsbfirst, conf_buffer); + spi_ll_format_addr_phase_conf_buffer(hal->hw, config->addr, config->addr_bits, dev->rx_lsbfirst, conf_buffer); + spi_ll_format_dummy_phase_conf_buffer(hal->hw, config->dummy_bits, conf_buffer); + spi_ll_format_dout_phase_conf_buffer(hal->hw, config->tx_bitlen, conf_buffer); + spi_ll_format_din_phase_conf_buffer(hal->hw, config->rx_bitlen, conf_buffer); + spi_ll_format_done_phase_conf_buffer(hal->hw, config->cs_hold, conf_buffer); + spi_ll_format_conf_phase_conf_buffer(hal->hw, conf_buffer, config->seg_end); +} + +void spi_hal_sct_load_dma_link(spi_hal_context_t *hal, lldesc_t *rx_seg_head, lldesc_t *tx_seg_head) +{ + spi_ll_clear_intr(hal->hw, SPI_LL_INTR_SEG_DONE); + + HAL_ASSERT(hal->dma_enabled); + if (rx_seg_head) { + spi_dma_ll_rx_reset(hal->dma_in, hal->rx_dma_chan); + spi_ll_dma_rx_fifo_reset(hal->hw); + spi_ll_infifo_full_clr(hal->hw); + spi_ll_dma_rx_enable(hal->hw, 1); + spi_dma_ll_rx_start(hal->dma_in, hal->rx_dma_chan, rx_seg_head); + } + + if (tx_seg_head) { + spi_dma_ll_tx_reset(hal->dma_out, hal->tx_dma_chan); + spi_ll_dma_tx_fifo_reset(hal->hw); + spi_ll_outfifo_empty_clr(hal->hw); + spi_ll_dma_tx_enable(hal->hw, 1); + spi_dma_ll_tx_start(hal->dma_out, hal->tx_dma_chan, tx_seg_head); + } +} + +/*----------------------------------------------------------- + * Below hal functions should be in the same spinlock + *-----------------------------------------------------------*/ +/*------------------------- + * TX + *------------------------*/ +void spi_hal_sct_tx_dma_desc_recycle(spi_hal_context_t *hal, uint32_t recycle_num) +{ + hal->tx_free_desc_num += recycle_num; +} + +static void s_sct_prepare_tx_seg(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head) +{ + HAL_ASSERT(hal->tx_free_desc_num >= 1 + lldesc_get_required_num(buf_len_bytes)); + + *trans_head = hal->cur_tx_seg_link; + lldesc_setup_link(hal->cur_tx_seg_link, conf_buffer, SOC_SPI_SCT_BUFFER_NUM_MAX * 4, false); + lldesc_t *conf_buffer_link = hal->cur_tx_seg_link; + hal->tx_free_desc_num -= 1; + + hal->tx_seg_link_tail = hal->cur_tx_seg_link; + hal->cur_tx_seg_link++; + if (hal->cur_tx_seg_link == hal->dmadesc_tx + hal->dmadesc_n) { + //As there is enough space, so we simply point this to the pool head + hal->cur_tx_seg_link = hal->dmadesc_tx; + } + + if(send_buffer && buf_len_bytes) { + lldesc_setup_link(hal->cur_tx_seg_link, send_buffer, buf_len_bytes, false); + STAILQ_NEXT(conf_buffer_link, qe) = hal->cur_tx_seg_link; + for (int i = 0; i < lldesc_get_required_num(buf_len_bytes); i++) { + hal->tx_seg_link_tail = hal->cur_tx_seg_link; + hal->cur_tx_seg_link++; + if (hal->cur_tx_seg_link == hal->dmadesc_tx + hal->dmadesc_n) { + //As there is enough space, so we simply point this to the pool head + hal->cur_tx_seg_link = hal->dmadesc_tx; + } + } + hal->tx_free_desc_num -= lldesc_get_required_num(buf_len_bytes); + } +} + +spi_hal_dma_desc_status_t spi_hal_sct_new_tx_dma_desc_head(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head, uint32_t *used_desc_num) +{ + //1 desc for the conf_buffer, other for data. 
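+ // (Illustrative: a segment whose data buffer fits in a single DMA descriptor needs 1 + 1 = 2 free descriptors;
+ // if that many aren't free, the caller should recycle finished descriptors and retry.)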
+ if (hal->tx_free_desc_num < 1 + lldesc_get_required_num(buf_len_bytes)) { + return SPI_HAL_DMA_DESC_RUN_OUT; + } + + s_sct_prepare_tx_seg(hal, conf_buffer, send_buffer, buf_len_bytes, trans_head); + *used_desc_num = 1 + lldesc_get_required_num(buf_len_bytes); + + return SPI_HAL_DMA_DESC_LINKED; +} + +spi_hal_dma_desc_status_t spi_hal_sct_link_tx_seg_dma_desc(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num) +{ + //1 desc for the conf_buffer, other for data. + if (hal->tx_free_desc_num < 1 + lldesc_get_required_num(buf_len_bytes)) { + return SPI_HAL_DMA_DESC_RUN_OUT; + } + + if (hal->tx_seg_link_tail) { + //Connect last segment to the current segment, as we're sure the `s_sct_prepare_tx_seg` next won't fail. + STAILQ_NEXT(hal->tx_seg_link_tail, qe) = hal->cur_tx_seg_link; + } + + lldesc_t *internal_head = NULL; + s_sct_prepare_tx_seg(hal, conf_buffer, send_buffer, buf_len_bytes, &internal_head); + *used_desc_num = 1 + lldesc_get_required_num(buf_len_bytes); + + return SPI_HAL_DMA_DESC_LINKED; +} + +/*------------------------- + * RX + *------------------------*/ +void spi_hal_sct_rx_dma_desc_recycle(spi_hal_context_t *hal, uint32_t recycle_num) +{ + hal->rx_free_desc_num += recycle_num; +} + +static void s_sct_prepare_rx_seg(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head) +{ + HAL_ASSERT(hal->rx_free_desc_num >= lldesc_get_required_num(buf_len_bytes)); + + *trans_head = hal->cur_rx_seg_link; + lldesc_setup_link(hal->cur_rx_seg_link, recv_buffer, buf_len_bytes, true); + for (int i = 0; i< lldesc_get_required_num(buf_len_bytes); i++) { + hal->rx_seg_link_tail = hal->cur_rx_seg_link; + hal->cur_rx_seg_link++; + if (hal->cur_rx_seg_link == hal->dmadesc_rx + hal->dmadesc_n) { + //As there is enough space, so we simply point this to the pool head + hal->cur_rx_seg_link = hal->dmadesc_rx; + } + } + + hal->rx_free_desc_num -= lldesc_get_required_num(buf_len_bytes); +} + +spi_hal_dma_desc_status_t spi_hal_sct_new_rx_dma_desc_head(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head, uint32_t *used_desc_num) +{ + if (hal->rx_free_desc_num < lldesc_get_required_num(buf_len_bytes)) { + return SPI_HAL_DMA_DESC_RUN_OUT; + } + + s_sct_prepare_rx_seg(hal, recv_buffer, buf_len_bytes, trans_head); + *used_desc_num = lldesc_get_required_num(buf_len_bytes); + + return SPI_HAL_DMA_DESC_LINKED; +} + +spi_hal_dma_desc_status_t spi_hal_sct_link_rx_seg_dma_desc(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num) +{ + if (hal->rx_free_desc_num < lldesc_get_required_num(buf_len_bytes)) { + return SPI_HAL_DMA_DESC_RUN_OUT; + } + + if (hal->rx_seg_link_tail) { + //Connect last segment to the current segment, as we're sure the `s_sct_prepare_tx_seg` next won't fail. 
+ STAILQ_NEXT(hal->rx_seg_link_tail, qe) = hal->cur_rx_seg_link; + } + + lldesc_t *internal_head = NULL; + s_sct_prepare_rx_seg(hal, recv_buffer, buf_len_bytes, &internal_head); + *used_desc_num = lldesc_get_required_num(buf_len_bytes); + + return SPI_HAL_DMA_DESC_LINKED; +} +#endif //#if SOC_SPI_SCT_SUPPORTED diff --git a/components/soc/esp32c2/include/soc/Kconfig.soc_caps.in b/components/soc/esp32c2/include/soc/Kconfig.soc_caps.in index 9f6b8b12f0..b65bee4d40 100644 --- a/components/soc/esp32c2/include/soc/Kconfig.soc_caps.in +++ b/components/soc/esp32c2/include/soc/Kconfig.soc_caps.in @@ -471,6 +471,18 @@ config SOC_SPI_PERIPH_SUPPORT_CONTROL_DUMMY_OUT bool default y +config SOC_SPI_SCT_SUPPORTED + bool + default y + +config SOC_SPI_SCT_REG_NUM + int + default 14 + +config SOC_SPI_SCT_BUFFER_NUM_MAX + bool + default y + config SOC_MEMSPI_IS_INDEPENDENT bool default y diff --git a/components/soc/esp32c2/include/soc/soc_caps.h b/components/soc/esp32c2/include/soc/soc_caps.h index 54ae8ca832..194e5a0ba7 100644 --- a/components/soc/esp32c2/include/soc/soc_caps.h +++ b/components/soc/esp32c2/include/soc/soc_caps.h @@ -227,6 +227,11 @@ // Peripheral supports output given level during its "dummy phase" #define SOC_SPI_PERIPH_SUPPORT_CONTROL_DUMMY_OUT 1 +#define SOC_SPI_SCT_SUPPORTED 1 +#define SOC_SPI_SCT_SUPPORTED_PERIPH(PERIPH_NUM) ((PERIPH_NUM==1) ? 1 : 0) //Support Segmented-Configure-Transfer +#define SOC_SPI_SCT_REG_NUM 14 +#define SOC_SPI_SCT_BUFFER_NUM_MAX (1 + SOC_SPI_SCT_REG_NUM) //1-word-bitmap + 14-word-regs + #define SOC_MEMSPI_IS_INDEPENDENT 1 #define SOC_SPI_MAX_PRE_DIVIDER 16 diff --git a/components/soc/esp32c3/include/soc/Kconfig.soc_caps.in b/components/soc/esp32c3/include/soc/Kconfig.soc_caps.in index 8adf0d59d6..dbfdc2ab94 100644 --- a/components/soc/esp32c3/include/soc/Kconfig.soc_caps.in +++ b/components/soc/esp32c3/include/soc/Kconfig.soc_caps.in @@ -699,6 +699,18 @@ config SOC_SPI_PERIPH_SUPPORT_CONTROL_DUMMY_OUT bool default y +config SOC_SPI_SCT_SUPPORTED + bool + default y + +config SOC_SPI_SCT_REG_NUM + int + default 14 + +config SOC_SPI_SCT_BUFFER_NUM_MAX + bool + default y + config SOC_MEMSPI_IS_INDEPENDENT bool default y diff --git a/components/soc/esp32c3/include/soc/soc_caps.h b/components/soc/esp32c3/include/soc/soc_caps.h index 7955f562fc..851bc357e4 100644 --- a/components/soc/esp32c3/include/soc/soc_caps.h +++ b/components/soc/esp32c3/include/soc/soc_caps.h @@ -311,6 +311,11 @@ // Peripheral supports output given level during its "dummy phase" #define SOC_SPI_PERIPH_SUPPORT_CONTROL_DUMMY_OUT 1 +#define SOC_SPI_SCT_SUPPORTED 1 +#define SOC_SPI_SCT_SUPPORTED_PERIPH(PERIPH_NUM) ((PERIPH_NUM==1) ? 
1 : 0) //Support Segmented-Configure-Transfer +#define SOC_SPI_SCT_REG_NUM 14 +#define SOC_SPI_SCT_BUFFER_NUM_MAX (1 + SOC_SPI_SCT_REG_NUM) //1-word-bitmap + 14-word-regs + #define SOC_MEMSPI_IS_INDEPENDENT 1 #define SOC_SPI_MAX_PRE_DIVIDER 16 diff --git a/components/soc/esp32h2/include/soc/Kconfig.soc_caps.in b/components/soc/esp32h2/include/soc/Kconfig.soc_caps.in index 2f2dea3976..adf7a665e6 100644 --- a/components/soc/esp32h2/include/soc/Kconfig.soc_caps.in +++ b/components/soc/esp32h2/include/soc/Kconfig.soc_caps.in @@ -959,6 +959,18 @@ config SOC_SPI_SUPPORT_CLK_RC_FAST bool default y +config SOC_SPI_SCT_SUPPORTED + bool + default y + +config SOC_SPI_SCT_REG_NUM + int + default 14 + +config SOC_SPI_SCT_BUFFER_NUM_MAX + bool + default y + config SOC_MEMSPI_IS_INDEPENDENT bool default y diff --git a/components/soc/esp32h2/include/soc/soc_caps.h b/components/soc/esp32h2/include/soc/soc_caps.h index df5613bd45..b5e7bb534b 100644 --- a/components/soc/esp32h2/include/soc/soc_caps.h +++ b/components/soc/esp32h2/include/soc/soc_caps.h @@ -386,6 +386,11 @@ // host_id = 0 -> SPI0/SPI1, host_id = 1 -> SPI2, #define SOC_SPI_PERIPH_SUPPORT_MULTILINE_MODE(host_id) ({(void)host_id; 1;}) +#define SOC_SPI_SCT_SUPPORTED 1 +#define SOC_SPI_SCT_SUPPORTED_PERIPH(PERIPH_NUM) ((PERIPH_NUM==1) ? 1 : 0) //Support Segmented-Configure-Transfer +#define SOC_SPI_SCT_REG_NUM 14 +#define SOC_SPI_SCT_BUFFER_NUM_MAX (1 + SOC_SPI_SCT_REG_NUM) //1-word-bitmap + 14-word-regs + #define SOC_MEMSPI_IS_INDEPENDENT 1 #define SOC_SPI_MAX_PRE_DIVIDER 16 diff --git a/components/soc/esp32s2/include/soc/Kconfig.soc_caps.in b/components/soc/esp32s2/include/soc/Kconfig.soc_caps.in index 542fdb2c12..b4b2f5b77e 100644 --- a/components/soc/esp32s2/include/soc/Kconfig.soc_caps.in +++ b/components/soc/esp32s2/include/soc/Kconfig.soc_caps.in @@ -695,11 +695,23 @@ config SOC_SPI_PERIPH_SUPPORT_CONTROL_DUMMY_OUT bool default y -config SOC_MEMSPI_IS_INDEPENDENT +config SOC_SPI_SUPPORT_OCT bool default y -config SOC_SPI_SUPPORT_OCT +config SOC_SPI_SCT_SUPPORTED + bool + default y + +config SOC_SPI_SCT_REG_NUM + int + default 27 + +config SOC_SPI_SCT_BUFFER_NUM_MAX + bool + default y + +config SOC_MEMSPI_IS_INDEPENDENT bool default y diff --git a/components/soc/esp32s2/include/soc/soc_caps.h b/components/soc/esp32s2/include/soc/soc_caps.h index 91cf9d5c61..4bacf33d89 100644 --- a/components/soc/esp32s2/include/soc/soc_caps.h +++ b/components/soc/esp32s2/include/soc/soc_caps.h @@ -301,9 +301,14 @@ // Only SPI1 supports this feature #define SOC_SPI_PERIPH_SUPPORT_CONTROL_DUMMY_OUT 1 -#define SOC_MEMSPI_IS_INDEPENDENT 1 -#define SOC_SPI_SUPPORT_OCT 1 +#define SOC_SPI_SUPPORT_OCT 1 +#define SOC_SPI_SCT_SUPPORTED 1 +#define SOC_SPI_SCT_SUPPORTED_PERIPH(PERIPH_NUM) (((PERIPH_NUM==1) || (PERIPH_NUM==2)) ? 
1 : 0) //Support Segmented-Configure-Transfer +#define SOC_SPI_SCT_REG_NUM 27 +#define SOC_SPI_SCT_BUFFER_NUM_MAX (1 + SOC_SPI_SCT_REG_NUM) //1-word-bitmap + 27-word-regs + +#define SOC_MEMSPI_IS_INDEPENDENT 1 #define SOC_MEMSPI_SRC_FREQ_80M_SUPPORTED 1 #define SOC_MEMSPI_SRC_FREQ_40M_SUPPORTED 1 #define SOC_MEMSPI_SRC_FREQ_26M_SUPPORTED 1 diff --git a/components/soc/esp32s3/include/soc/Kconfig.soc_caps.in b/components/soc/esp32s3/include/soc/Kconfig.soc_caps.in index 8018742ed5..8437b6513d 100644 --- a/components/soc/esp32s3/include/soc/Kconfig.soc_caps.in +++ b/components/soc/esp32s3/include/soc/Kconfig.soc_caps.in @@ -839,6 +839,18 @@ config SOC_SPI_SUPPORT_OCT bool default y +config SOC_SPI_SCT_SUPPORTED + bool + default y + +config SOC_SPI_SCT_REG_NUM + int + default 14 + +config SOC_SPI_SCT_BUFFER_NUM_MAX + bool + default y + config SOC_MEMSPI_SRC_FREQ_120M bool default y diff --git a/components/soc/esp32s3/include/soc/soc_caps.h b/components/soc/esp32s3/include/soc/soc_caps.h index 261e8d5139..9b6240a153 100644 --- a/components/soc/esp32s3/include/soc/soc_caps.h +++ b/components/soc/esp32s3/include/soc/soc_caps.h @@ -332,6 +332,11 @@ #define SOC_SPI_MAX_PRE_DIVIDER 16 #define SOC_SPI_SUPPORT_OCT 1 +#define SOC_SPI_SCT_SUPPORTED 1 +#define SOC_SPI_SCT_SUPPORTED_PERIPH(PERIPH_NUM) ((PERIPH_NUM==1) ? 1 : 0) //Support Segmented-Configure-Transfer +#define SOC_SPI_SCT_REG_NUM 14 +#define SOC_SPI_SCT_BUFFER_NUM_MAX (1 + SOC_SPI_SCT_REG_NUM) //1-word-bitmap + 14-word-regs + #define SOC_MEMSPI_SRC_FREQ_120M 1 #define SOC_MEMSPI_SRC_FREQ_80M_SUPPORTED 1 #define SOC_MEMSPI_SRC_FREQ_40M_SUPPORTED 1 From 30760a95bb2f9c8758ac8aa8ac05d459a747de44 Mon Sep 17 00:00:00 2001 From: Armando Date: Fri, 24 Jun 2022 19:02:17 +0800 Subject: [PATCH 2/5] spi_master: spi master sct test app --- .../spi/master/main/test_spi_master_sct.c | 408 ++++++++++++++++++ .../test_apps/master/main/CMakeLists.txt | 1 + 2 files changed, 409 insertions(+) create mode 100644 components/driver/test_apps/spi/master/main/test_spi_master_sct.c diff --git a/components/driver/test_apps/spi/master/main/test_spi_master_sct.c b/components/driver/test_apps/spi/master/main/test_spi_master_sct.c new file mode 100644 index 0000000000..1c30d267b4 --- /dev/null +++ b/components/driver/test_apps/spi/master/main/test_spi_master_sct.c @@ -0,0 +1,408 @@ +/* + * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include "sdkconfig.h" +#include "esp_attr.h" +#include "esp_log.h" +#include "freertos/FreeRTOS.h" +#include "freertos/task.h" +#include "unity.h" +#include "test_utils.h" +#include "esp_heap_caps.h" +#include "driver/spi_master.h" +#include "driver/spi_slave_hd.h" +#include "driver/spi_slave.h" +#include "soc/spi_pins.h" +#include "test_spi_utils.h" + + +__attribute__((unused)) static const char *TAG = "SCT"; + +#if SOC_SPI_SCT_SUPPORTED +/*----------------------------------------------------------- + * FD SCT Functional Test + *-----------------------------------------------------------*/ +#define TEST_FD_SEG_NUM 4 +#define TEST_FD_LEN 8 +#define TEST_FD_LEN_STEP 8 +#define TEST_FD_LEN_MAX 32 + + +#include "soc/spi_struct.h" + +static void fd_master(void) +{ + spi_device_handle_t handle; + + spi_bus_config_t buscfg={ + .mosi_io_num = SPI2_IOMUX_PIN_NUM_MOSI, + .miso_io_num = SPI2_IOMUX_PIN_NUM_MISO, + .sclk_io_num = SPI2_IOMUX_PIN_NUM_CLK, + .quadwp_io_num = -1, + .quadhd_io_num = -1, + .max_transfer_sz = 4092 * 10, + }; + + spi_device_interface_config_t 
devcfg = { + .command_bits = 0, + .address_bits = 0, + .dummy_bits = 0, + .clock_speed_hz = 10 * 1000, + .duty_cycle_pos = 128, //50% duty cycle + .mode = 0, + .spics_io_num = SPI2_IOMUX_PIN_NUM_CS, + .cs_ena_posttrans = 3, //Keep the CS low 3 cycles after transaction, to stop slave from missing the last bit when CS has less propagation delay than CLK + .queue_size = 3, + }; + + TEST_ESP_OK(spi_bus_initialize(SPI2_HOST, &buscfg, SPI_DMA_CH_AUTO)); + TEST_ESP_OK(spi_bus_add_device(SPI2_HOST, &devcfg, &handle)); + unity_send_signal("Master ready"); + + uint8_t *master_tx_buf[TEST_FD_SEG_NUM] = {}; + uint8_t *master_rx_buf[TEST_FD_SEG_NUM] = {}; + uint8_t *slave_tx_buf[TEST_FD_SEG_NUM] = {}; + uint32_t seed = 199; + for (int i = 0; i < TEST_FD_SEG_NUM; i++) { + master_tx_buf[i] = heap_caps_calloc(1, TEST_FD_LEN_MAX, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + master_rx_buf[i] = heap_caps_calloc(1, TEST_FD_LEN_MAX, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + slave_tx_buf[i] = heap_caps_calloc(1, TEST_FD_LEN_MAX, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + get_tx_buffer(seed, master_tx_buf[i], slave_tx_buf[i], TEST_FD_LEN_MAX); + seed++; + } + + uint32_t test_len = TEST_FD_LEN; + spi_seg_transaction_t seg_trans[TEST_FD_SEG_NUM] = {}; + for (int i = 0; i < TEST_FD_SEG_NUM; i++) { + seg_trans[i].base.tx_buffer = master_tx_buf[i]; + seg_trans[i].base.rx_buffer = master_rx_buf[i]; + seg_trans[i].base.length = test_len * 8; + test_len += TEST_FD_LEN_STEP; + } + + unity_wait_for_signal("Slave ready"); + spi_seg_transaction_t *ret_seg_trans = NULL; + TEST_ESP_OK(spi_bus_segment_trans_mode_enable(handle, true)); + TEST_ESP_OK(spi_device_queue_segment_trans(handle, seg_trans, TEST_FD_SEG_NUM, portMAX_DELAY)); + TEST_ESP_OK(spi_device_get_segment_trans_result(handle, &ret_seg_trans, portMAX_DELAY)); + TEST_ASSERT(ret_seg_trans == seg_trans); + + for (int i = 0; i < TEST_FD_SEG_NUM; i++) { + ESP_LOG_BUFFER_HEX("master tx:", ret_seg_trans[i].base.tx_buffer, ret_seg_trans[i].base.length / 8); + ESP_LOG_BUFFER_HEX("master rx:", ret_seg_trans[i].base.rx_buffer, ret_seg_trans[i].base.length / 8); + printf("\n"); + TEST_ASSERT_EQUAL_HEX8_ARRAY(slave_tx_buf[i], master_rx_buf[i], ret_seg_trans[i].base.length / 8); + + free(master_tx_buf[i]); + free(master_rx_buf[i]); + free(slave_tx_buf[i]); + } + + TEST_ESP_OK(spi_bus_remove_device(handle)); + TEST_ESP_OK(spi_bus_free(SPI2_HOST)); +} + +static void fd_slave(void) +{ + unity_wait_for_signal("Master ready"); + spi_bus_config_t buscfg = { + .mosi_io_num = SPI2_IOMUX_PIN_NUM_MOSI, + .miso_io_num = SPI2_IOMUX_PIN_NUM_MISO, + .sclk_io_num = SPI2_IOMUX_PIN_NUM_CLK, + .quadwp_io_num = -1, + .quadhd_io_num = -1, + }; + + spi_slave_interface_config_t slvcfg = { + .mode = 0, + .spics_io_num = SPI2_IOMUX_PIN_NUM_CS, + .queue_size = 4, + }; + TEST_ESP_OK(spi_slave_initialize(SPI2_HOST, &buscfg, &slvcfg, SPI_DMA_CH_AUTO)); + + uint8_t *slave_tx_buf[TEST_FD_SEG_NUM] = {}; + uint8_t *slave_rx_buf[TEST_FD_SEG_NUM] = {}; + uint8_t *master_tx_buf[TEST_FD_SEG_NUM] = {}; + uint32_t seed = 199; + for (int i = 0; i < TEST_FD_SEG_NUM; i++) { + slave_tx_buf[i] = heap_caps_calloc(1, TEST_FD_LEN_MAX, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + slave_rx_buf[i] = heap_caps_calloc(1, TEST_FD_LEN_MAX, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + master_tx_buf[i] = heap_caps_calloc(1, TEST_FD_LEN_MAX, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + get_tx_buffer(seed, master_tx_buf[i], slave_tx_buf[i], TEST_FD_LEN_MAX); + seed++; + } + + uint32_t test_len = TEST_FD_LEN; + spi_slave_transaction_t 
trans[TEST_FD_SEG_NUM] = {}; + for (int i = 0; i < TEST_FD_SEG_NUM; i++) { + trans[i].tx_buffer = slave_tx_buf[i]; + trans[i].rx_buffer = slave_rx_buf[i]; + trans[i].length = test_len * 8; + test_len += TEST_FD_LEN_STEP; + } + + unity_send_signal("Slave ready"); + for (int i = 0; i < TEST_FD_SEG_NUM; i++) { + TEST_ESP_OK(spi_slave_queue_trans(SPI2_HOST, &trans[i], portMAX_DELAY)); + } + + spi_slave_transaction_t *ret_trans = NULL; + for (int i = 0; i < TEST_FD_SEG_NUM; i++) { + ESP_LOGI(TAG, "Slave Trans %d", i); + TEST_ESP_OK(spi_slave_get_trans_result(SPI2_HOST, &ret_trans, portMAX_DELAY)); + TEST_ASSERT(ret_trans == &trans[i]); + + //show result + ESP_LOGI("slave", "trans_len: %d", trans[i].trans_len / 8); + ESP_LOG_BUFFER_HEX("slave tx:", trans[i].tx_buffer, trans[i].length / 8); + ESP_LOG_BUFFER_HEX("slave rx:", trans[i].rx_buffer, trans[i].length / 8); + printf("\n"); + TEST_ASSERT_EQUAL_HEX8_ARRAY(master_tx_buf[i], slave_rx_buf[i], trans[i].length / 8); + + free(slave_tx_buf[i]); + free(slave_rx_buf[i]); + free(master_tx_buf[i]); + } + + TEST_ESP_OK(spi_slave_free(SPI2_HOST)); +} + +TEST_CASE_MULTIPLE_DEVICES("SPI_Master_SCT_FD_Functional", "[spi_ms][test_env=Example_SPI_Multi_device][timeout=120]", fd_master, fd_slave); +#endif //#if SOC_SPI_SCT_SUPPORTED + +#if (SOC_SPI_SUPPORT_SLAVE_HD_VER2 && SOC_SPI_SCT_SUPPORTED) +/*----------------------------------------------------------- + * HD SCT Functional Test + *-----------------------------------------------------------*/ +#define TEST_HD_TIMES 4 +//Master write, slave read, wrt slave reg +#define TEST_HD_BUF_0_ID 12 +#define TEST_HD_BUF_0_VAL 0x99 +//Master read, slave write, wrt slave reg +#define TEST_HD_BUF_1_ID 13 +#define TEST_HD_BUF_1_VAL 0xAA + +#define TEST_HD_DATA_LEN 64 +#define TEST_HD_DATA_LEN_PER_SEG 32 + +static void hd_master(void) +{ + spi_device_handle_t handle; + + spi_bus_config_t buscfg={ + .mosi_io_num = SPI2_IOMUX_PIN_NUM_MOSI, + .miso_io_num = SPI2_IOMUX_PIN_NUM_MISO, + .sclk_io_num = SPI2_IOMUX_PIN_NUM_CLK, + .quadwp_io_num = -1, + .quadhd_io_num = -1, + .max_transfer_sz = 4092 * 10, + }; + + spi_device_interface_config_t devcfg = { + .command_bits = 8, + .address_bits = 8, + .dummy_bits = 8, + .clock_speed_hz = 10 * 1000, + .duty_cycle_pos = 128, //50% duty cycle + .mode = 0, + .spics_io_num = SPI2_IOMUX_PIN_NUM_CS, + .cs_ena_posttrans = 3, //Keep the CS low 3 cycles after transaction, to stop slave from missing the last bit when CS has less propagation delay than CLK + .queue_size = 3, + .flags = SPI_DEVICE_HALFDUPLEX, + }; + + TEST_ESP_OK(spi_bus_initialize(SPI2_HOST, &buscfg, SPI_DMA_CH_AUTO)); + TEST_ESP_OK(spi_bus_add_device(SPI2_HOST, &devcfg, &handle)); + unity_send_signal("Master ready"); + + //Test data preparation + uint32_t master_tx_val = TEST_HD_BUF_0_VAL; + uint8_t *master_tx_buf = heap_caps_calloc(1, TEST_HD_DATA_LEN, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + uint8_t *master_rx_buf = heap_caps_calloc(1, TEST_HD_DATA_LEN, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + uint32_t master_rx_val = 0; + uint8_t *slave_tx_buf = heap_caps_calloc(1, TEST_HD_DATA_LEN, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + get_tx_buffer(199, master_tx_buf, slave_tx_buf, TEST_HD_DATA_LEN); + + spi_seg_transaction_t *ret_seg_trans = NULL; + + //---------------------Master TX---------------------------// + spi_seg_transaction_t tx_seg_trans[TEST_HD_TIMES] = { + { + .base = { + .cmd = 0x1, + .addr = TEST_HD_BUF_0_ID, + .length = 4 * 8, + .tx_buffer = (uint8_t *)&master_tx_val, + }, + }, + //TEST_HD_DATA_LEN of TX data, splitted 
into 2 segments. `TEST_HD_DATA_LEN_PER_SEG` per segment + { + .base = { + .cmd = 0x3, + .length = TEST_HD_DATA_LEN_PER_SEG * 8, + .tx_buffer = master_tx_buf, + }, + .dummy_bits = 8, + .seg_trans_flags = SPI_SEG_TRANS_DUMMY_LEN_UPDATED, + }, + { + .base = { + .cmd = 0x3, + .length = TEST_HD_DATA_LEN_PER_SEG * 8, + .tx_buffer = master_tx_buf + TEST_HD_DATA_LEN_PER_SEG, + }, + .dummy_bits = 8, + .seg_trans_flags = SPI_SEG_TRANS_DUMMY_LEN_UPDATED, + }, + { + .base = { + .cmd = 0x7, + } + }, + }; + + TEST_ESP_OK(spi_bus_segment_trans_mode_enable(handle, true)); + unity_wait_for_signal("Slave ready"); + TEST_ESP_OK(spi_device_queue_segment_trans(handle, tx_seg_trans, TEST_HD_TIMES, portMAX_DELAY)); + TEST_ESP_OK(spi_device_get_segment_trans_result(handle, &ret_seg_trans, portMAX_DELAY)); + TEST_ASSERT(ret_seg_trans == tx_seg_trans); + ESP_LOG_BUFFER_HEX("Master tx", master_tx_buf, TEST_HD_DATA_LEN); + TEST_ESP_OK(spi_bus_segment_trans_mode_enable(handle, false)); + + + //---------------------Master RX---------------------------// + spi_seg_transaction_t rx_seg_trans[TEST_HD_TIMES] = { + { + .base = { + .cmd = 0x2, + .addr = TEST_HD_BUF_1_ID, + .rxlength = 4 * 8, + .rx_buffer = (uint8_t *)&master_rx_val, + }, + }, + // TEST_HD_DATA_LEN of TX data, splitted into 2 segments. `TEST_HD_DATA_LEN_PER_SEG` per segment + { + .base = { + .cmd = 0x4, + .rxlength = TEST_HD_DATA_LEN_PER_SEG * 8, + .rx_buffer = master_rx_buf, + }, + .dummy_bits = 8, + .seg_trans_flags = SPI_SEG_TRANS_DUMMY_LEN_UPDATED, + }, + { + .base = { + .cmd = 0x4, + .rxlength = TEST_HD_DATA_LEN_PER_SEG * 8, + .rx_buffer = master_rx_buf + TEST_HD_DATA_LEN_PER_SEG, + }, + .dummy_bits = 8, + .seg_trans_flags = SPI_SEG_TRANS_DUMMY_LEN_UPDATED, + }, + { + .base = { + .cmd = 0x8, + } + }, + }; + TEST_ESP_OK(spi_bus_segment_trans_mode_enable(handle, true)); + + unity_wait_for_signal("Slave ready"); + TEST_ESP_OK(spi_device_queue_segment_trans(handle, rx_seg_trans, TEST_HD_TIMES, portMAX_DELAY)); + TEST_ESP_OK(spi_device_get_segment_trans_result(handle, &ret_seg_trans, portMAX_DELAY)); + TEST_ASSERT(ret_seg_trans == rx_seg_trans); + + ESP_LOGI("Master", "Slave Reg[%d] value is: 0x%" PRIx32, TEST_HD_BUF_1_ID, master_rx_val); + TEST_ASSERT(master_rx_val == TEST_HD_BUF_1_VAL); + + TEST_ASSERT_EQUAL_HEX8_ARRAY(slave_tx_buf, master_rx_buf, TEST_HD_DATA_LEN); + ESP_LOG_BUFFER_HEX("Master rx", master_rx_buf, TEST_HD_DATA_LEN); + + //Memory Recycle + free(master_tx_buf); + free(master_rx_buf); + free(slave_tx_buf); + + TEST_ESP_OK(spi_bus_remove_device(handle)); + TEST_ESP_OK(spi_bus_free(SPI2_HOST)); +} + +static void hd_slave(void) +{ + spi_bus_config_t bus_cfg = { + .miso_io_num = SPI2_IOMUX_PIN_NUM_MISO, + .mosi_io_num = SPI2_IOMUX_PIN_NUM_MOSI, + .sclk_io_num = SPI2_IOMUX_PIN_NUM_CLK, + .quadwp_io_num = -1, + .quadhd_io_num = -1, + .max_transfer_sz = 4092 * 4, + }; + + spi_slave_hd_slot_config_t slave_hd_cfg = { + .spics_io_num = SPI2_IOMUX_PIN_NUM_CS, + .dma_chan = SPI_DMA_CH_AUTO, + .flags = 0, + .mode = 0, + .command_bits = 8, + .address_bits = 8, + .dummy_bits = 8, + .queue_size = 4, + }; + TEST_ESP_OK(spi_slave_hd_init(SPI2_HOST, &bus_cfg, &slave_hd_cfg)); + + spi_slave_hd_data_t *ret_trans = NULL; + + //Test data preparation + uint32_t slave_tx_val = TEST_HD_BUF_1_VAL; + uint8_t *slave_tx_buf = heap_caps_calloc(1, TEST_HD_DATA_LEN, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + uint8_t *slave_rx_buf = heap_caps_calloc(1, TEST_HD_DATA_LEN, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + uint32_t slave_rx_val = 0; + uint8_t *master_tx_buf = 
heap_caps_calloc(1, TEST_HD_DATA_LEN, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + get_tx_buffer(199, master_tx_buf, slave_tx_buf, TEST_HD_DATA_LEN); + + unity_wait_for_signal("Master ready"); + + //---------------------Slave RX---------------------------// + spi_slave_hd_data_t slave_rx_trans = { + .data = slave_rx_buf, + .len = TEST_HD_DATA_LEN, + }; + TEST_ESP_OK(spi_slave_hd_queue_trans(SPI2_HOST, SPI_SLAVE_CHAN_RX, &slave_rx_trans, portMAX_DELAY)); + unity_send_signal("slave ready"); + TEST_ESP_OK(spi_slave_hd_get_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY)); + TEST_ASSERT(ret_trans == &slave_rx_trans); + + TEST_ASSERT_EQUAL_HEX8_ARRAY(master_tx_buf, slave_rx_buf, TEST_HD_DATA_LEN); + ESP_LOG_BUFFER_HEX("Slave rx", slave_rx_buf, TEST_HD_DATA_LEN); + + spi_slave_hd_read_buffer(SPI2_HOST, TEST_HD_BUF_0_ID, (uint8_t *)&slave_rx_val, 4); + ESP_LOGI("Slave", "Slave Reg[%d] value is: 0x%" PRIx32, TEST_HD_BUF_0_ID, slave_rx_val); + TEST_ASSERT(slave_rx_val == TEST_HD_BUF_0_VAL); + + //---------------------Slave TX---------------------------// + spi_slave_hd_write_buffer(SPI2_HOST, TEST_HD_BUF_1_ID, (uint8_t *)&slave_tx_val, 4); + spi_slave_hd_data_t slave_tx_trans = { + .data = slave_tx_buf, + .len = TEST_HD_DATA_LEN, + }; + TEST_ESP_OK(spi_slave_hd_queue_trans(SPI2_HOST, SPI_SLAVE_CHAN_TX, &slave_tx_trans, portMAX_DELAY)); + unity_send_signal("slave ready"); + TEST_ESP_OK(spi_slave_hd_get_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_TX, &ret_trans, portMAX_DELAY)); + TEST_ASSERT(ret_trans == &slave_tx_trans); + ESP_LOG_BUFFER_HEX("Slave tx", slave_tx_buf, TEST_HD_DATA_LEN); + + //Memory Recycle + free(slave_tx_buf); + free(slave_rx_buf); + free(master_tx_buf); + + TEST_ESP_OK(spi_slave_hd_deinit(SPI2_HOST)); +} + +TEST_CASE_MULTIPLE_DEVICES("SPI_Master_SCT_HD_Functional", "[spi_ms][test_env=Example_SPI_Multi_device][timeout=120]", hd_master, hd_slave); + +#endif //#if (SOC_SPI_SUPPORT_SLAVE_HD_VER2 && SOC_SPI_SCT_SUPPORTED) diff --git a/components/esp_driver_spi/test_apps/master/main/CMakeLists.txt b/components/esp_driver_spi/test_apps/master/main/CMakeLists.txt index 9b34511e99..e72a32b797 100644 --- a/components/esp_driver_spi/test_apps/master/main/CMakeLists.txt +++ b/components/esp_driver_spi/test_apps/master/main/CMakeLists.txt @@ -4,6 +4,7 @@ set(srcs "test_spi_master.c" "test_spi_sio.c" "test_spi_bus_lock.c" + "test_spi_master_sct.c" ) # In order for the cases defined by `TEST_CASE` to be linked into the final elf, From 1e6c61daa6b6ea79d0e1ae1652e1ee76b02cee7b Mon Sep 17 00:00:00 2001 From: wanlei Date: Thu, 9 Mar 2023 15:07:21 +0800 Subject: [PATCH 3/5] spi_master: sct mode support set line mode, transaction interval time support line mode 1-2-4-8 depend on targets. fix sct mode dma descriptor counter compute issue. add conf_bits_len setting API to control interval time. 
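Usage illustration (not part of this change): assuming a device handle `dev` that was added
with SPI_DEVICE_HALFDUPLEX on a bus exposing the quad data pins, and SCT mode already enabled
through spi_bus_segment_trans_mode_enable(), a caller could queue a two-segment transfer in
which the second segment switches to 4 data lines. The CS inactive gap is set on the list head
because, except on ESP32-S2, the driver programs the head segment's value into hardware for the
whole list. `hdr_buf` and `payload_buf` are placeholder names and would need to be DMA-capable
in a real application.

    spi_seg_transaction_t segs[2] = {
        {   //segment 0: 4-byte header on a single data line
            //CS stays inactive for 16 SPI clocks between segments
            .base = {
                .length = 4 * 8,
                .tx_buffer = hdr_buf,
            },
            .seg_gap_clock_len = 16,
        },
        {   //segment 1: 64-byte payload on 4 data lines
            .base = {
                .flags = SPI_TRANS_MODE_QIO,
                .length = 64 * 8,
                .tx_buffer = payload_buf,
            },
        },
    };
    spi_seg_transaction_t *ret = NULL;
    ESP_ERROR_CHECK(spi_device_queue_segment_trans(dev, segs, 2, portMAX_DELAY));
    ESP_ERROR_CHECK(spi_device_get_segment_trans_result(dev, &ret, portMAX_DELAY));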
--- .../spi/master/main/test_spi_master_sct.c | 154 +----------------- .../include/driver/spi_master.h | 1 + .../esp_driver_spi/src/gpspi/spi_master.c | 41 ++++- components/hal/esp32c2/include/hal/spi_ll.h | 58 ++++++- components/hal/esp32c3/include/hal/spi_ll.h | 58 ++++++- components/hal/esp32c6/include/hal/spi_ll.h | 2 +- components/hal/esp32h2/include/hal/spi_ll.h | 65 +++++++- components/hal/esp32s2/include/hal/spi_ll.h | 79 ++++++++- components/hal/esp32s3/include/hal/spi_ll.h | 66 +++++++- components/hal/include/hal/spi_hal.h | 12 ++ components/hal/spi_hal.c | 3 + components/hal/spi_hal_iram.c | 11 +- .../esp32c2/include/soc/Kconfig.soc_caps.in | 4 + components/soc/esp32c2/include/soc/soc_caps.h | 1 + .../esp32c3/include/soc/Kconfig.soc_caps.in | 4 + components/soc/esp32c3/include/soc/soc_caps.h | 1 + .../esp32h2/include/soc/Kconfig.soc_caps.in | 4 + components/soc/esp32h2/include/soc/soc_caps.h | 1 + .../esp32s2/include/soc/Kconfig.soc_caps.in | 4 + components/soc/esp32s2/include/soc/soc_caps.h | 1 + .../esp32s3/include/soc/Kconfig.soc_caps.in | 4 + components/soc/esp32s3/include/soc/soc_caps.h | 1 + 22 files changed, 398 insertions(+), 177 deletions(-) diff --git a/components/driver/test_apps/spi/master/main/test_spi_master_sct.c b/components/driver/test_apps/spi/master/main/test_spi_master_sct.c index 1c30d267b4..915a520b16 100644 --- a/components/driver/test_apps/spi/master/main/test_spi_master_sct.c +++ b/components/driver/test_apps/spi/master/main/test_spi_master_sct.c @@ -22,158 +22,6 @@ __attribute__((unused)) static const char *TAG = "SCT"; -#if SOC_SPI_SCT_SUPPORTED -/*----------------------------------------------------------- - * FD SCT Functional Test - *-----------------------------------------------------------*/ -#define TEST_FD_SEG_NUM 4 -#define TEST_FD_LEN 8 -#define TEST_FD_LEN_STEP 8 -#define TEST_FD_LEN_MAX 32 - - -#include "soc/spi_struct.h" - -static void fd_master(void) -{ - spi_device_handle_t handle; - - spi_bus_config_t buscfg={ - .mosi_io_num = SPI2_IOMUX_PIN_NUM_MOSI, - .miso_io_num = SPI2_IOMUX_PIN_NUM_MISO, - .sclk_io_num = SPI2_IOMUX_PIN_NUM_CLK, - .quadwp_io_num = -1, - .quadhd_io_num = -1, - .max_transfer_sz = 4092 * 10, - }; - - spi_device_interface_config_t devcfg = { - .command_bits = 0, - .address_bits = 0, - .dummy_bits = 0, - .clock_speed_hz = 10 * 1000, - .duty_cycle_pos = 128, //50% duty cycle - .mode = 0, - .spics_io_num = SPI2_IOMUX_PIN_NUM_CS, - .cs_ena_posttrans = 3, //Keep the CS low 3 cycles after transaction, to stop slave from missing the last bit when CS has less propagation delay than CLK - .queue_size = 3, - }; - - TEST_ESP_OK(spi_bus_initialize(SPI2_HOST, &buscfg, SPI_DMA_CH_AUTO)); - TEST_ESP_OK(spi_bus_add_device(SPI2_HOST, &devcfg, &handle)); - unity_send_signal("Master ready"); - - uint8_t *master_tx_buf[TEST_FD_SEG_NUM] = {}; - uint8_t *master_rx_buf[TEST_FD_SEG_NUM] = {}; - uint8_t *slave_tx_buf[TEST_FD_SEG_NUM] = {}; - uint32_t seed = 199; - for (int i = 0; i < TEST_FD_SEG_NUM; i++) { - master_tx_buf[i] = heap_caps_calloc(1, TEST_FD_LEN_MAX, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); - master_rx_buf[i] = heap_caps_calloc(1, TEST_FD_LEN_MAX, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); - slave_tx_buf[i] = heap_caps_calloc(1, TEST_FD_LEN_MAX, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); - get_tx_buffer(seed, master_tx_buf[i], slave_tx_buf[i], TEST_FD_LEN_MAX); - seed++; - } - - uint32_t test_len = TEST_FD_LEN; - spi_seg_transaction_t seg_trans[TEST_FD_SEG_NUM] = {}; - for (int i = 0; i < TEST_FD_SEG_NUM; i++) { - 
seg_trans[i].base.tx_buffer = master_tx_buf[i]; - seg_trans[i].base.rx_buffer = master_rx_buf[i]; - seg_trans[i].base.length = test_len * 8; - test_len += TEST_FD_LEN_STEP; - } - - unity_wait_for_signal("Slave ready"); - spi_seg_transaction_t *ret_seg_trans = NULL; - TEST_ESP_OK(spi_bus_segment_trans_mode_enable(handle, true)); - TEST_ESP_OK(spi_device_queue_segment_trans(handle, seg_trans, TEST_FD_SEG_NUM, portMAX_DELAY)); - TEST_ESP_OK(spi_device_get_segment_trans_result(handle, &ret_seg_trans, portMAX_DELAY)); - TEST_ASSERT(ret_seg_trans == seg_trans); - - for (int i = 0; i < TEST_FD_SEG_NUM; i++) { - ESP_LOG_BUFFER_HEX("master tx:", ret_seg_trans[i].base.tx_buffer, ret_seg_trans[i].base.length / 8); - ESP_LOG_BUFFER_HEX("master rx:", ret_seg_trans[i].base.rx_buffer, ret_seg_trans[i].base.length / 8); - printf("\n"); - TEST_ASSERT_EQUAL_HEX8_ARRAY(slave_tx_buf[i], master_rx_buf[i], ret_seg_trans[i].base.length / 8); - - free(master_tx_buf[i]); - free(master_rx_buf[i]); - free(slave_tx_buf[i]); - } - - TEST_ESP_OK(spi_bus_remove_device(handle)); - TEST_ESP_OK(spi_bus_free(SPI2_HOST)); -} - -static void fd_slave(void) -{ - unity_wait_for_signal("Master ready"); - spi_bus_config_t buscfg = { - .mosi_io_num = SPI2_IOMUX_PIN_NUM_MOSI, - .miso_io_num = SPI2_IOMUX_PIN_NUM_MISO, - .sclk_io_num = SPI2_IOMUX_PIN_NUM_CLK, - .quadwp_io_num = -1, - .quadhd_io_num = -1, - }; - - spi_slave_interface_config_t slvcfg = { - .mode = 0, - .spics_io_num = SPI2_IOMUX_PIN_NUM_CS, - .queue_size = 4, - }; - TEST_ESP_OK(spi_slave_initialize(SPI2_HOST, &buscfg, &slvcfg, SPI_DMA_CH_AUTO)); - - uint8_t *slave_tx_buf[TEST_FD_SEG_NUM] = {}; - uint8_t *slave_rx_buf[TEST_FD_SEG_NUM] = {}; - uint8_t *master_tx_buf[TEST_FD_SEG_NUM] = {}; - uint32_t seed = 199; - for (int i = 0; i < TEST_FD_SEG_NUM; i++) { - slave_tx_buf[i] = heap_caps_calloc(1, TEST_FD_LEN_MAX, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); - slave_rx_buf[i] = heap_caps_calloc(1, TEST_FD_LEN_MAX, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); - master_tx_buf[i] = heap_caps_calloc(1, TEST_FD_LEN_MAX, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); - get_tx_buffer(seed, master_tx_buf[i], slave_tx_buf[i], TEST_FD_LEN_MAX); - seed++; - } - - uint32_t test_len = TEST_FD_LEN; - spi_slave_transaction_t trans[TEST_FD_SEG_NUM] = {}; - for (int i = 0; i < TEST_FD_SEG_NUM; i++) { - trans[i].tx_buffer = slave_tx_buf[i]; - trans[i].rx_buffer = slave_rx_buf[i]; - trans[i].length = test_len * 8; - test_len += TEST_FD_LEN_STEP; - } - - unity_send_signal("Slave ready"); - for (int i = 0; i < TEST_FD_SEG_NUM; i++) { - TEST_ESP_OK(spi_slave_queue_trans(SPI2_HOST, &trans[i], portMAX_DELAY)); - } - - spi_slave_transaction_t *ret_trans = NULL; - for (int i = 0; i < TEST_FD_SEG_NUM; i++) { - ESP_LOGI(TAG, "Slave Trans %d", i); - TEST_ESP_OK(spi_slave_get_trans_result(SPI2_HOST, &ret_trans, portMAX_DELAY)); - TEST_ASSERT(ret_trans == &trans[i]); - - //show result - ESP_LOGI("slave", "trans_len: %d", trans[i].trans_len / 8); - ESP_LOG_BUFFER_HEX("slave tx:", trans[i].tx_buffer, trans[i].length / 8); - ESP_LOG_BUFFER_HEX("slave rx:", trans[i].rx_buffer, trans[i].length / 8); - printf("\n"); - TEST_ASSERT_EQUAL_HEX8_ARRAY(master_tx_buf[i], slave_rx_buf[i], trans[i].length / 8); - - free(slave_tx_buf[i]); - free(slave_rx_buf[i]); - free(master_tx_buf[i]); - } - - TEST_ESP_OK(spi_slave_free(SPI2_HOST)); -} - -TEST_CASE_MULTIPLE_DEVICES("SPI_Master_SCT_FD_Functional", "[spi_ms][test_env=Example_SPI_Multi_device][timeout=120]", fd_master, fd_slave); -#endif //#if SOC_SPI_SCT_SUPPORTED - #if 
(SOC_SPI_SUPPORT_SLAVE_HD_VER2 && SOC_SPI_SCT_SUPPORTED) /*----------------------------------------------------------- * HD SCT Functional Test @@ -403,6 +251,6 @@ static void hd_slave(void) TEST_ESP_OK(spi_slave_hd_deinit(SPI2_HOST)); } -TEST_CASE_MULTIPLE_DEVICES("SPI_Master_SCT_HD_Functional", "[spi_ms][test_env=Example_SPI_Multi_device][timeout=120]", hd_master, hd_slave); +TEST_CASE_MULTIPLE_DEVICES("SPI_Master_SCT_HD_Functional", "[spi_ms]", hd_master, hd_slave); #endif //#if (SOC_SPI_SUPPORT_SLAVE_HD_VER2 && SOC_SPI_SCT_SUPPORTED) diff --git a/components/esp_driver_spi/include/driver/spi_master.h b/components/esp_driver_spi/include/driver/spi_master.h index 43f8ddf647..3c3698046f 100644 --- a/components/esp_driver_spi/include/driver/spi_master.h +++ b/components/esp_driver_spi/include/driver/spi_master.h @@ -192,6 +192,7 @@ typedef struct { uint8_t command_bits; ///< The command length in this transaction, in bits. uint8_t address_bits; ///< The address length in this transaction, in bits. uint8_t dummy_bits; ///< The dummy length in this transaction, in bits. + uint32_t seg_gap_clock_len; ///< The len of CS inactive time between segments, in clocks. uint32_t seg_trans_flags; ///< SCT specific flags. See `SPI_SEG_TRANS_XXX` macros. /**< Necessary buffer required by HW, don't touch this. >**/ diff --git a/components/esp_driver_spi/src/gpspi/spi_master.c b/components/esp_driver_spi/src/gpspi/spi_master.c index 67f3599caf..0579b630be 100644 --- a/components/esp_driver_spi/src/gpspi/spi_master.c +++ b/components/esp_driver_spi/src/gpspi/spi_master.c @@ -781,6 +781,21 @@ static void SPI_MASTER_ISR_ATTR spi_post_trans(spi_host_t *host) } #if SOC_SPI_SCT_SUPPORTED +static void SPI_MASTER_ISR_ATTR spi_sct_set_hal_trans_config(spi_seg_transaction_t *trans_header, spi_hal_trans_config_t *hal_trans) +{ + spi_transaction_t *trans = &trans_header->base; + + //Set up OIO/QIO/DIO if needed + hal_trans->line_mode.data_lines = (trans->flags & SPI_TRANS_MODE_DIO) ? 2 : (trans->flags & SPI_TRANS_MODE_QIO) ? 4 : 1; +#if SOC_SPI_SUPPORT_OCT + if (trans->flags & SPI_TRANS_MODE_OCT) { + hal_trans->line_mode.data_lines = 8; + } +#endif + hal_trans->line_mode.addr_lines = (trans->flags & SPI_TRANS_MULTILINE_ADDR) ? hal_trans->line_mode.data_lines : 1; + hal_trans->line_mode.cmd_lines = (trans->flags & SPI_TRANS_MULTILINE_CMD) ? hal_trans->line_mode.data_lines : 1; +} + static void SPI_MASTER_ISR_ATTR spi_new_sct_trans(spi_device_t *dev, spi_sct_desc_priv_t *cur_sct_trans) { dev->host->cur_cs = dev->id; @@ -788,6 +803,10 @@ static void SPI_MASTER_ISR_ATTR spi_new_sct_trans(spi_device_t *dev, spi_sct_des //Reconfigure according to device settings, the function only has effect when the dev_id is changed. 
spi_setup_device(dev); +#if !CONFIG_IDF_TARGET_ESP32S2 + // s2 update this seg_gap_clock_len by dma from conf_buffer + spi_hal_sct_set_conf_bits_len(&dev->host->hal, cur_sct_trans->sct_trans_desc_head->seg_gap_clock_len); +#endif spi_hal_sct_load_dma_link(&dev->host->hal, cur_sct_trans->rx_seg_head, cur_sct_trans->tx_seg_head); if (dev->cfg.pre_cb) { dev->cfg.pre_cb((spi_transaction_t *)cur_sct_trans->sct_trans_desc_head); @@ -869,7 +888,9 @@ static void SPI_MASTER_ISR_ATTR spi_intr(void *arg) if (host->sct_mode_enabled) { //cur_cs is changed to DEV_NUM_MAX here spi_post_sct_trans(host); - xQueueSendFromISR(host->device[cs]->ret_queue, &host->cur_sct_trans, &do_yield); + if (!(host->device[cs]->cfg.flags & SPI_DEVICE_NO_RETURN_RESULT)) { + xQueueSendFromISR(host->device[cs]->ret_queue, &host->cur_sct_trans, &do_yield); + } } else #endif //#if SOC_SPI_SCT_SUPPORTED { @@ -1402,6 +1423,7 @@ esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool ena { SPI_CHECK(handle, "Invalid arguments.", ESP_ERR_INVALID_ARG); SPI_CHECK(SOC_SPI_SCT_SUPPORTED_PERIPH(handle->host->id), "Invalid arguments", ESP_ERR_INVALID_ARG); + SPI_CHECK(handle->cfg.flags & SPI_DEVICE_HALFDUPLEX, "SCT mode only available under Half Duplex mode", ESP_ERR_INVALID_STATE); SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot queue new transaction while previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE); SPI_CHECK(uxQueueMessagesWaiting(handle->trans_queue) == 0, "Cannot enable SCT mode when internal Queue still has items", ESP_ERR_INVALID_STATE); @@ -1432,8 +1454,13 @@ esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool ena spi_hal_trans_config_t hal_trans = {}; spi_format_hal_trans_struct(handle, &trans_buf, &hal_trans); spi_hal_setup_trans(hal, hal_dev, &hal_trans); +#if CONFIG_IDF_TARGET_ESP32S2 + // conf_base need ensure transaction gap len more than about 2us under different freq. + // conf_base only configurable on s2. + spi_hal_sct_setup_conf_base(hal, handle->real_clk_freq_hz/600000); +#endif - spi_hal_sct_init(&handle->host->hal); + spi_hal_sct_init(hal); } else { spi_hal_sct_deinit(&handle->host->hal); } @@ -1445,8 +1472,13 @@ esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool ena static void SPI_MASTER_ATTR s_sct_init_conf_buffer(spi_hal_context_t *hal, spi_seg_transaction_t *seg_trans_desc, uint32_t seg_num) { + // read from HW need waiting for slower APB clock domain return data, loop to contact slow clock domain will waste time. + // use one imagen then copied by cpu instead. + uint32_t conf_buffer_img[SOC_SPI_SCT_BUFFER_NUM_MAX]; + spi_hal_sct_init_conf_buffer(hal, conf_buffer_img); + for (int i = 0; i < seg_num; i++) { - spi_hal_sct_init_conf_buffer(hal, seg_trans_desc[i].conf_buffer); + memcpy(seg_trans_desc[i].conf_buffer, conf_buffer_img, sizeof(conf_buffer_img)); } } @@ -1503,7 +1535,10 @@ static void SPI_MASTER_ATTR s_sct_format_conf_buffer(spi_device_handle_t handle, if (seg_end) { seg_config.seg_end = true; } + seg_config.seg_gap_len = seg_trans_desc->seg_gap_clock_len; + // set line mode or ... 
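+    // Pick up this segment's line widths (1/2/4/8 data lines, multi-line cmd/addr) from `base.flags`
+    // (SPI_TRANS_MODE_DIO/QIO/OCT, SPI_TRANS_MULTILINE_CMD/ADDR) and store them in hal->trans_config,
+    // so the conf_buffer formatted below carries the matching dual/quad/octal register bits.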
+ spi_sct_set_hal_trans_config(seg_trans_desc, &hal->trans_config); spi_hal_sct_format_conf_buffer(hal, &seg_config, hal_dev, seg_trans_desc->conf_buffer); } diff --git a/components/hal/esp32c2/include/hal/spi_ll.h b/components/hal/esp32c2/include/hal/spi_ll.h index 8acbf98906..ddfab2e2ae 100644 --- a/components/hal/esp32c2/include/hal/spi_ll.h +++ b/components/hal/esp32c2/include/hal/spi_ll.h @@ -296,7 +296,7 @@ static inline void spi_ll_user_start(spi_dev_t *hw) */ static inline uint32_t spi_ll_get_running_cmd(spi_dev_t *hw) { - return hw->cmd.val; + return hw->cmd.usr; } /** @@ -650,8 +650,8 @@ static inline void spi_ll_master_set_line_mode(spi_dev_t *hw, spi_line_mode_t li hw->ctrl.faddr_dual = (line_mode.addr_lines == 2); hw->ctrl.faddr_quad = (line_mode.addr_lines == 4); hw->ctrl.fread_dual = (line_mode.data_lines == 2); - hw->user.fwrite_dual = (line_mode.data_lines == 2); hw->ctrl.fread_quad = (line_mode.data_lines == 4); + hw->user.fwrite_dual = (line_mode.data_lines == 2); hw->user.fwrite_quad = (line_mode.data_lines == 4); } @@ -1302,13 +1302,27 @@ static inline int spi_ll_get_slave_hd_dummy_bits(spi_line_mode_t line_mode) #define SPI_LL_SCT_MAGIC_NUMBER (0x2) +/** + * Set conf phase bits len to HW for segment config trans mode. + * + * @param hw Beginning address of the peripheral registers. + * @param conf_bitlen Value of field conf_bitslen in cmd reg. + */ +static inline void spi_ll_set_conf_phase_bits_len(spi_dev_t *hw, uint32_t conf_bitlen) +{ + if (conf_bitlen <= SOC_SPI_SCT_CONF_BITLEN_MAX) { + hw->cmd.conf_bitlen = conf_bitlen; + } +} + /** * Update the conf buffer for conf phase * * @param hw Beginning address of the peripheral registers. + * @param is_end Is this transaction the end of this segment. * @param conf_buffer Conf buffer to be updated. */ -static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], bool is_end) +static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, bool is_end, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) { //user reg: usr_conf_nxt if (is_end) { @@ -1318,6 +1332,44 @@ static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t } } +/** + * Update the line mode of conf buffer for conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param line_mode line mode struct of each phase. + * @param conf_buffer Conf buffer to be updated. 
+ */ +static inline void spi_ll_format_line_mode_conf_buff(spi_dev_t *hw, spi_line_mode_t line_mode, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] &= ~SPI_LL_ONE_LINE_CTRL_MASK; + conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] &= ~SPI_LL_ONE_LINE_USER_MASK; + + switch (line_mode.cmd_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_DUAL_M); break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_QUAD_M); break; + default: break; + } + + switch (line_mode.addr_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_DUAL_M); break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_QUAD_M); break; + default: break; + } + + switch (line_mode.data_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_DUAL_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_DUAL_M); + break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_QUAD_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_QUAD_M); + break; + default: break; + } +} + /** * Update the conf buffer for prep phase * diff --git a/components/hal/esp32c3/include/hal/spi_ll.h b/components/hal/esp32c3/include/hal/spi_ll.h index 97bfad5b50..c99a23e70b 100644 --- a/components/hal/esp32c3/include/hal/spi_ll.h +++ b/components/hal/esp32c3/include/hal/spi_ll.h @@ -298,7 +298,7 @@ static inline void spi_ll_user_start(spi_dev_t *hw) */ static inline uint32_t spi_ll_get_running_cmd(spi_dev_t *hw) { - return hw->cmd.val; + return hw->cmd.usr; } /** @@ -652,8 +652,8 @@ static inline void spi_ll_master_set_line_mode(spi_dev_t *hw, spi_line_mode_t li hw->ctrl.faddr_dual = (line_mode.addr_lines == 2); hw->ctrl.faddr_quad = (line_mode.addr_lines == 4); hw->ctrl.fread_dual = (line_mode.data_lines == 2); - hw->user.fwrite_dual = (line_mode.data_lines == 2); hw->ctrl.fread_quad = (line_mode.data_lines == 4); + hw->user.fwrite_dual = (line_mode.data_lines == 2); hw->user.fwrite_quad = (line_mode.data_lines == 4); } @@ -1215,13 +1215,27 @@ static inline uint32_t spi_ll_slave_hd_get_last_addr(spi_dev_t *hw) #define SPI_LL_SCT_MAGIC_NUMBER (0x2) +/** + * Set conf phase bits len to HW for segment config trans mode. + * + * @param hw Beginning address of the peripheral registers. + * @param conf_bitlen Value of field conf_bitslen in cmd reg. + */ +static inline void spi_ll_set_conf_phase_bits_len(spi_dev_t *hw, uint32_t conf_bitlen) +{ + if (conf_bitlen <= SOC_SPI_SCT_CONF_BITLEN_MAX) { + hw->cmd.conf_bitlen = conf_bitlen; + } +} + /** * Update the conf buffer for conf phase * * @param hw Beginning address of the peripheral registers. + * @param is_end Is this transaction the end of this segment. * @param conf_buffer Conf buffer to be updated. 
*/ -static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], bool is_end) +static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, bool is_end, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) { //user reg: usr_conf_nxt if (is_end) { @@ -1231,6 +1245,44 @@ static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t } } +/** + * Update the line mode of conf buffer for conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param line_mode line mode struct of each phase. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_line_mode_conf_buff(spi_dev_t *hw, spi_line_mode_t line_mode, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] &= ~SPI_LL_ONE_LINE_CTRL_MASK; + conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] &= ~SPI_LL_ONE_LINE_USER_MASK; + + switch (line_mode.cmd_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_DUAL_M); break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_QUAD_M); break; + default: break; + } + + switch (line_mode.addr_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_DUAL_M); break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_QUAD_M); break; + default: break; + } + + switch (line_mode.data_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_DUAL_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_DUAL_M); + break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_QUAD_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_QUAD_M); + break; + default: break; + } +} + /** * Update the conf buffer for prep phase * diff --git a/components/hal/esp32c6/include/hal/spi_ll.h b/components/hal/esp32c6/include/hal/spi_ll.h index 05de203fc3..843d0dab21 100644 --- a/components/hal/esp32c6/include/hal/spi_ll.h +++ b/components/hal/esp32c6/include/hal/spi_ll.h @@ -290,7 +290,7 @@ static inline void spi_ll_user_start(spi_dev_t *hw) */ static inline uint32_t spi_ll_get_running_cmd(spi_dev_t *hw) { - return hw->cmd.val; + return hw->cmd.usr; } /** diff --git a/components/hal/esp32h2/include/hal/spi_ll.h b/components/hal/esp32h2/include/hal/spi_ll.h index 68b6434406..89a9f58c45 100644 --- a/components/hal/esp32h2/include/hal/spi_ll.h +++ b/components/hal/esp32h2/include/hal/spi_ll.h @@ -289,7 +289,7 @@ static inline void spi_ll_user_start(spi_dev_t *hw) */ static inline uint32_t spi_ll_get_running_cmd(spi_dev_t *hw) { - return hw->cmd.val; + return hw->cmd.usr; } /** @@ -643,8 +643,8 @@ static inline void spi_ll_master_set_line_mode(spi_dev_t *hw, spi_line_mode_t li hw->ctrl.faddr_dual = (line_mode.addr_lines == 2); hw->ctrl.faddr_quad = (line_mode.addr_lines == 4); hw->ctrl.fread_dual = (line_mode.data_lines == 2); - hw->user.fwrite_dual = (line_mode.data_lines == 2); hw->ctrl.fread_quad = (line_mode.data_lines == 4); + hw->user.fwrite_dual = (line_mode.data_lines == 2); hw->user.fwrite_quad = (line_mode.data_lines == 4); } @@ -1207,13 +1207,27 @@ static inline uint32_t 
spi_ll_slave_hd_get_last_addr(spi_dev_t *hw) #define SPI_LL_SCT_MAGIC_NUMBER (0x2) +/** + * Set conf phase bits len to HW for segment config trans mode. + * + * @param hw Beginning address of the peripheral registers. + * @param conf_bitlen Value of field conf_bitslen in cmd reg. + */ +static inline void spi_ll_set_conf_phase_bits_len(spi_dev_t *hw, uint32_t conf_bitlen) +{ + if (conf_bitlen <= SOC_SPI_SCT_CONF_BITLEN_MAX) { + hw->cmd.conf_bitlen = conf_bitlen; + } +} + /** * Update the conf buffer for conf phase * * @param hw Beginning address of the peripheral registers. + * @param is_end Is this transaction the end of this segment. * @param conf_buffer Conf buffer to be updated. */ -static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], bool is_end) +static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, bool is_end, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) { //user reg: usr_conf_nxt if (is_end) { @@ -1223,6 +1237,49 @@ static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t } } +/** + * Update the line mode of conf buffer for conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param line_mode line mode struct of each phase. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_line_mode_conf_buff(spi_dev_t *hw, spi_line_mode_t line_mode, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] &= ~SPI_LL_ONE_LINE_CTRL_MASK; + conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] &= ~SPI_LL_ONE_LINE_USER_MASK; + + switch (line_mode.cmd_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_DUAL_M); break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_QUAD_M); break; + case 8: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_OCT_M ); break; + default: break; + } + + switch (line_mode.addr_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_DUAL_M); break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_QUAD_M); break; + case 8: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_OCT_M ); break; + default: break; + } + + switch (line_mode.data_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_DUAL_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_DUAL_M); + break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_QUAD_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_QUAD_M); + break; + case 8: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_OCT_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_OCT_M); + break; + default: break; + } +} + /** * Update the conf buffer for prep phase * @@ -1402,7 +1459,7 @@ __attribute__((always_inline)) static inline void spi_ll_init_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) { conf_buffer[SPI_LL_CONF_BITMAP_POS] = 0x7FFF | 
(SPI_LL_SCT_MAGIC_NUMBER << 28); - conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->addr; + conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->addr.usr_addr_value; conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->ctrl.val; conf_buffer[SPI_LL_CLOCK_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->clock.val; conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user.val; diff --git a/components/hal/esp32s2/include/hal/spi_ll.h b/components/hal/esp32s2/include/hal/spi_ll.h index 2a0d6f925e..1bd5aa058a 100644 --- a/components/hal/esp32s2/include/hal/spi_ll.h +++ b/components/hal/esp32s2/include/hal/spi_ll.h @@ -644,10 +644,10 @@ static inline void spi_ll_master_set_line_mode(spi_dev_t *hw, spi_line_mode_t li hw->ctrl.faddr_quad = (line_mode.addr_lines == 4); hw->ctrl.faddr_oct = (line_mode.addr_lines == 8); hw->ctrl.fread_dual = (line_mode.data_lines == 2); - hw->user.fwrite_dual = (line_mode.data_lines == 2); hw->ctrl.fread_quad = (line_mode.data_lines == 4); - hw->user.fwrite_quad = (line_mode.data_lines == 4); hw->ctrl.fread_oct = (line_mode.data_lines == 8); + hw->user.fwrite_dual = (line_mode.data_lines == 2); + hw->user.fwrite_quad = (line_mode.data_lines == 4); hw->user.fwrite_oct = (line_mode.data_lines == 8); } @@ -1518,13 +1518,43 @@ static inline bool spi_ll_tx_get_empty_err(spi_dev_t *hw) #define SPI_LL_SCT_MAGIC_NUMBER (0x2) +/** + * Set conf phase base bits len to HW for segment config trans mode. + * need let transaction gap more than approx 2 us under different freq, calculated by driver layer. + * + * @param hw Beginning address of the peripheral registers. + * @param conf_base Conf base bits len. + */ +static inline void spi_ll_set_conf_base_bitslen(spi_dev_t *hw, uint8_t conf_base) +{ + // 7 bits wide + if(conf_base < 128) { + hw->slv_wrbuf_dlen.conf_base_bitlen = conf_base; + } +} + +/** + * Set conf phase bits len to config buffer for segment config trans mode. + * + * @param hw Beginning address of the peripheral registers. + * @param conf_bitlen Value of field conf_bitslen in cmd reg. + */ +static inline void spi_ll_format_conf_bitslen_buffer(spi_dev_t *hw, uint32_t conf_bitlen, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //cmd reg: conf_bitlen + if (conf_bitlen <= SOC_SPI_SCT_CONF_BITLEN_MAX) { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_CMD_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CONF_BITLEN, conf_bitlen); + } +} + /** * Update the conf buffer for conf phase * * @param hw Beginning address of the peripheral registers. + * @param is_end Is this transaction the end of this segment. * @param conf_buffer Conf buffer to be updated. */ -static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], bool is_end) +static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, bool is_end, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) { //user reg: usr_conf_nxt if (is_end) { @@ -1534,6 +1564,49 @@ static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t } } +/** + * Update the line mode of conf buffer for conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param line_mode line mode struct of each phase. + * @param conf_buffer Conf buffer to be updated. 
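+ * @note On this target, octal (8-line) command, address and data settings are encoded here as well.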
+ */ +static inline void spi_ll_format_line_mode_conf_buff(spi_dev_t *hw, spi_line_mode_t line_mode, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] &= ~SPI_LL_ONE_LINE_CTRL_MASK; + conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] &= ~SPI_LL_ONE_LINE_USER_MASK; + + switch (line_mode.cmd_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_DUAL_M); break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_QUAD_M); break; + case 8: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_OCT_M ); break; + default: break; + } + + switch (line_mode.addr_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_DUAL_M); break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_QUAD_M); break; + case 8: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_OCT_M ); break; + default: break; + } + + switch (line_mode.data_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_DUAL_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_DUAL_M); + break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_QUAD_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_QUAD_M); + break; + case 8: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_OCT_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_OCT_M); + break; + default: break; + } +} + /** * Update the conf buffer for prep phase * diff --git a/components/hal/esp32s3/include/hal/spi_ll.h b/components/hal/esp32s3/include/hal/spi_ll.h index 3c77a79358..3c756ddb87 100644 --- a/components/hal/esp32s3/include/hal/spi_ll.h +++ b/components/hal/esp32s3/include/hal/spi_ll.h @@ -305,7 +305,7 @@ static inline void spi_ll_user_start(spi_dev_t *hw) */ static inline uint32_t spi_ll_get_running_cmd(spi_dev_t *hw) { - return hw->cmd.val; + return hw->cmd.usr; } /** @@ -661,10 +661,10 @@ static inline void spi_ll_master_set_line_mode(spi_dev_t *hw, spi_line_mode_t li hw->ctrl.faddr_quad = (line_mode.addr_lines == 4); hw->ctrl.faddr_oct = (line_mode.addr_lines == 8); hw->ctrl.fread_dual = (line_mode.data_lines == 2); - hw->user.fwrite_dual = (line_mode.data_lines == 2); hw->ctrl.fread_quad = (line_mode.data_lines == 4); - hw->user.fwrite_quad = (line_mode.data_lines == 4); hw->ctrl.fread_oct = (line_mode.data_lines == 8); + hw->user.fwrite_dual = (line_mode.data_lines == 2); + hw->user.fwrite_quad = (line_mode.data_lines == 4); hw->user.fwrite_oct = (line_mode.data_lines == 8); } @@ -1236,13 +1236,28 @@ static inline uint32_t spi_ll_slave_hd_get_last_addr(spi_dev_t *hw) #define SPI_LL_SCT_MAGIC_NUMBER (0x2) + +/** + * Set conf phase bits len to HW for segment config trans mode. + * + * @param hw Beginning address of the peripheral registers. + * @param conf_bitlen Value of field conf_bitslen in cmd reg. 
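+ * @note A `conf_bitlen` larger than SOC_SPI_SCT_CONF_BITLEN_MAX is ignored and the register field is left unchanged.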
+ */ +static inline void spi_ll_set_conf_phase_bits_len(spi_dev_t *hw, uint32_t conf_bitlen) +{ + if (conf_bitlen <= SOC_SPI_SCT_CONF_BITLEN_MAX) { + hw->cmd.conf_bitlen = conf_bitlen; + } +} + /** * Update the conf buffer for conf phase * * @param hw Beginning address of the peripheral registers. + * @param is_end Is this transaction the end of this segment. * @param conf_buffer Conf buffer to be updated. */ -static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], bool is_end) +static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, bool is_end, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) { //user reg: usr_conf_nxt if (is_end) { @@ -1252,6 +1267,49 @@ static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, uint32_t } } +/** + * Update the line mode of conf buffer for conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param line_mode line mode struct of each phase. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_line_mode_conf_buff(spi_dev_t *hw, spi_line_mode_t line_mode, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] &= ~SPI_LL_ONE_LINE_CTRL_MASK; + conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] &= ~SPI_LL_ONE_LINE_USER_MASK; + + switch (line_mode.cmd_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_DUAL_M); break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_QUAD_M); break; + case 8: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_OCT_M ); break; + default: break; + } + + switch (line_mode.addr_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_DUAL_M); break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_QUAD_M); break; + case 8: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_OCT_M ); break; + default: break; + } + + switch (line_mode.data_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_DUAL_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_DUAL_M); + break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_QUAD_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_QUAD_M); + break; + case 8: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_OCT_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_OCT_M); + break; + default: break; + } +} + /** * Update the conf buffer for prep phase * diff --git a/components/hal/include/hal/spi_hal.h b/components/hal/include/hal/spi_hal.h index d7b1e15fd8..c9e5e3017b 100644 --- a/components/hal/include/hal/spi_hal.h +++ b/components/hal/include/hal/spi_hal.h @@ -160,6 +160,7 @@ typedef struct { typedef struct { /* CONF State */ bool seg_end; ///< True: this segment is the end; False: this segment isn't the end; + uint32_t seg_gap_len; ///< spi clock length of CS inactive on config phase for sct /* PREP State */ int cs_setup; ///< Setup 
time of CS active edge before the first SPI clock /* CMD State */ @@ -430,6 +431,17 @@ void spi_hal_sct_load_dma_link(spi_hal_context_t *hal, lldesc_t *rx_seg_head, ll * Deinit SCT mode related registers and hal states */ void spi_hal_sct_deinit(spi_hal_context_t *hal); + +/** + * Set conf_bitslen to HW for sct. + */ +#define spi_hal_sct_set_conf_bits_len(hal, conf_len) spi_ll_set_conf_phase_bits_len((hal)->hw, conf_len) + +/** + * Set conf_bitslen base to HW for sct, only supported on s2. + */ +#define spi_hal_sct_setup_conf_base(hal, conf_base) spi_ll_set_conf_base_bitslen((hal)->hw, conf_base) + #endif //#if SOC_SPI_SCT_SUPPORTED #endif //#if SOC_GPSPI_SUPPORTED diff --git a/components/hal/spi_hal.c b/components/hal/spi_hal.c index cc51fa0b7a..eab8594829 100644 --- a/components/hal/spi_hal.c +++ b/components/hal/spi_hal.c @@ -68,6 +68,7 @@ void spi_hal_sct_init(spi_hal_context_t *hal) s_sct_reset_dma_link(hal); spi_ll_conf_state_enable(hal->hw, true); spi_ll_set_magic_number(hal->hw, SPI_LL_SCT_MAGIC_NUMBER); + spi_ll_disable_int(hal->hw); //trans_done intr enabled in `add device` phase, sct mode shoud use sct_trans_done only spi_ll_enable_intr(hal->hw, SPI_LL_INTR_SEG_DONE); spi_ll_set_intr(hal->hw, SPI_LL_INTR_SEG_DONE); } @@ -77,6 +78,8 @@ void spi_hal_sct_deinit(spi_hal_context_t *hal) spi_ll_conf_state_enable(hal->hw, false); spi_ll_disable_intr(hal->hw, SPI_LL_INTR_SEG_DONE); spi_ll_clear_intr(hal->hw, SPI_LL_INTR_SEG_DONE); + spi_ll_clear_int_stat(hal->hw); + spi_ll_enable_int(hal->hw); //recover trans_done intr } #endif //#if SOC_SPI_SCT_SUPPORTED diff --git a/components/hal/spi_hal_iram.c b/components/hal/spi_hal_iram.c index 3db2f89638..c19d02be0c 100644 --- a/components/hal/spi_hal_iram.c +++ b/components/hal/spi_hal_iram.c @@ -174,6 +174,7 @@ void spi_hal_sct_init_conf_buffer(spi_hal_context_t *hal, uint32_t conf_buffer[S void spi_hal_sct_format_conf_buffer(spi_hal_context_t *hal, const spi_hal_seg_config_t *config, const spi_hal_dev_config_t *dev, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) { + spi_ll_format_line_mode_conf_buff(hal->hw, hal->trans_config.line_mode, conf_buffer); spi_ll_format_prep_phase_conf_buffer(hal->hw, config->cs_setup, conf_buffer); spi_ll_format_cmd_phase_conf_buffer(hal->hw, config->cmd, config->cmd_bits, dev->tx_lsbfirst, conf_buffer); spi_ll_format_addr_phase_conf_buffer(hal->hw, config->addr, config->addr_bits, dev->rx_lsbfirst, conf_buffer); @@ -181,7 +182,11 @@ void spi_hal_sct_format_conf_buffer(spi_hal_context_t *hal, const spi_hal_seg_co spi_ll_format_dout_phase_conf_buffer(hal->hw, config->tx_bitlen, conf_buffer); spi_ll_format_din_phase_conf_buffer(hal->hw, config->rx_bitlen, conf_buffer); spi_ll_format_done_phase_conf_buffer(hal->hw, config->cs_hold, conf_buffer); - spi_ll_format_conf_phase_conf_buffer(hal->hw, conf_buffer, config->seg_end); + spi_ll_format_conf_phase_conf_buffer(hal->hw, config->seg_end, conf_buffer); +#if CONFIG_IDF_TARGET_ESP32S2 + // only s2 support update seg_gap_len by conf_buffer + spi_ll_format_conf_bitslen_buffer(hal->hw, config->seg_gap_len, conf_buffer); +#endif } void spi_hal_sct_load_dma_link(spi_hal_context_t *hal, lldesc_t *rx_seg_head, lldesc_t *tx_seg_head) @@ -275,7 +280,7 @@ spi_hal_dma_desc_status_t spi_hal_sct_link_tx_seg_dma_desc(spi_hal_context_t *ha lldesc_t *internal_head = NULL; s_sct_prepare_tx_seg(hal, conf_buffer, send_buffer, buf_len_bytes, &internal_head); - *used_desc_num = 1 + lldesc_get_required_num(buf_len_bytes); + *used_desc_num += 1 + lldesc_get_required_num(buf_len_bytes); 
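+    //Accumulate into *used_desc_num: one descriptor carries the conf_buffer and the rest carry the TX data,
+    //so repeated calls for successive segments keep a running total.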
return SPI_HAL_DMA_DESC_LINKED; } @@ -331,7 +336,7 @@ spi_hal_dma_desc_status_t spi_hal_sct_link_rx_seg_dma_desc(spi_hal_context_t *ha lldesc_t *internal_head = NULL; s_sct_prepare_rx_seg(hal, recv_buffer, buf_len_bytes, &internal_head); - *used_desc_num = lldesc_get_required_num(buf_len_bytes); + *used_desc_num += lldesc_get_required_num(buf_len_bytes); return SPI_HAL_DMA_DESC_LINKED; } diff --git a/components/soc/esp32c2/include/soc/Kconfig.soc_caps.in b/components/soc/esp32c2/include/soc/Kconfig.soc_caps.in index b65bee4d40..2ec638f74e 100644 --- a/components/soc/esp32c2/include/soc/Kconfig.soc_caps.in +++ b/components/soc/esp32c2/include/soc/Kconfig.soc_caps.in @@ -483,6 +483,10 @@ config SOC_SPI_SCT_BUFFER_NUM_MAX bool default y +config SOC_SPI_SCT_CONF_BITLEN_MAX + hex + default 0x3FFFA + config SOC_MEMSPI_IS_INDEPENDENT bool default y diff --git a/components/soc/esp32c2/include/soc/soc_caps.h b/components/soc/esp32c2/include/soc/soc_caps.h index 194e5a0ba7..05c60b0899 100644 --- a/components/soc/esp32c2/include/soc/soc_caps.h +++ b/components/soc/esp32c2/include/soc/soc_caps.h @@ -231,6 +231,7 @@ #define SOC_SPI_SCT_SUPPORTED_PERIPH(PERIPH_NUM) ((PERIPH_NUM==1) ? 1 : 0) //Support Segmented-Configure-Transfer #define SOC_SPI_SCT_REG_NUM 14 #define SOC_SPI_SCT_BUFFER_NUM_MAX (1 + SOC_SPI_SCT_REG_NUM) //1-word-bitmap + 14-word-regs +#define SOC_SPI_SCT_CONF_BITLEN_MAX 0x3FFFA //18 bits wide reg #define SOC_MEMSPI_IS_INDEPENDENT 1 #define SOC_SPI_MAX_PRE_DIVIDER 16 diff --git a/components/soc/esp32c3/include/soc/Kconfig.soc_caps.in b/components/soc/esp32c3/include/soc/Kconfig.soc_caps.in index dbfdc2ab94..ad46552c11 100644 --- a/components/soc/esp32c3/include/soc/Kconfig.soc_caps.in +++ b/components/soc/esp32c3/include/soc/Kconfig.soc_caps.in @@ -711,6 +711,10 @@ config SOC_SPI_SCT_BUFFER_NUM_MAX bool default y +config SOC_SPI_SCT_CONF_BITLEN_MAX + hex + default 0x3FFFA + config SOC_MEMSPI_IS_INDEPENDENT bool default y diff --git a/components/soc/esp32c3/include/soc/soc_caps.h b/components/soc/esp32c3/include/soc/soc_caps.h index 851bc357e4..468b0d858a 100644 --- a/components/soc/esp32c3/include/soc/soc_caps.h +++ b/components/soc/esp32c3/include/soc/soc_caps.h @@ -315,6 +315,7 @@ #define SOC_SPI_SCT_SUPPORTED_PERIPH(PERIPH_NUM) ((PERIPH_NUM==1) ? 1 : 0) //Support Segmented-Configure-Transfer #define SOC_SPI_SCT_REG_NUM 14 #define SOC_SPI_SCT_BUFFER_NUM_MAX (1 + SOC_SPI_SCT_REG_NUM) //1-word-bitmap + 14-word-regs +#define SOC_SPI_SCT_CONF_BITLEN_MAX 0x3FFFA //18 bits wide reg #define SOC_MEMSPI_IS_INDEPENDENT 1 #define SOC_SPI_MAX_PRE_DIVIDER 16 diff --git a/components/soc/esp32h2/include/soc/Kconfig.soc_caps.in b/components/soc/esp32h2/include/soc/Kconfig.soc_caps.in index adf7a665e6..2e41f0cb3c 100644 --- a/components/soc/esp32h2/include/soc/Kconfig.soc_caps.in +++ b/components/soc/esp32h2/include/soc/Kconfig.soc_caps.in @@ -971,6 +971,10 @@ config SOC_SPI_SCT_BUFFER_NUM_MAX bool default y +config SOC_SPI_SCT_CONF_BITLEN_MAX + hex + default 0x3FFFA + config SOC_MEMSPI_IS_INDEPENDENT bool default y diff --git a/components/soc/esp32h2/include/soc/soc_caps.h b/components/soc/esp32h2/include/soc/soc_caps.h index b5e7bb534b..7dc38d4486 100644 --- a/components/soc/esp32h2/include/soc/soc_caps.h +++ b/components/soc/esp32h2/include/soc/soc_caps.h @@ -390,6 +390,7 @@ #define SOC_SPI_SCT_SUPPORTED_PERIPH(PERIPH_NUM) ((PERIPH_NUM==1) ? 
1 : 0) //Support Segmented-Configure-Transfer #define SOC_SPI_SCT_REG_NUM 14 #define SOC_SPI_SCT_BUFFER_NUM_MAX (1 + SOC_SPI_SCT_REG_NUM) //1-word-bitmap + 14-word-regs +#define SOC_SPI_SCT_CONF_BITLEN_MAX 0x3FFFA //18 bits wide reg #define SOC_MEMSPI_IS_INDEPENDENT 1 #define SOC_SPI_MAX_PRE_DIVIDER 16 diff --git a/components/soc/esp32s2/include/soc/Kconfig.soc_caps.in b/components/soc/esp32s2/include/soc/Kconfig.soc_caps.in index b4b2f5b77e..d986427ee4 100644 --- a/components/soc/esp32s2/include/soc/Kconfig.soc_caps.in +++ b/components/soc/esp32s2/include/soc/Kconfig.soc_caps.in @@ -711,6 +711,10 @@ config SOC_SPI_SCT_BUFFER_NUM_MAX bool default y +config SOC_SPI_SCT_CONF_BITLEN_MAX + hex + default 0x7FFFFD + config SOC_MEMSPI_IS_INDEPENDENT bool default y diff --git a/components/soc/esp32s2/include/soc/soc_caps.h b/components/soc/esp32s2/include/soc/soc_caps.h index 4bacf33d89..4c5365fc66 100644 --- a/components/soc/esp32s2/include/soc/soc_caps.h +++ b/components/soc/esp32s2/include/soc/soc_caps.h @@ -307,6 +307,7 @@ #define SOC_SPI_SCT_SUPPORTED_PERIPH(PERIPH_NUM) (((PERIPH_NUM==1) || (PERIPH_NUM==2)) ? 1 : 0) //Support Segmented-Configure-Transfer #define SOC_SPI_SCT_REG_NUM 27 #define SOC_SPI_SCT_BUFFER_NUM_MAX (1 + SOC_SPI_SCT_REG_NUM) //1-word-bitmap + 27-word-regs +#define SOC_SPI_SCT_CONF_BITLEN_MAX 0x7FFFFD //23 bit wide reg #define SOC_MEMSPI_IS_INDEPENDENT 1 #define SOC_MEMSPI_SRC_FREQ_80M_SUPPORTED 1 diff --git a/components/soc/esp32s3/include/soc/Kconfig.soc_caps.in b/components/soc/esp32s3/include/soc/Kconfig.soc_caps.in index 8437b6513d..7314e76574 100644 --- a/components/soc/esp32s3/include/soc/Kconfig.soc_caps.in +++ b/components/soc/esp32s3/include/soc/Kconfig.soc_caps.in @@ -851,6 +851,10 @@ config SOC_SPI_SCT_BUFFER_NUM_MAX bool default y +config SOC_SPI_SCT_CONF_BITLEN_MAX + hex + default 0x3FFFA + config SOC_MEMSPI_SRC_FREQ_120M bool default y diff --git a/components/soc/esp32s3/include/soc/soc_caps.h b/components/soc/esp32s3/include/soc/soc_caps.h index 9b6240a153..4d8eba5221 100644 --- a/components/soc/esp32s3/include/soc/soc_caps.h +++ b/components/soc/esp32s3/include/soc/soc_caps.h @@ -336,6 +336,7 @@ #define SOC_SPI_SCT_SUPPORTED_PERIPH(PERIPH_NUM) ((PERIPH_NUM==1) ? 
1 : 0) //Support Segmented-Configure-Transfer #define SOC_SPI_SCT_REG_NUM 14 #define SOC_SPI_SCT_BUFFER_NUM_MAX (1 + SOC_SPI_SCT_REG_NUM) //1-word-bitmap + 14-word-regs +#define SOC_SPI_SCT_CONF_BITLEN_MAX 0x3FFFA //18 bits wide reg #define SOC_MEMSPI_SRC_FREQ_120M 1 #define SOC_MEMSPI_SRC_FREQ_80M_SUPPORTED 1 From a307096ec027fd73b2589fca27fc5f080c44f957 Mon Sep 17 00:00:00 2001 From: wanlei Date: Fri, 10 Mar 2023 19:31:54 +0800 Subject: [PATCH 4/5] spi_master: sct mode supported on c6 --- .../spi/master/main/test_spi_master_sct.c | 52 +-- components/hal/esp32c6/include/hal/spi_ll.h | 328 +++++++++++++++++- .../esp32c6/include/soc/Kconfig.soc_caps.in | 16 + components/soc/esp32c6/include/soc/soc_caps.h | 6 + 4 files changed, 362 insertions(+), 40 deletions(-) diff --git a/components/driver/test_apps/spi/master/main/test_spi_master_sct.c b/components/driver/test_apps/spi/master/main/test_spi_master_sct.c index 915a520b16..6f317d31ab 100644 --- a/components/driver/test_apps/spi/master/main/test_spi_master_sct.c +++ b/components/driver/test_apps/spi/master/main/test_spi_master_sct.c @@ -41,27 +41,16 @@ static void hd_master(void) { spi_device_handle_t handle; - spi_bus_config_t buscfg={ - .mosi_io_num = SPI2_IOMUX_PIN_NUM_MOSI, - .miso_io_num = SPI2_IOMUX_PIN_NUM_MISO, - .sclk_io_num = SPI2_IOMUX_PIN_NUM_CLK, - .quadwp_io_num = -1, - .quadhd_io_num = -1, - .max_transfer_sz = 4092 * 10, - }; + spi_bus_config_t buscfg = SPI_BUS_TEST_DEFAULT_CONFIG(); + buscfg.max_transfer_sz = 4092 * 10; - spi_device_interface_config_t devcfg = { - .command_bits = 8, - .address_bits = 8, - .dummy_bits = 8, - .clock_speed_hz = 10 * 1000, - .duty_cycle_pos = 128, //50% duty cycle - .mode = 0, - .spics_io_num = SPI2_IOMUX_PIN_NUM_CS, - .cs_ena_posttrans = 3, //Keep the CS low 3 cycles after transaction, to stop slave from missing the last bit when CS has less propagation delay than CLK - .queue_size = 3, - .flags = SPI_DEVICE_HALFDUPLEX, - }; + spi_device_interface_config_t devcfg = SPI_DEVICE_TEST_DEFAULT_CONFIG(); + devcfg.command_bits = 8; + devcfg.address_bits = 8; + devcfg.dummy_bits = 8; + devcfg.clock_speed_hz = 10 * 1000; + devcfg.input_delay_ns = 0; + devcfg.flags = SPI_DEVICE_HALFDUPLEX; TEST_ESP_OK(spi_bus_initialize(SPI2_HOST, &buscfg, SPI_DMA_CH_AUTO)); TEST_ESP_OK(spi_bus_add_device(SPI2_HOST, &devcfg, &handle)); @@ -181,26 +170,11 @@ static void hd_master(void) static void hd_slave(void) { - spi_bus_config_t bus_cfg = { - .miso_io_num = SPI2_IOMUX_PIN_NUM_MISO, - .mosi_io_num = SPI2_IOMUX_PIN_NUM_MOSI, - .sclk_io_num = SPI2_IOMUX_PIN_NUM_CLK, - .quadwp_io_num = -1, - .quadhd_io_num = -1, - .max_transfer_sz = 4092 * 4, - }; + spi_bus_config_t buscfg = SPI_BUS_TEST_DEFAULT_CONFIG(); + spi_slave_hd_slot_config_t slave_hd_cfg = SPI_SLOT_TEST_DEFAULT_CONFIG(); + slave_hd_cfg.dma_chan = SPI_DMA_CH_AUTO, - spi_slave_hd_slot_config_t slave_hd_cfg = { - .spics_io_num = SPI2_IOMUX_PIN_NUM_CS, - .dma_chan = SPI_DMA_CH_AUTO, - .flags = 0, - .mode = 0, - .command_bits = 8, - .address_bits = 8, - .dummy_bits = 8, - .queue_size = 4, - }; - TEST_ESP_OK(spi_slave_hd_init(SPI2_HOST, &bus_cfg, &slave_hd_cfg)); + TEST_ESP_OK(spi_slave_hd_init(SPI2_HOST, &buscfg, &slave_hd_cfg)); spi_slave_hd_data_t *ret_trans = NULL; diff --git a/components/hal/esp32c6/include/hal/spi_ll.h b/components/hal/esp32c6/include/hal/spi_ll.h index 843d0dab21..5e38d292a1 100644 --- a/components/hal/esp32c6/include/hal/spi_ll.h +++ b/components/hal/esp32c6/include/hal/spi_ll.h @@ -644,8 +644,8 @@ static inline void 
spi_ll_master_set_line_mode(spi_dev_t *hw, spi_line_mode_t li hw->ctrl.faddr_dual = (line_mode.addr_lines == 2); hw->ctrl.faddr_quad = (line_mode.addr_lines == 4); hw->ctrl.fread_dual = (line_mode.data_lines == 2); - hw->user.fwrite_dual = (line_mode.data_lines == 2); hw->ctrl.fread_quad = (line_mode.data_lines == 4); + hw->user.fwrite_dual = (line_mode.data_lines == 2); hw->user.fwrite_quad = (line_mode.data_lines == 4); } @@ -1167,6 +1167,332 @@ static inline uint32_t spi_ll_slave_hd_get_last_addr(spi_dev_t *hw) return hw->slave1.slv_last_addr; } + +/*------------------------------------------------------------------------------ + * Segmented-Configure-Transfer + *----------------------------------------------------------------------------*/ +#define SPI_LL_CONF_BUF_SET_BIT(_w, _m) ({ \ + (_w) |= (_m); \ + }) +#define SPI_LL_CONF_BUF_CLR_BIT(_w, _m) ({ \ + (_w) &= ~(_m); \ + }) + +#define SPI_LL_CONF_BUF_SET_FIELD(_w, _f, val) ({ \ + ((_w) = (((_w) & ~((_f##_V) << (_f##_S))) | (((val) & (_f##_V))<<(_f##_S)))); \ + }) + +#define SPI_LL_CONF_BUF_GET_FIELD(_w, _f) ({ \ + (((_w) >> (_f##_S)) & (_f##_V)); \ + }) + +//This offset is 1, for bitmap +#define SPI_LL_CONF_BUFFER_OFFSET (1) +//bitmap must be the first +#define SPI_LL_CONF_BITMAP_POS (0) + +#define SPI_LL_ADDR_REG_POS (0) +#define SPI_LL_CTRL_REG_POS (1) +#define SPI_LL_CLOCK_REG_POS (2) +#define SPI_LL_USER_REG_POS (3) +#define SPI_LL_USER1_REG_POS (4) +#define SPI_LL_USER2_REG_POS (5) +#define SPI_LL_MS_DLEN_REG_POS (6) +#define SPI_LL_MISC_REG_POS (7) +#define SPI_LL_DIN_MODE_REG_POS (8) +#define SPI_LL_DIN_NUM_REG_POS (9) +#define SPI_LL_DOUT_MODE_REG_POS (10) +#define SPI_LL_DMA_CONF_REG_POS (11) +#define SPI_LL_DMA_INT_ENA_REG_POS (12) +#define SPI_LL_DMA_INT_CLR_REG_POS (13) + +#define SPI_LL_SCT_MAGIC_NUMBER (0x2) + +/** + * Set conf phase bits len to HW for segment config trans mode. + * + * @param hw Beginning address of the peripheral registers. + * @param conf_bitlen Value of field conf_bitslen in cmd reg. + */ +static inline void spi_ll_set_conf_phase_bits_len(spi_dev_t *hw, uint32_t conf_bitlen) +{ + if (conf_bitlen <= SOC_SPI_SCT_CONF_BITLEN_MAX) { + hw->cmd.conf_bitlen = conf_bitlen; + } +} + +/** + * Update the conf buffer for conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param is_end Is this transaction the end of this segment. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_conf_phase_conf_buffer(spi_dev_t *hw, bool is_end, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_conf_nxt + if (is_end) { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_CONF_NXT_M); + } else { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_CONF_NXT_M); + } +} + +/** + * Update the line mode of conf buffer for conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param line_mode line mode struct of each phase. + * @param conf_buffer Conf buffer to be updated. 
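+ * @note Line widths other than 2 or 4 leave the dual/quad bits cleared, so the corresponding phase falls back to single-line mode.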
+ */ +static inline void spi_ll_format_line_mode_conf_buff(spi_dev_t *hw, spi_line_mode_t line_mode, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] &= ~SPI_LL_ONE_LINE_CTRL_MASK; + conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] &= ~SPI_LL_ONE_LINE_USER_MASK; + + switch (line_mode.cmd_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_DUAL_M); break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FCMD_QUAD_M); break; + default: break; + } + + switch (line_mode.addr_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_DUAL_M); break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FADDR_QUAD_M); break; + default: break; + } + + switch (line_mode.data_lines) + { + case 2: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_DUAL_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_DUAL_M); + break; + case 4: SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FREAD_QUAD_M ); + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_FWRITE_QUAD_M); + break; + default: break; + } +} + +/** + * Update the conf buffer for prep phase + * + * @param hw Beginning address of the peripheral registers. + * @param setup CS setup time + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_prep_phase_conf_buffer(spi_dev_t *hw, uint8_t setup, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: cs_setup + if(setup) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_M); + } + + //user1 reg: cs_setup_time + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_SETUP_TIME, setup - 1); +} + +/** + * Update the conf buffer for cmd phase + * + * @param hw Beginning address of the peripheral registers. + * @param cmd Command value + * @param cmdlen Length of the cmd phase + * @param lsbfirst Whether LSB first + * @param conf_buffer Conf buffer to be updated. 
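+ * @note For MSB-first transfers (lsbfirst == false) the command value is bit-swapped with HAL_SPI_SWAP_DATA_TX() before being written into the USER2 register image.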
+ */ +static inline void spi_ll_format_cmd_phase_conf_buffer(spi_dev_t *hw, uint16_t cmd, int cmdlen, bool lsbfirst, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_command + if (cmdlen) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_M); + } + + //user2 reg: usr_command_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_BITLEN, cmdlen - 1); + + //user2 reg: usr_command_value + if (lsbfirst) { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_VALUE, cmd); + } else { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_COMMAND_VALUE, HAL_SPI_SWAP_DATA_TX(cmd, cmdlen)); + } +} + +/** + * Update the conf buffer for addr phase + * + * @param hw Beginning address of the peripheral registers. + * @param addr Address to set + * @param addrlen Length of the address phase + * @param lsbfirst whether the LSB first feature is enabled. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_addr_phase_conf_buffer(spi_dev_t *hw, uint64_t addr, int addrlen, bool lsbfirst, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_addr + if (addrlen) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_M); + } + + //user1 reg: usr_addr_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_BITLEN, addrlen - 1); + + //addr reg: addr + if (lsbfirst) { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_VALUE, HAL_SWAP32(addr)); + } else { + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_ADDR_VALUE, (addr << (32 - addrlen))); + } +} + +/** + * Update the conf buffer for dummy phase + * + * @param hw Beginning address of the peripheral registers. + * @param dummy_n Dummy cycles used. 0 to disable the dummy phase. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_dummy_phase_conf_buffer(spi_dev_t *hw, int dummy_n, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: usr_dummy + if (dummy_n) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_M); + } + + //user1 reg: usr_dummy_cyclelen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_DUMMY_CYCLELEN, dummy_n - 1); +} + +/** + * Update the conf buffer for dout phase + * + * @param hw Beginning address of the peripheral registers. + * @param bitlen output length, in bits. + * @param conf_buffer Conf buffer to be updated. 
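+ * @note Passing 0 disables the MOSI phase and TX DMA for this segment (SPI_USR_MOSI and SPI_DMA_TX_ENA are cleared in the register images).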
+ */ +static inline void spi_ll_format_dout_phase_conf_buffer(spi_dev_t *hw, int bitlen, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + if (bitlen) { + //user reg: usr_mosi + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MOSI_M); + //dma_conf reg: dma_tx_ena + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_TX_ENA_M); + //ms_dlen reg: ms_data_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_MS_DATA_BITLEN, bitlen - 1); + } else { + //user reg: usr_mosi + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MOSI_M); + //dma_conf reg: dma_tx_ena + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_TX_ENA_M); + } +} + +/** + * Update the conf buffer for din phase + * + * @param hw Beginning address of the peripheral registers. + * @param bitlen input length, in bits. + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_din_phase_conf_buffer(spi_dev_t *hw, int bitlen, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + if (bitlen) { + //user reg: usr_miso + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MISO_M); + //dma_conf reg: dma_rx_ena + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_RX_ENA_M); + //ms_dlen reg: ms_data_bitlen + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_MS_DATA_BITLEN, bitlen - 1); + } else { + //user reg: usr_miso + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_USR_MISO_M); + //dma_conf reg: dma_rx_ena + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_DMA_RX_ENA_M); + } +} + +/** + * Update the conf buffer for done phase + * + * @param hw Beginning address of the peripheral registers. + * @param setup CS hold time + * @param conf_buffer Conf buffer to be updated. + */ +static inline void spi_ll_format_done_phase_conf_buffer(spi_dev_t *hw, int hold, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + //user reg: cs_hold + if(hold) { + SPI_LL_CONF_BUF_SET_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_M); + } else { + SPI_LL_CONF_BUF_CLR_BIT(conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_M); + } + + //user1 reg: cs_hold_time + SPI_LL_CONF_BUF_SET_FIELD(conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET], SPI_CS_HOLD_TIME, hold); +} + +/** + * Initialize the conf buffer: + * + * - init bitmap + * - save all register values into the rest of the conf buffer words + * + * @param hw Beginning address of the peripheral registers. + * @param conf_buffer Conf buffer to be updated. 
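+ * @note Word 0 of the buffer is the bitmap required by HW (with the SCT magic number in its upper bits); the remaining words are snapshots of the current register values.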
+ */ +__attribute__((always_inline)) +static inline void spi_ll_init_conf_buffer(spi_dev_t *hw, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) +{ + conf_buffer[SPI_LL_CONF_BITMAP_POS] = 0x7FFF | (SPI_LL_SCT_MAGIC_NUMBER << 28); + conf_buffer[SPI_LL_ADDR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->addr.usr_addr_value; + conf_buffer[SPI_LL_CTRL_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->ctrl.val; + conf_buffer[SPI_LL_CLOCK_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->clock.val; + conf_buffer[SPI_LL_USER_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user.val; + conf_buffer[SPI_LL_USER1_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user1.val; + conf_buffer[SPI_LL_USER2_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->user2.val; + conf_buffer[SPI_LL_MS_DLEN_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->ms_dlen.val; + conf_buffer[SPI_LL_MISC_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->misc.val; + conf_buffer[SPI_LL_DIN_MODE_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->din_mode.val; + conf_buffer[SPI_LL_DIN_NUM_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->din_num.val; + conf_buffer[SPI_LL_DOUT_MODE_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dout_mode.val; + conf_buffer[SPI_LL_DMA_CONF_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_conf.val; + conf_buffer[SPI_LL_DMA_INT_ENA_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_int_ena.val; + conf_buffer[SPI_LL_DMA_INT_CLR_REG_POS + SPI_LL_CONF_BUFFER_OFFSET] = hw->dma_int_clr.val; +} + +/** + * Enable/Disable the conf phase + * + * @param hw Beginning address of the peripheral registers. + * @param enable True: enable; False: disable + */ +static inline void spi_ll_conf_state_enable(spi_dev_t *hw, bool enable) +{ + hw->slave.usr_conf = enable; +} + +/** + * Set Segmented-Configure-Transfer required magic value + * + * @param hw Beginning address of the peripheral registers. + * @param magic_value magic value + */ +static inline void spi_ll_set_magic_number(spi_dev_t *hw, uint8_t magic_value) +{ + hw->slave.dma_seg_magic_value = magic_value; +} + #undef SPI_LL_RST_MASK #undef SPI_LL_UNUSED_INT_MASK diff --git a/components/soc/esp32c6/include/soc/Kconfig.soc_caps.in b/components/soc/esp32c6/include/soc/Kconfig.soc_caps.in index fe8eaf5aba..50e474b0a0 100644 --- a/components/soc/esp32c6/include/soc/Kconfig.soc_caps.in +++ b/components/soc/esp32c6/include/soc/Kconfig.soc_caps.in @@ -963,6 +963,22 @@ config SOC_SPI_SUPPORT_CLK_RC_FAST bool default y +config SOC_SPI_SCT_SUPPORTED + bool + default y + +config SOC_SPI_SCT_REG_NUM + int + default 14 + +config SOC_SPI_SCT_BUFFER_NUM_MAX + bool + default y + +config SOC_SPI_SCT_CONF_BITLEN_MAX + hex + default 0x3FFFA + config SOC_MEMSPI_IS_INDEPENDENT bool default y diff --git a/components/soc/esp32c6/include/soc/soc_caps.h b/components/soc/esp32c6/include/soc/soc_caps.h index 9179ab6402..31e6debdac 100644 --- a/components/soc/esp32c6/include/soc/soc_caps.h +++ b/components/soc/esp32c6/include/soc/soc_caps.h @@ -392,6 +392,12 @@ // host_id = 0 -> SPI0/SPI1, host_id = 1 -> SPI2, #define SOC_SPI_PERIPH_SUPPORT_MULTILINE_MODE(host_id) ({(void)host_id; 1;}) +#define SOC_SPI_SCT_SUPPORTED 1 +#define SOC_SPI_SCT_SUPPORTED_PERIPH(PERIPH_NUM) ((PERIPH_NUM==1) ? 
1 : 0) //Support Segmented-Configure-Transfer +#define SOC_SPI_SCT_REG_NUM 14 +#define SOC_SPI_SCT_BUFFER_NUM_MAX (1 + SOC_SPI_SCT_REG_NUM) //1-word-bitmap + 14-word-regs +#define SOC_SPI_SCT_CONF_BITLEN_MAX 0x3FFFA //18 bits wide reg + #define SOC_MEMSPI_IS_INDEPENDENT 1 #define SOC_SPI_MAX_PRE_DIVIDER 16 From 51ffd40843823976800295d5e976ac7527e88e42 Mon Sep 17 00:00:00 2001 From: wanlei Date: Mon, 15 Jan 2024 20:02:09 +0800 Subject: [PATCH 5/5] feat(spi_master): rebase dma sct mode support, rename APIs, use malloc conf_buffer --- .../include/driver/spi_master.h | 104 ------- .../include/esp_private/spi_master_internal.h | 114 +++++++ .../esp_driver_spi/src/gpspi/spi_master.c | 293 ++++++++++++++---- .../spi_bench_mark/include/spi_performance.h | 4 +- .../test_apps/master/main/CMakeLists.txt | 7 +- .../master/main/test_spi_master_sct.c | 43 ++- components/hal/esp32s2/include/hal/spi_ll.h | 15 +- components/hal/include/hal/spi_hal.h | 119 +------ components/hal/spi_hal.c | 11 - components/hal/spi_hal_iram.c | 165 +--------- 10 files changed, 422 insertions(+), 453 deletions(-) create mode 100644 components/esp_driver_spi/include/esp_private/spi_master_internal.h rename components/{driver/test_apps/spi => esp_driver_spi/test_apps}/master/main/test_spi_master_sct.c (83%) diff --git a/components/esp_driver_spi/include/driver/spi_master.h b/components/esp_driver_spi/include/driver/spi_master.h index 3c3698046f..bfd5ebc0c8 100644 --- a/components/esp_driver_spi/include/driver/spi_master.h +++ b/components/esp_driver_spi/include/driver/spi_master.h @@ -158,48 +158,6 @@ typedef struct { uint8_t dummy_bits; ///< The dummy length in this transaction, in bits. } spi_transaction_ext_t ; -#if SOC_SPI_SCT_SUPPORTED -/** - * @Backgrounds: `SCT Mode` - * Segmented-Configure-Transfer Mode - * - * In this mode, you could pre-configure multiple SPI transactions. - * - These whole transaction is called one `Segmented-Configure-Transaction` or one `SCT`. - * - Each of the transactions in one `SCT` is called one `Segment`. - * - * Per segment can have different SPI phase configurations - */ - -/** - * SPI SCT Mode transaction flags - */ -#define SPI_SEG_TRANS_PREP_LEN_UPDATED (1<<0) ///< Use `spi_seg_transaction_t: cs_ena_pretrans` in this segment. -#define SPI_SEG_TRANS_CMD_LEN_UPDATED (1<<1) ///< Use `spi_seg_transaction_t: command_bits` in this segment. -#define SPI_SEG_TRANS_ADDR_LEN_UPDATED (1<<2) ///< Use `spi_seg_transaction_t: address_bits` in this segment. -#define SPI_SEG_TRANS_DUMMY_LEN_UPDATED (1<<3) ///< Use `spi_seg_transaction_t: dummy_bits` in this segment. -#define SPI_SEG_TRANS_DONE_LEN_UPDATED (1<<4) ///< Use `spi_seg_transaction_t: cs_ena_posttrans` in this segment. - -/** - * This struct is for SPI SCT (Segmented-Configure-Transfer) Mode. - * - * By default, length of each SPI Phase will not change per segment. Each segment will use the phase length you set when `spi_bus_add_device()` - * However, you could force a segment to use its custom phase length. To achieve this, set the `SPI_SEG_TRANS_XX` flags, to customize phase length. - */ -typedef struct { - struct spi_transaction_t base; ///< Transaction data, so that pointer to spi_transaction_t can be converted into spi_seg_transaction_t - uint8_t cs_ena_pretrans; ///< Amount of SPI bit-cycles the cs should be activated before the transmission - uint8_t cs_ena_posttrans; ///< Amount of SPI bit-cycles the cs should stay active after the transmission - uint8_t command_bits; ///< The command length in this transaction, in bits. 
- uint8_t address_bits; ///< The address length in this transaction, in bits. - uint8_t dummy_bits; ///< The dummy length in this transaction, in bits. - uint32_t seg_gap_clock_len; ///< The len of CS inactive time between segments, in clocks. - uint32_t seg_trans_flags; ///< SCT specific flags. See `SPI_SEG_TRANS_XXX` macros. - - /**< Necessary buffer required by HW, don't touch this. >**/ - uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]; -} spi_seg_transaction_t; -#endif //#if SOC_SPI_SCT_SUPPORTED - typedef struct spi_device_t *spi_device_handle_t; ///< Handle for a device on a SPI bus /** * @brief Allocate a device on a SPI bus @@ -301,68 +259,6 @@ esp_err_t spi_device_get_trans_result(spi_device_handle_t handle, spi_transactio */ esp_err_t spi_device_transmit(spi_device_handle_t handle, spi_transaction_t *trans_desc); -#if SOC_SPI_SCT_SUPPORTED -/** - * @brief Enable/Disable Segmented-Configure-Transfer (SCT) mode - * - * Search for `@Backgrounds: `SCT Mode`` in this header file to know what is SCT mode - * - * @note This API isn't thread safe. Besides, after enabling this, current SPI host will be switched into SCT mode. - * Therefore, never call this API when in multiple threads, or when an SPI transaction is ongoing (on this SPI host). - * - * @param handle Device handle obtained using spi_host_add_dev - * @param enable True: to enable SCT mode; False: to disable SCT mode - * - * @return - * - ESP_OK: On success - * - ESP_ERR_INVALID_ARG: Invalid arguments - * - ESP_ERR_INVALID_STATE: Invalid states, e.g.: an SPI polling transaction is ongoing, SPI internal Queue isn't empty, etc. - */ -esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool enable); - - -/** - * @brief Queue an SPI Segmented-Configure-Transaction (SCT) list for interrupt transaction execution. - * - * Search for `@Backgrounds: `SCT Mode`` in this header file to know what is SCT mode - * - * @note After calling this API, call `spi_device_get_segment_trans_result` to get the transaction results. - * - * @param handle Device handle obtained using spi_host_add_dev - * @param seg_trans_desc Pointer to the transaction segments list head (a one-segment-list is also acceptable) - * @param seg_num Segment number - * @param ticks_to_wait Ticks to wait until there's room in the queue; use portMAX_DELAY to never time out. - * - * @return - * - ESP_OK: On success - * - ESP_ERR_INVALID_ARG: Invalid arguments - * - ESP_ERR_INVALID_STATE: Invalid states, e.g.: an SPI polling transaction is ongoing, SCT mode isn't enabled, DMA descriptors not enough, etc. - * - ESP_ERR_TIMEOUT: Timeout, this SCT transaction isn't queued successfully - */ -esp_err_t spi_device_queue_segment_trans(spi_device_handle_t handle, spi_seg_transaction_t *seg_trans_desc, uint32_t seg_num, TickType_t ticks_to_wait); - - -/** - * @brief Get the result of an SPI Segmented-Configure-Transaction (SCT). - * - * Search for `@Backgrounds: `SCT Mode`` in this header file to know what is SCT mode - * - * @note Until this API returns (with `ESP_OK`), you can now recycle the memory used for this SCT list (pointed by `seg_trans_desc`). - * You must maintain the SCT list related memory before this API returns, otherwise the SCT transaction may fail - * - * @param handle Device handle obtained using spi_host_add_dev - * @param[out] seg_trans_desc Pointer to the completed SCT list head (then you can recycle this list of memory). - * @param ticks_to_wait Ticks to wait until there's a returned item; use portMAX_DELAY to never time out. 
- * - * @return - * - ESP_OK: On success - * - ESP_ERR_INVALID_ARG: Invalid arguments - * - ESP_ERR_INVALID_STATE: Invalid states, e.g.: SCT mode isn't enabled, etc. - * - ESP_ERR_TIMEOUT: Timeout, didn't get a completed SCT transaction - */ -esp_err_t spi_device_get_segment_trans_result(spi_device_handle_t handle, spi_seg_transaction_t **seg_trans_desc, TickType_t ticks_to_wait); -#endif //#if SOC_SPI_SCT_SUPPORTED - /** * @brief Immediately start a polling transaction. * diff --git a/components/esp_driver_spi/include/esp_private/spi_master_internal.h b/components/esp_driver_spi/include/esp_private/spi_master_internal.h new file mode 100644 index 0000000000..99ecd5e6d7 --- /dev/null +++ b/components/esp_driver_spi/include/esp_private/spi_master_internal.h @@ -0,0 +1,114 @@ +/* + * SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @brief + * This file contains SPI Master private/internal APIs. Private/Internal APIs are: + * - Visible to other IDF components + * - Suggest NOT to use these APIs in your applications + * - We don't provide backward compatibility, and safety on these APIs either + */ + +#pragma once + +#include "driver/spi_master.h" + +#if SOC_SPI_SCT_SUPPORTED +/** + * @Backgrounds: `SCT Mode` + * Segmented-Configure-Transfer Mode + * + * In this mode, you could pre-configure multiple SPI transactions. + * - These whole transaction is called one `Segmented-Configure-Transaction` or one `SCT`. + * - Each of the transactions in one `SCT` is called one `Segment`. + * + * Per segment can have different SPI phase configurations + */ + +/** + * SPI SCT Mode transaction flags + */ +#define SPI_MULTI_TRANS_PREP_LEN_UPDATED (1<<0) ///< Use `spi_multi_transaction_t: cs_ena_pretrans` in this segment. +#define SPI_MULTI_TRANS_CMD_LEN_UPDATED (1<<1) ///< Use `spi_multi_transaction_t: command_bits` in this segment. +#define SPI_MULTI_TRANS_ADDR_LEN_UPDATED (1<<2) ///< Use `spi_multi_transaction_t: address_bits` in this segment. +#define SPI_MULTI_TRANS_DUMMY_LEN_UPDATED (1<<3) ///< Use `spi_multi_transaction_t: dummy_bits` in this segment. +#define SPI_MULTI_TRANS_DONE_LEN_UPDATED (1<<4) ///< Use `spi_multi_transaction_t: cs_ena_posttrans` in this segment. + +/** + * This struct is for SPI SCT (Segmented-Configure-Transfer) Mode. + * + * By default, length of each SPI Phase will not change per segment. Each segment will use the phase length you set when `spi_bus_add_device()` + * However, you could force a segment to use its custom phase length. To achieve this, set the `SPI_SEG_TRANS_XX` flags, to customize phase length. + */ +typedef struct { + struct spi_transaction_t base; ///< Transaction data, so that pointer to spi_transaction_t can be converted into spi_multi_transaction_t + uint8_t cs_ena_pretrans; ///< Amount of SPI bit-cycles the cs should be activated before the transmission + uint8_t cs_ena_posttrans; ///< Amount of SPI bit-cycles the cs should stay active after the transmission + uint8_t command_bits; ///< The command length in this transaction, in bits. + uint8_t address_bits; ///< The address length in this transaction, in bits. + uint8_t dummy_bits; ///< The dummy length in this transaction, in bits. + uint32_t sct_gap_len; ///< The len of CS inactive time between segments, in clocks. + uint32_t seg_trans_flags; ///< SCT specific flags. See `SPI_SEG_TRANS_XXX` macros. 
+} spi_multi_transaction_t; + +/** + * @brief Enable/Disable Segmented-Configure-Transfer (SCT) mode + * + * Search for `@Backgrounds: `SCT Mode`` in this header file to know what is SCT mode + * + * @note This API isn't thread safe. Besides, after enabling this, current SPI host will be switched into SCT mode. + * Therefore, never call this API when in multiple threads, or when an SPI transaction is ongoing (on this SPI host). + * + * @param handle Device handle obtained using spi_host_add_dev + * @param enable True: to enable SCT mode; False: to disable SCT mode + * + * @return + * - ESP_OK: On success + * - ESP_ERR_INVALID_ARG: Invalid arguments + * - ESP_ERR_INVALID_STATE: Invalid states, e.g.: an SPI polling transaction is ongoing, SPI internal Queue isn't empty, etc. + */ +esp_err_t spi_bus_multi_trans_mode_enable(spi_device_handle_t handle, bool enable); + +/** + * @brief Queue an SPI Segmented-Configure-Transaction (SCT) list for interrupt transaction execution. + * + * Search for `@Backgrounds: `SCT Mode`` in this header file to know what is SCT mode + * + * @note After calling this API, call `spi_device_get_multi_trans_result` to get the transaction results. + * + * @param handle Device handle obtained using spi_host_add_dev + * @param seg_trans_desc Pointer to the transaction segments list head (a one-segment-list is also acceptable) + * @param trans_num Transaction number in this segment + * @param ticks_to_wait Ticks to wait until there's room in the queue; use portMAX_DELAY to never time out. + * + * @return + * - ESP_OK: On success + * - ESP_ERR_INVALID_ARG: Invalid arguments + * - ESP_ERR_INVALID_STATE: Invalid states, e.g.: an SPI polling transaction is ongoing, SCT mode isn't enabled, DMA descriptors not enough, etc. + * - ESP_ERR_TIMEOUT: Timeout, this SCT transaction isn't queued successfully + */ +esp_err_t spi_device_queue_multi_trans(spi_device_handle_t handle, spi_multi_transaction_t *seg_trans_desc, uint32_t trans_num, TickType_t ticks_to_wait); + +/** + * @brief Get the result of an SPI Segmented-Configure-Transaction (SCT). + * + * Search for `@Backgrounds: `SCT Mode`` in this header file to know what is SCT mode + * + * @note Until this API returns (with `ESP_OK`), you can now recycle the memory used for this SCT list (pointed by `seg_trans_desc`). + * You must maintain the SCT list related memory before this API returns, otherwise the SCT transaction may fail + * + * @param handle Device handle obtained using spi_host_add_dev + * @param[out] seg_trans_desc Pointer to the completed SCT list head (then you can recycle this list of memory). + * @param ticks_to_wait Ticks to wait until there's a returned item; use portMAX_DELAY to never time out. + * + * @return + * - ESP_OK: On success + * - ESP_ERR_INVALID_ARG: Invalid arguments + * - ESP_ERR_INVALID_STATE: Invalid states, e.g.: SCT mode isn't enabled, etc. 
+ * - ESP_ERR_TIMEOUT: Timeout, didn't get a completed SCT transaction + */ +esp_err_t spi_device_get_multi_trans_result(spi_device_handle_t handle, spi_multi_transaction_t **seg_trans_desc, TickType_t ticks_to_wait); +#endif //#if SOC_SPI_SCT_SUPPORTED diff --git a/components/esp_driver_spi/src/gpspi/spi_master.c b/components/esp_driver_spi/src/gpspi/spi_master.c index 0579b630be..3eaa65f369 100644 --- a/components/esp_driver_spi/src/gpspi/spi_master.c +++ b/components/esp_driver_spi/src/gpspi/spi_master.c @@ -114,6 +114,7 @@ We have two bits to control the interrupt: #include #include "esp_private/periph_ctrl.h" #include "esp_private/spi_common_internal.h" +#include "esp_private/spi_master_internal.h" #include "driver/spi_master.h" #include "esp_clk_tree.h" #include "clk_ctrl_os.h" @@ -137,21 +138,38 @@ typedef struct spi_device_t spi_device_t; /// struct to hold private transaction data (like tx and rx buffer for DMA). typedef struct { spi_transaction_t *trans; - const uint32_t *buffer_to_send; //equals to tx_data, if SPI_TRANS_USE_RXDATA is applied; otherwise if original buffer wasn't in DMA-capable memory, this gets the address of a temporary buffer that is; + const uint32_t *buffer_to_send; //equals to tx_data, if SPI_TRANS_USE_RXDATA is applied; otherwise if original buffer wasn't in DMA-capable memory, this gets the address of a temporary buffer that is; //otherwise sets to the original buffer or NULL if no buffer is assigned. - uint32_t *buffer_to_rcv; // similar to buffer_to_send - uint32_t dummy; //As we create the queue when in init, to use sct mode private descriptor as a queue item (when in sct mode), we need to add a dummy member here to keep the same size with `spi_sct_desc_priv_t`. + uint32_t *buffer_to_rcv; //similar to buffer_to_send +#if SOC_SPI_SCT_SUPPORTED + uint32_t reserved[2]; //As we create the queue when in init, to use sct mode private descriptor as a queue item (when in sct mode), we need to add a dummy member here to keep the same size with `spi_sct_trans_priv_t`. +#endif } spi_trans_priv_t; #if SOC_SPI_SCT_SUPPORTED //Type of dma descriptors that used under SPI SCT mode typedef struct { - lldesc_t *tx_seg_head; - lldesc_t *rx_seg_head; - spi_seg_transaction_t *sct_trans_desc_head; - uint16_t tx_used_desc_num; - uint16_t rx_used_desc_num; -} spi_sct_desc_priv_t; + spi_dma_desc_t *tx_seg_head; + spi_dma_desc_t *rx_seg_head; + spi_multi_transaction_t *sct_trans_desc_head; + uint32_t *sct_conf_buffer; + uint16_t tx_used_desc_num; + uint16_t rx_used_desc_num; +} spi_sct_trans_priv_t; +_Static_assert(sizeof(spi_trans_priv_t) == sizeof(spi_sct_trans_priv_t)); //size of spi_trans_priv_t must be the same as size of spi_sct_trans_priv_t + +typedef struct { + /* Segmented-Configure-Transfer required, configured by driver, don't touch */ + uint32_t tx_free_desc_num; + uint32_t rx_free_desc_num; + spi_dma_desc_t *cur_tx_seg_link; ///< Current TX DMA descriptor used for sct mode. + spi_dma_desc_t *cur_rx_seg_link; ///< Current RX DMA descriptor used for sct mode. 
+ spi_dma_desc_t *tx_seg_link_tail; ///< Tail of the TX DMA descriptor link + spi_dma_desc_t *rx_seg_link_tail; ///< Tail of the RX DMA descriptor link +} spi_sct_desc_ctx_t; + +static void spi_hal_sct_tx_dma_desc_recycle(spi_sct_desc_ctx_t *desc_ctx, uint32_t recycle_num); +static void spi_hal_sct_rx_dma_desc_recycle(spi_sct_desc_ctx_t *desc_ctx, uint32_t recycle_num); #endif typedef struct { @@ -161,7 +179,8 @@ typedef struct { spi_hal_context_t hal; spi_trans_priv_t cur_trans_buf; #if SOC_SPI_SCT_SUPPORTED - spi_sct_desc_priv_t cur_sct_trans; + spi_sct_desc_ctx_t sct_desc_pool; + spi_sct_trans_priv_t cur_sct_trans; #endif int cur_cs; //current device doing transaction const spi_bus_attr_t* bus_attr; @@ -781,7 +800,7 @@ static void SPI_MASTER_ISR_ATTR spi_post_trans(spi_host_t *host) } #if SOC_SPI_SCT_SUPPORTED -static void SPI_MASTER_ISR_ATTR spi_sct_set_hal_trans_config(spi_seg_transaction_t *trans_header, spi_hal_trans_config_t *hal_trans) +static void SPI_MASTER_ISR_ATTR spi_sct_set_hal_trans_config(spi_multi_transaction_t *trans_header, spi_hal_trans_config_t *hal_trans) { spi_transaction_t *trans = &trans_header->base; @@ -796,7 +815,27 @@ static void SPI_MASTER_ISR_ATTR spi_sct_set_hal_trans_config(spi_seg_transaction hal_trans->line_mode.cmd_lines = (trans->flags & SPI_TRANS_MULTILINE_CMD) ? hal_trans->line_mode.data_lines : 1; } -static void SPI_MASTER_ISR_ATTR spi_new_sct_trans(spi_device_t *dev, spi_sct_desc_priv_t *cur_sct_trans) +static void SPI_MASTER_ISR_ATTR s_sct_load_dma_link(spi_device_t *dev, spi_dma_desc_t *rx_seg_head, spi_dma_desc_t *tx_seg_head) +{ + spi_hal_context_t *hal = &dev->host->hal; + const spi_dma_ctx_t *dma_ctx = dev->host->dma_ctx; + + spi_hal_clear_intr_mask(hal, SPI_LL_INTR_SEG_DONE); + + if (rx_seg_head) { + spi_dma_reset(dma_ctx->rx_dma_chan); + spi_hal_hw_prepare_rx(hal->hw); + spi_dma_start(dma_ctx->rx_dma_chan, rx_seg_head); + } + + if (tx_seg_head) { + spi_dma_reset(dma_ctx->tx_dma_chan); + spi_hal_hw_prepare_tx(hal->hw); + spi_dma_start(dma_ctx->tx_dma_chan, tx_seg_head); + } +} + +static void SPI_MASTER_ISR_ATTR spi_new_sct_trans(spi_device_t *dev, spi_sct_trans_priv_t *cur_sct_trans) { dev->host->cur_cs = dev->id; @@ -805,9 +844,9 @@ static void SPI_MASTER_ISR_ATTR spi_new_sct_trans(spi_device_t *dev, spi_sct_des #if !CONFIG_IDF_TARGET_ESP32S2 // s2 update this seg_gap_clock_len by dma from conf_buffer - spi_hal_sct_set_conf_bits_len(&dev->host->hal, cur_sct_trans->sct_trans_desc_head->seg_gap_clock_len); + spi_hal_sct_set_conf_bits_len(&dev->host->hal, cur_sct_trans->sct_trans_desc_head->sct_gap_len); #endif - spi_hal_sct_load_dma_link(&dev->host->hal, cur_sct_trans->rx_seg_head, cur_sct_trans->tx_seg_head); + s_sct_load_dma_link(dev, cur_sct_trans->rx_seg_head, cur_sct_trans->tx_seg_head); if (dev->cfg.pre_cb) { dev->cfg.pre_cb((spi_transaction_t *)cur_sct_trans->sct_trans_desc_head); } @@ -822,9 +861,10 @@ static void SPI_MASTER_ISR_ATTR spi_post_sct_trans(spi_host_t *host) assert(host->cur_sct_trans.rx_used_desc_num == 0); } + free(host->cur_sct_trans.sct_conf_buffer); portENTER_CRITICAL_ISR(&host->spinlock); - spi_hal_sct_tx_dma_desc_recycle(&host->hal, host->cur_sct_trans.tx_used_desc_num); - spi_hal_sct_rx_dma_desc_recycle(&host->hal, host->cur_sct_trans.rx_used_desc_num); + spi_hal_sct_tx_dma_desc_recycle(&host->sct_desc_pool, host->cur_sct_trans.tx_used_desc_num); + spi_hal_sct_rx_dma_desc_recycle(&host->sct_desc_pool, host->cur_sct_trans.rx_used_desc_num); portEXIT_CRITICAL_ISR(&host->spinlock); if 
(host->device[host->cur_cs]->cfg.post_cb) { host->device[host->cur_cs]->cfg.post_cb((spi_transaction_t *)host->cur_sct_trans.sct_trans_desc_head); @@ -846,7 +886,7 @@ static void SPI_MASTER_ISR_ATTR spi_intr(void *arg) #endif #if SOC_SPI_SCT_SUPPORTED - assert(spi_hal_usr_is_done(&host->hal) || spi_ll_get_intr(host->hal.hw, SPI_LL_INTR_SEG_DONE)); + assert(spi_hal_usr_is_done(&host->hal) || spi_hal_get_intr_mask(&host->hal, SPI_LL_INTR_SEG_DONE)); #else assert(spi_hal_usr_is_done(&host->hal)); #endif @@ -1414,12 +1454,155 @@ esp_err_t spi_bus_get_max_transaction_len(spi_host_device_t host_id, size_t *max } #if SOC_SPI_SCT_SUPPORTED + +/*----------------------------------------------------------- + * Below functions should be in the same spinlock + *-----------------------------------------------------------*/ +/*------------------------- + * TX + *------------------------*/ +static void SPI_MASTER_ISR_ATTR spi_hal_sct_tx_dma_desc_recycle(spi_sct_desc_ctx_t *desc_ctx, uint32_t recycle_num) +{ + desc_ctx->tx_free_desc_num += recycle_num; +} + +static void s_sct_prepare_tx_seg(spi_sct_desc_ctx_t *desc_ctx, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, spi_dma_desc_t **trans_head) +{ + HAL_ASSERT(desc_ctx->tx_free_desc_num >= 1 + lldesc_get_required_num(buf_len_bytes)); + const spi_dma_ctx_t *dma_ctx = __containerof(desc_ctx, spi_host_t, sct_desc_pool)->dma_ctx; + + *trans_head = desc_ctx->cur_tx_seg_link; + spicommon_dma_desc_setup_link(desc_ctx->cur_tx_seg_link, conf_buffer, SOC_SPI_SCT_BUFFER_NUM_MAX * 4, false); + spi_dma_desc_t *conf_buffer_link = desc_ctx->cur_tx_seg_link; + desc_ctx->tx_free_desc_num -= 1; + + desc_ctx->tx_seg_link_tail = desc_ctx->cur_tx_seg_link; + desc_ctx->cur_tx_seg_link++; + if (desc_ctx->cur_tx_seg_link == dma_ctx->dmadesc_tx + dma_ctx->dma_desc_num) { + //As there is enough space, so we simply point this to the pool head + desc_ctx->cur_tx_seg_link = dma_ctx->dmadesc_tx; + } + + if (send_buffer && buf_len_bytes) { + spicommon_dma_desc_setup_link(desc_ctx->cur_tx_seg_link, send_buffer, buf_len_bytes, false); + conf_buffer_link->next = desc_ctx->cur_tx_seg_link; + for (int i = 0; i < lldesc_get_required_num(buf_len_bytes); i++) { + desc_ctx->tx_seg_link_tail = desc_ctx->cur_tx_seg_link; + desc_ctx->cur_tx_seg_link++; + if (desc_ctx->cur_tx_seg_link == dma_ctx->dmadesc_tx + dma_ctx->dma_desc_num) { + //As there is enough space, so we simply point this to the pool head + desc_ctx->cur_tx_seg_link = dma_ctx->dmadesc_tx; + } + } + desc_ctx->tx_free_desc_num -= lldesc_get_required_num(buf_len_bytes); + } +} + +static esp_err_t spi_hal_sct_new_tx_dma_desc_head(spi_sct_desc_ctx_t *desc_ctx, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, spi_dma_desc_t **trans_head, uint32_t *used_desc_num) +{ + //1 desc for the conf_buffer, other for data. + if (desc_ctx->tx_free_desc_num < 1 + lldesc_get_required_num(buf_len_bytes)) { + return ESP_ERR_NO_MEM; + } + + s_sct_prepare_tx_seg(desc_ctx, conf_buffer, send_buffer, buf_len_bytes, trans_head); + *used_desc_num = 1 + lldesc_get_required_num(buf_len_bytes); + + return ESP_OK; +} + +static esp_err_t spi_hal_sct_link_tx_seg_dma_desc(spi_sct_desc_ctx_t *desc_ctx, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num) +{ + //1 desc for the conf_buffer, other for data. 
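+    //A segment therefore needs 1 + lldesc_get_required_num(buf_len_bytes) descriptors in total;
+    //if the free pool cannot cover that, return ESP_ERR_NO_MEM so the caller can wait for queued transactions to finish.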
+ if (desc_ctx->tx_free_desc_num < 1 + lldesc_get_required_num(buf_len_bytes)) { + return ESP_ERR_NO_MEM; + } + + if (desc_ctx->tx_seg_link_tail) { + //Connect last segment to the current segment, as we're sure the `s_sct_prepare_tx_seg` next won't fail. + desc_ctx->tx_seg_link_tail->next = desc_ctx->cur_tx_seg_link; + } + + spi_dma_desc_t *internal_head = NULL; + s_sct_prepare_tx_seg(desc_ctx, conf_buffer, send_buffer, buf_len_bytes, &internal_head); + *used_desc_num += 1 + lldesc_get_required_num(buf_len_bytes); + + return ESP_OK; +} + +// /*------------------------- +// * RX +// *------------------------*/ +static void SPI_MASTER_ISR_ATTR spi_hal_sct_rx_dma_desc_recycle(spi_sct_desc_ctx_t *desc_ctx, uint32_t recycle_num) +{ + desc_ctx->rx_free_desc_num += recycle_num; +} + +static void s_sct_prepare_rx_seg(spi_sct_desc_ctx_t *desc_ctx, const void *recv_buffer, uint32_t buf_len_bytes, spi_dma_desc_t **trans_head) +{ + HAL_ASSERT(desc_ctx->rx_free_desc_num >= lldesc_get_required_num(buf_len_bytes)); + const spi_dma_ctx_t *dma_ctx = __containerof(desc_ctx, spi_host_t, sct_desc_pool)->dma_ctx; + + *trans_head = desc_ctx->cur_rx_seg_link; + spicommon_dma_desc_setup_link(desc_ctx->cur_rx_seg_link, recv_buffer, buf_len_bytes, true); + for (int i = 0; i < lldesc_get_required_num(buf_len_bytes); i++) { + desc_ctx->rx_seg_link_tail = desc_ctx->cur_rx_seg_link; + desc_ctx->cur_rx_seg_link++; + if (desc_ctx->cur_rx_seg_link == dma_ctx->dmadesc_rx + dma_ctx->dma_desc_num) { + //As there is enough space, so we simply point this to the pool head + desc_ctx->cur_rx_seg_link = dma_ctx->dmadesc_rx; + } + } + + desc_ctx->rx_free_desc_num -= lldesc_get_required_num(buf_len_bytes); +} + +static esp_err_t spi_hal_sct_new_rx_dma_desc_head(spi_sct_desc_ctx_t *desc_ctx, const void *recv_buffer, uint32_t buf_len_bytes, spi_dma_desc_t **trans_head, uint32_t *used_desc_num) +{ + if (desc_ctx->rx_free_desc_num < lldesc_get_required_num(buf_len_bytes)) { + return ESP_ERR_NO_MEM; + } + + s_sct_prepare_rx_seg(desc_ctx, recv_buffer, buf_len_bytes, trans_head); + *used_desc_num = lldesc_get_required_num(buf_len_bytes); + + return ESP_OK; +} + +static esp_err_t spi_hal_sct_link_rx_seg_dma_desc(spi_sct_desc_ctx_t *desc_ctx, const void *recv_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num) +{ + if (desc_ctx->rx_free_desc_num < lldesc_get_required_num(buf_len_bytes)) { + return ESP_ERR_NO_MEM; + } + + if (desc_ctx->rx_seg_link_tail) { + //Connect last segment to the current segment, as we're sure the `s_sct_prepare_tx_seg` next won't fail. + desc_ctx->rx_seg_link_tail->next = desc_ctx->cur_rx_seg_link; + } + + spi_dma_desc_t *internal_head = NULL; + s_sct_prepare_rx_seg(desc_ctx, recv_buffer, buf_len_bytes, &internal_head); + *used_desc_num += lldesc_get_required_num(buf_len_bytes); + + return ESP_OK; +} + +static void s_spi_sct_reset_dma_pool(const spi_dma_ctx_t *dma_ctx, spi_sct_desc_ctx_t *sct_desc_pool) +{ + sct_desc_pool->tx_free_desc_num = dma_ctx->dma_desc_num; + sct_desc_pool->rx_free_desc_num = dma_ctx->dma_desc_num; + sct_desc_pool->cur_tx_seg_link = dma_ctx->dmadesc_tx; + sct_desc_pool->cur_rx_seg_link = dma_ctx->dmadesc_rx; + sct_desc_pool->tx_seg_link_tail = NULL; + sct_desc_pool->rx_seg_link_tail = NULL; +} + /** * This function will turn this host into SCT (segmented-configure-transfer) mode. 
* * No concurrency guarantee, if a transaction is ongoing, calling this will lead to wrong transaction */ -esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool enable) +esp_err_t spi_bus_multi_trans_mode_enable(spi_device_handle_t handle, bool enable) { SPI_CHECK(handle, "Invalid arguments.", ESP_ERR_INVALID_ARG); SPI_CHECK(SOC_SPI_SCT_SUPPORTED_PERIPH(handle->host->id), "Invalid arguments", ESP_ERR_INVALID_ARG); @@ -1440,11 +1623,11 @@ esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool ena }; spi_host_t *host = handle->host; - spi_trans_priv_t trans_buf; + spi_trans_priv_t trans_buf = { .trans = &fake_trans }; spi_hal_context_t *hal = &handle->host->hal; spi_hal_dev_config_t *hal_dev = &handle->hal_dev; //As we know the `fake_trans` are internal, so no need to `uninstall_priv_desc` - ret = setup_priv_desc(&fake_trans, &trans_buf, (host->bus_attr->dma_enabled)); + ret = setup_priv_desc(host, &trans_buf); if (ret != ESP_OK) { return ret; } @@ -1457,9 +1640,10 @@ esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool ena #if CONFIG_IDF_TARGET_ESP32S2 // conf_base need ensure transaction gap len more than about 2us under different freq. // conf_base only configurable on s2. - spi_hal_sct_setup_conf_base(hal, handle->real_clk_freq_hz/600000); + spi_hal_sct_setup_conf_base(hal, handle->hal_dev.timing_conf.real_freq / 600000); #endif + s_spi_sct_reset_dma_pool(host->dma_ctx, &host->sct_desc_pool); spi_hal_sct_init(hal); } else { spi_hal_sct_deinit(&handle->host->hal); @@ -1470,26 +1654,26 @@ esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool ena return ESP_OK; } -static void SPI_MASTER_ATTR s_sct_init_conf_buffer(spi_hal_context_t *hal, spi_seg_transaction_t *seg_trans_desc, uint32_t seg_num) +static void SPI_MASTER_ATTR s_sct_init_conf_buffer(spi_hal_context_t *hal, uint32_t *buffer, uint32_t trans_num) { // read from HW need waiting for slower APB clock domain return data, loop to contact slow clock domain will waste time. // use one imagen then copied by cpu instead. 
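+    //Take one register snapshot via spi_hal_sct_init_conf_buffer() and memcpy() it into every per-transaction slot,
+    //instead of re-reading the slow registers once per transaction.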
uint32_t conf_buffer_img[SOC_SPI_SCT_BUFFER_NUM_MAX]; spi_hal_sct_init_conf_buffer(hal, conf_buffer_img); - for (int i = 0; i < seg_num; i++) { - memcpy(seg_trans_desc[i].conf_buffer, conf_buffer_img, sizeof(conf_buffer_img)); + for (int i = 0; i < trans_num; i++) { + memcpy(&buffer[i * SOC_SPI_SCT_BUFFER_NUM_MAX], conf_buffer_img, sizeof(conf_buffer_img)); } } -static void SPI_MASTER_ATTR s_sct_format_conf_buffer(spi_device_handle_t handle, spi_seg_transaction_t *seg_trans_desc, bool seg_end) +static void SPI_MASTER_ATTR s_sct_format_conf_buffer(spi_device_handle_t handle, spi_multi_transaction_t *seg_trans_desc, uint32_t *buffer, bool seg_end) { spi_hal_context_t *hal = &handle->host->hal; spi_hal_dev_config_t *hal_dev = &handle->hal_dev; spi_hal_seg_config_t seg_config = {}; //prep - if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_PREP_LEN_UPDATED) { + if (seg_trans_desc->seg_trans_flags & SPI_MULTI_TRANS_PREP_LEN_UPDATED) { seg_config.cs_setup = seg_trans_desc->cs_ena_pretrans; } else { seg_config.cs_setup = handle->cfg.cs_ena_pretrans; @@ -1497,7 +1681,7 @@ static void SPI_MASTER_ATTR s_sct_format_conf_buffer(spi_device_handle_t handle, //cmd seg_config.cmd = seg_trans_desc->base.cmd; - if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_CMD_LEN_UPDATED) { + if (seg_trans_desc->seg_trans_flags & SPI_MULTI_TRANS_CMD_LEN_UPDATED) { seg_config.cmd_bits = seg_trans_desc->command_bits; } else { seg_config.cmd_bits = handle->cfg.command_bits; @@ -1505,14 +1689,14 @@ static void SPI_MASTER_ATTR s_sct_format_conf_buffer(spi_device_handle_t handle, //addr seg_config.addr = seg_trans_desc->base.addr; - if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_ADDR_LEN_UPDATED) { + if (seg_trans_desc->seg_trans_flags & SPI_MULTI_TRANS_ADDR_LEN_UPDATED) { seg_config.addr_bits = seg_trans_desc->address_bits; } else { seg_config.addr_bits = handle->cfg.address_bits; } //dummy - if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_DUMMY_LEN_UPDATED) { + if (seg_trans_desc->seg_trans_flags & SPI_MULTI_TRANS_DUMMY_LEN_UPDATED) { seg_config.dummy_bits = seg_trans_desc->dummy_bits; } else { seg_config.dummy_bits = handle->cfg.dummy_bits; @@ -1525,7 +1709,7 @@ static void SPI_MASTER_ATTR s_sct_format_conf_buffer(spi_device_handle_t handle, seg_config.rx_bitlen = seg_trans_desc->base.rxlength; //done - if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_DONE_LEN_UPDATED) { + if (seg_trans_desc->seg_trans_flags & SPI_MULTI_TRANS_DONE_LEN_UPDATED) { seg_config.cs_hold = seg_trans_desc->cs_ena_posttrans; } else { seg_config.cs_hold = handle->cfg.cs_ena_posttrans; @@ -1535,21 +1719,25 @@ static void SPI_MASTER_ATTR s_sct_format_conf_buffer(spi_device_handle_t handle, if (seg_end) { seg_config.seg_end = true; } - seg_config.seg_gap_len = seg_trans_desc->seg_gap_clock_len; + seg_config.seg_gap_len = seg_trans_desc->sct_gap_len; - // set line mode or ... 
+ // set line mode to hal_config spi_sct_set_hal_trans_config(seg_trans_desc, &hal->trans_config); - spi_hal_sct_format_conf_buffer(hal, &seg_config, hal_dev, seg_trans_desc->conf_buffer); + spi_hal_sct_format_conf_buffer(hal, &seg_config, hal_dev, buffer); } -esp_err_t SPI_MASTER_ATTR spi_device_queue_segment_trans(spi_device_handle_t handle, spi_seg_transaction_t *seg_trans_desc, uint32_t seg_num, TickType_t ticks_to_wait) +esp_err_t SPI_MASTER_ATTR spi_device_queue_multi_trans(spi_device_handle_t handle, spi_multi_transaction_t *seg_trans_desc, uint32_t trans_num, TickType_t ticks_to_wait) { SPI_CHECK(handle, "Invalid arguments.", ESP_ERR_INVALID_ARG); SPI_CHECK(SOC_SPI_SCT_SUPPORTED_PERIPH(handle->host->id), "Invalid arguments", ESP_ERR_INVALID_ARG); SPI_CHECK(handle->host->sct_mode_enabled == 1, "SCT mode isn't enabled", ESP_ERR_INVALID_STATE); esp_err_t ret = ESP_OK; - for (int i = 0; i < seg_num; i++) { + uint16_t alignment = handle->host->bus_attr->internal_mem_align_size; + uint32_t *conf_buffer = heap_caps_aligned_alloc(alignment, (trans_num * SOC_SPI_SCT_BUFFER_NUM_MAX * sizeof(uint32_t)), MALLOC_CAP_DMA); + SPI_CHECK(conf_buffer, "No enough memory", ESP_ERR_NO_MEM); + + for (int i = 0; i < trans_num; i++) { ret = check_trans_valid(handle, (spi_transaction_t *)&seg_trans_desc[i]); if (ret != ESP_OK) { return ret; @@ -1558,53 +1746,53 @@ esp_err_t SPI_MASTER_ATTR spi_device_queue_segment_trans(spi_device_handle_t han SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot queue new transaction while previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE); spi_hal_context_t *hal = &handle->host->hal; - s_sct_init_conf_buffer(hal, seg_trans_desc, seg_num); + s_sct_init_conf_buffer(hal, conf_buffer, trans_num); - spi_hal_dma_desc_status_t dma_desc_status = SPI_HAL_DMA_DESC_NULL; - lldesc_t *tx_seg_head = NULL; + static esp_err_t dma_desc_status = ESP_FAIL; + spi_dma_desc_t *tx_seg_head = NULL; uint32_t tx_used_dma_desc_num = 0; uint32_t tx_buf_len = 0; - lldesc_t *rx_seg_head = NULL; + spi_dma_desc_t *rx_seg_head = NULL; uint32_t rx_used_dma_desc_num = 0; uint32_t rx_buf_len = 0; /*--------------Get segment head--------------*/ - s_sct_format_conf_buffer(handle, &seg_trans_desc[0], (seg_num == 1)); + s_sct_format_conf_buffer(handle, &seg_trans_desc[0], conf_buffer, (trans_num == 1)); //TX tx_buf_len = (seg_trans_desc[0].base.length + 8 - 1) / 8; portENTER_CRITICAL(&handle->host->spinlock); - dma_desc_status = spi_hal_sct_new_tx_dma_desc_head(hal, seg_trans_desc[0].conf_buffer, seg_trans_desc[0].base.tx_buffer, tx_buf_len, &tx_seg_head, &tx_used_dma_desc_num); + dma_desc_status = spi_hal_sct_new_tx_dma_desc_head(&handle->host->sct_desc_pool, conf_buffer, seg_trans_desc[0].base.tx_buffer, tx_buf_len, &tx_seg_head, &tx_used_dma_desc_num); portEXIT_CRITICAL(&handle->host->spinlock); - SPI_CHECK(dma_desc_status == SPI_HAL_DMA_DESC_LINKED, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE); + SPI_CHECK(dma_desc_status == ESP_OK, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE); //RX //This is modified to the same lenght as tx length, when in fd mode, else it's `rxlength` rx_buf_len = (seg_trans_desc[0].base.rxlength + 8 - 1) / 8; if (seg_trans_desc[0].base.rx_buffer) { portENTER_CRITICAL(&handle->host->spinlock); - dma_desc_status = spi_hal_sct_new_rx_dma_desc_head(hal, seg_trans_desc[0].base.rx_buffer, rx_buf_len, 
&rx_seg_head, &rx_used_dma_desc_num); + dma_desc_status = spi_hal_sct_new_rx_dma_desc_head(&handle->host->sct_desc_pool, seg_trans_desc[0].base.rx_buffer, rx_buf_len, &rx_seg_head, &rx_used_dma_desc_num); portEXIT_CRITICAL(&handle->host->spinlock); - SPI_CHECK(dma_desc_status == SPI_HAL_DMA_DESC_LINKED, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE); + SPI_CHECK(dma_desc_status == ESP_OK, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE); } /*--------------Prepare other segments--------------*/ - for (int i = 1; i < seg_num; i++) { - s_sct_format_conf_buffer(handle, &seg_trans_desc[i], (i == (seg_num - 1))); + for (int i = 1; i < trans_num; i++) { + s_sct_format_conf_buffer(handle, &seg_trans_desc[i], &conf_buffer[i * SOC_SPI_SCT_BUFFER_NUM_MAX], (i == (trans_num - 1))); //TX tx_buf_len = (seg_trans_desc[i].base.length + 8 - 1) / 8; portENTER_CRITICAL(&handle->host->spinlock); - dma_desc_status = spi_hal_sct_link_tx_seg_dma_desc(hal, seg_trans_desc[i].conf_buffer, seg_trans_desc[i].base.tx_buffer, tx_buf_len, &tx_used_dma_desc_num); + dma_desc_status = spi_hal_sct_link_tx_seg_dma_desc(&handle->host->sct_desc_pool, &conf_buffer[i * SOC_SPI_SCT_BUFFER_NUM_MAX], seg_trans_desc[i].base.tx_buffer, tx_buf_len, &tx_used_dma_desc_num); portEXIT_CRITICAL(&handle->host->spinlock); - SPI_CHECK(dma_desc_status == SPI_HAL_DMA_DESC_LINKED, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE); + SPI_CHECK(dma_desc_status == ESP_OK, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE); //RX if (seg_trans_desc[i].base.rx_buffer) { //This is modified to the same lenght as tx length, when in fd mode, else it's `rxlength` rx_buf_len = (seg_trans_desc[i].base.rxlength + 8 - 1) / 8; portENTER_CRITICAL(&handle->host->spinlock); - dma_desc_status = spi_hal_sct_link_rx_seg_dma_desc(hal, seg_trans_desc[i].base.rx_buffer, rx_buf_len, &rx_used_dma_desc_num); + dma_desc_status = spi_hal_sct_link_rx_seg_dma_desc(&handle->host->sct_desc_pool, seg_trans_desc[i].base.rx_buffer, rx_buf_len, &rx_used_dma_desc_num); portEXIT_CRITICAL(&handle->host->spinlock); } } @@ -1613,10 +1801,11 @@ esp_err_t SPI_MASTER_ATTR spi_device_queue_segment_trans(spi_device_handle_t han esp_pm_lock_acquire(handle->host->bus_attr->pm_lock); #endif - spi_sct_desc_priv_t sct_desc = { + spi_sct_trans_priv_t sct_desc = { .tx_seg_head = tx_seg_head, .rx_seg_head = rx_seg_head, .sct_trans_desc_head = seg_trans_desc, + .sct_conf_buffer = conf_buffer, .tx_used_desc_num = tx_used_dma_desc_num, .rx_used_desc_num = rx_used_dma_desc_num, }; @@ -1639,12 +1828,12 @@ esp_err_t SPI_MASTER_ATTR spi_device_queue_segment_trans(spi_device_handle_t han return ESP_OK; } -esp_err_t SPI_MASTER_ATTR spi_device_get_segment_trans_result(spi_device_handle_t handle, spi_seg_transaction_t **seg_trans_desc, TickType_t ticks_to_wait) +esp_err_t SPI_MASTER_ATTR spi_device_get_multi_trans_result(spi_device_handle_t handle, spi_multi_transaction_t **seg_trans_desc, TickType_t ticks_to_wait) { SPI_CHECK(handle, "Invalid arguments.", ESP_ERR_INVALID_ARG); SPI_CHECK(SOC_SPI_SCT_SUPPORTED_PERIPH(handle->host->id), "Invalid arguments", ESP_ERR_INVALID_ARG); SPI_CHECK(handle->host->sct_mode_enabled == 1, "SCT mode isn't enabled", ESP_ERR_INVALID_STATE); - spi_sct_desc_priv_t sct_desc = {}; + 
spi_sct_trans_priv_t sct_desc = {}; BaseType_t r = xQueueReceive(handle->ret_queue, (void *)&sct_desc, ticks_to_wait); if (!r) { diff --git a/components/esp_driver_spi/test_apps/components/spi_bench_mark/include/spi_performance.h b/components/esp_driver_spi/test_apps/components/spi_bench_mark/include/spi_performance.h index 7636ef0014..4e1dc8b1f3 100644 --- a/components/esp_driver_spi/test_apps/components/spi_bench_mark/include/spi_performance.h +++ b/components/esp_driver_spi/test_apps/components/spi_bench_mark/include/spi_performance.h @@ -44,7 +44,7 @@ #if !CONFIG_FREERTOS_SMP // IDF-5223 #define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_POLLING 15 #define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_POLLING_NO_DMA 15 -#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_NO_POLLING 32 +#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_NO_POLLING 33 #define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_NO_POLLING_NO_DMA 30 #else #define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_POLLING 17 @@ -55,7 +55,7 @@ #elif CONFIG_IDF_TARGET_ESP32C6 #define IDF_PERFORMANCE_MAX_SPI_CLK_FREQ 26*1000*1000 -#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_NO_POLLING 34 +#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_NO_POLLING 35 //TODO: IDF-9551, check perform #define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_POLLING 17 #define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_NO_POLLING_NO_DMA 32 #define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_POLLING_NO_DMA 15 diff --git a/components/esp_driver_spi/test_apps/master/main/CMakeLists.txt b/components/esp_driver_spi/test_apps/master/main/CMakeLists.txt index e72a32b797..2e2699ee16 100644 --- a/components/esp_driver_spi/test_apps/master/main/CMakeLists.txt +++ b/components/esp_driver_spi/test_apps/master/main/CMakeLists.txt @@ -4,9 +4,14 @@ set(srcs "test_spi_master.c" "test_spi_sio.c" "test_spi_bus_lock.c" - "test_spi_master_sct.c" ) +# sct test using slave hd APIs, need slave hd support +# tmp skip sct test under iram_safe, both sct and slave hd are not cleaned +if(CONFIG_SOC_SPI_SUPPORT_SLAVE_HD_VER2 AND NOT CONFIG_COMPILER_DUMP_RTL_FILES) + list(APPEND srcs "test_spi_master_sct.c") +endif() + # In order for the cases defined by `TEST_CASE` to be linked into the final elf, # the component can be registered as WHOLE_ARCHIVE idf_component_register( diff --git a/components/driver/test_apps/spi/master/main/test_spi_master_sct.c b/components/esp_driver_spi/test_apps/master/main/test_spi_master_sct.c similarity index 83% rename from components/driver/test_apps/spi/master/main/test_spi_master_sct.c rename to components/esp_driver_spi/test_apps/master/main/test_spi_master_sct.c index 6f317d31ab..fad0d54288 100644 --- a/components/driver/test_apps/spi/master/main/test_spi_master_sct.c +++ b/components/esp_driver_spi/test_apps/master/main/test_spi_master_sct.c @@ -14,12 +14,12 @@ #include "test_utils.h" #include "esp_heap_caps.h" #include "driver/spi_master.h" +#include "esp_private/spi_master_internal.h" #include "driver/spi_slave_hd.h" #include "driver/spi_slave.h" #include "soc/spi_pins.h" #include "test_spi_utils.h" - __attribute__((unused)) static const char *TAG = "SCT"; #if (SOC_SPI_SUPPORT_SLAVE_HD_VER2 && SOC_SPI_SCT_SUPPORTED) @@ -62,18 +62,18 @@ static void hd_master(void) uint8_t *master_rx_buf = heap_caps_calloc(1, TEST_HD_DATA_LEN, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); uint32_t master_rx_val = 0; uint8_t *slave_tx_buf = heap_caps_calloc(1, TEST_HD_DATA_LEN, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); - get_tx_buffer(199, master_tx_buf, slave_tx_buf, TEST_HD_DATA_LEN); + test_fill_random_to_buffers_dualboard(199, master_tx_buf, slave_tx_buf, 
TEST_HD_DATA_LEN); - spi_seg_transaction_t *ret_seg_trans = NULL; + spi_multi_transaction_t *ret_seg_trans = NULL; //---------------------Master TX---------------------------// - spi_seg_transaction_t tx_seg_trans[TEST_HD_TIMES] = { + spi_multi_transaction_t tx_seg_trans[TEST_HD_TIMES] = { { .base = { .cmd = 0x1, .addr = TEST_HD_BUF_0_ID, .length = 4 * 8, - .tx_buffer = (uint8_t *)&master_tx_val, + .tx_buffer = (uint8_t *) &master_tx_val, }, }, //TEST_HD_DATA_LEN of TX data, split into 2 segments. `TEST_HD_DATA_LEN_PER_SEG` per segment { .base = { @@ -84,7 +84,7 @@ static void hd_master(void) .tx_buffer = master_tx_buf, }, .dummy_bits = 8, - .seg_trans_flags = SPI_SEG_TRANS_DUMMY_LEN_UPDATED, + .seg_trans_flags = SPI_MULTI_TRANS_DUMMY_LEN_UPDATED, }, { .base = { @@ -93,7 +93,7 @@ static void hd_master(void) .tx_buffer = master_tx_buf + TEST_HD_DATA_LEN_PER_SEG, }, .dummy_bits = 8, - .seg_trans_flags = SPI_SEG_TRANS_DUMMY_LEN_UPDATED, + .seg_trans_flags = SPI_MULTI_TRANS_DUMMY_LEN_UPDATED, }, { .base = { @@ -102,23 +102,22 @@ static void hd_master(void) }, }; - TEST_ESP_OK(spi_bus_segment_trans_mode_enable(handle, true)); + TEST_ESP_OK(spi_bus_multi_trans_mode_enable(handle, true)); unity_wait_for_signal("Slave ready"); - TEST_ESP_OK(spi_device_queue_segment_trans(handle, tx_seg_trans, TEST_HD_TIMES, portMAX_DELAY)); - TEST_ESP_OK(spi_device_get_segment_trans_result(handle, &ret_seg_trans, portMAX_DELAY)); + TEST_ESP_OK(spi_device_queue_multi_trans(handle, tx_seg_trans, TEST_HD_TIMES, portMAX_DELAY)); + TEST_ESP_OK(spi_device_get_multi_trans_result(handle, &ret_seg_trans, portMAX_DELAY)); TEST_ASSERT(ret_seg_trans == tx_seg_trans); ESP_LOG_BUFFER_HEX("Master tx", master_tx_buf, TEST_HD_DATA_LEN); - TEST_ESP_OK(spi_bus_segment_trans_mode_enable(handle, false)); - + TEST_ESP_OK(spi_bus_multi_trans_mode_enable(handle, false)); //---------------------Master RX---------------------------// - spi_seg_transaction_t rx_seg_trans[TEST_HD_TIMES] = { + spi_multi_transaction_t rx_seg_trans[TEST_HD_TIMES] = { { .base = { .cmd = 0x2, .addr = TEST_HD_BUF_1_ID, .rxlength = 4 * 8, - .rx_buffer = (uint8_t *)&master_rx_val, + .rx_buffer = (uint8_t *) &master_rx_val, }, }, // TEST_HD_DATA_LEN of TX data, split into 2 segments.
`TEST_HD_DATA_LEN_PER_SEG` per segment @@ -129,7 +128,7 @@ static void hd_master(void) .rx_buffer = master_rx_buf, }, .dummy_bits = 8, - .seg_trans_flags = SPI_SEG_TRANS_DUMMY_LEN_UPDATED, + .seg_trans_flags = SPI_MULTI_TRANS_DUMMY_LEN_UPDATED, }, { .base = { @@ -138,7 +137,7 @@ static void hd_master(void) .rx_buffer = master_rx_buf + TEST_HD_DATA_LEN_PER_SEG, }, .dummy_bits = 8, - .seg_trans_flags = SPI_SEG_TRANS_DUMMY_LEN_UPDATED, + .seg_trans_flags = SPI_MULTI_TRANS_DUMMY_LEN_UPDATED, }, { .base = { @@ -146,11 +145,11 @@ static void hd_master(void) } }, }; - TEST_ESP_OK(spi_bus_segment_trans_mode_enable(handle, true)); + TEST_ESP_OK(spi_bus_multi_trans_mode_enable(handle, true)); unity_wait_for_signal("Slave ready"); - TEST_ESP_OK(spi_device_queue_segment_trans(handle, rx_seg_trans, TEST_HD_TIMES, portMAX_DELAY)); - TEST_ESP_OK(spi_device_get_segment_trans_result(handle, &ret_seg_trans, portMAX_DELAY)); + TEST_ESP_OK(spi_device_queue_multi_trans(handle, rx_seg_trans, TEST_HD_TIMES, portMAX_DELAY)); + TEST_ESP_OK(spi_device_get_multi_trans_result(handle, &ret_seg_trans, portMAX_DELAY)); TEST_ASSERT(ret_seg_trans == rx_seg_trans); ESP_LOGI("Master", "Slave Reg[%d] value is: 0x%" PRIx32, TEST_HD_BUF_1_ID, master_rx_val); @@ -184,7 +183,7 @@ static void hd_slave(void) uint8_t *slave_rx_buf = heap_caps_calloc(1, TEST_HD_DATA_LEN, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); uint32_t slave_rx_val = 0; uint8_t *master_tx_buf = heap_caps_calloc(1, TEST_HD_DATA_LEN, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); - get_tx_buffer(199, master_tx_buf, slave_tx_buf, TEST_HD_DATA_LEN); + test_fill_random_to_buffers_dualboard(199, master_tx_buf, slave_tx_buf, TEST_HD_DATA_LEN); unity_wait_for_signal("Master ready"); @@ -194,7 +193,7 @@ static void hd_slave(void) .len = TEST_HD_DATA_LEN, }; TEST_ESP_OK(spi_slave_hd_queue_trans(SPI2_HOST, SPI_SLAVE_CHAN_RX, &slave_rx_trans, portMAX_DELAY)); - unity_send_signal("slave ready"); + unity_send_signal("Slave ready"); TEST_ESP_OK(spi_slave_hd_get_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY)); TEST_ASSERT(ret_trans == &slave_rx_trans); @@ -212,7 +211,7 @@ static void hd_slave(void) .len = TEST_HD_DATA_LEN, }; TEST_ESP_OK(spi_slave_hd_queue_trans(SPI2_HOST, SPI_SLAVE_CHAN_TX, &slave_tx_trans, portMAX_DELAY)); - unity_send_signal("slave ready"); + unity_send_signal("Slave ready"); TEST_ESP_OK(spi_slave_hd_get_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_TX, &ret_trans, portMAX_DELAY)); TEST_ASSERT(ret_trans == &slave_tx_trans); ESP_LOG_BUFFER_HEX("Slave tx", slave_tx_buf, TEST_HD_DATA_LEN); diff --git a/components/hal/esp32s2/include/hal/spi_ll.h b/components/hal/esp32s2/include/hal/spi_ll.h index 1bd5aa058a..5a7dc9ee2f 100644 --- a/components/hal/esp32s2/include/hal/spi_ll.h +++ b/components/hal/esp32s2/include/hal/spi_ll.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD + * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD * * SPDX-License-Identifier: Apache-2.0 */ @@ -1533,6 +1533,19 @@ static inline void spi_ll_set_conf_base_bitslen(spi_dev_t *hw, uint8_t conf_base } } +/** + * Set conf phase bits len to HW for segment config trans mode. + * + * @param hw Beginning address of the peripheral registers. + * @param conf_bitlen Value of field conf_bitslen in cmd reg. 
+ */ +static inline void spi_ll_set_conf_phase_bits_len(spi_dev_t *hw, uint32_t conf_bitlen) +{ + if (conf_bitlen <= SOC_SPI_SCT_CONF_BITLEN_MAX) { + hw->cmd.conf_bitlen = conf_bitlen; + } +} + /** * Set conf phase bits len to config buffer for segment config trans mode. * diff --git a/components/hal/include/hal/spi_hal.h b/components/hal/include/hal/spi_hal.h index c9e5e3017b..41f32a5f52 100644 --- a/components/hal/include/hal/spi_hal.h +++ b/components/hal/include/hal/spi_hal.h @@ -48,15 +48,6 @@ typedef dma_descriptor_align4_t spi_dma_desc_t; typedef dma_descriptor_align8_t spi_dma_desc_t; #endif -/** - * @brief Enum for DMA descriptor status - */ -typedef enum spi_hal_dma_desc_status_t { - SPI_HAL_DMA_DESC_NULL = 0, ///< Null descriptos - SPI_HAL_DMA_DESC_RUN_OUT = 1, ///< DMA descriptors are not enough for data - SPI_HAL_DMA_DESC_LINKED = 2, ///< DMA descriptors are linked successfully -} spi_hal_dma_desc_status_t; - /** * Input parameters to the ``spi_hal_cal_clock_conf`` to calculate the timing configuration */ @@ -112,17 +103,6 @@ typedef struct { /* Configured by driver at initialization, don't touch */ spi_dev_t *hw; ///< Beginning address of the peripheral registers. bool dma_enabled; ///< Whether the DMA is enabled, do not update after initialization - -#if SOC_SPI_SCT_SUPPORTED - /* Segmented-Configure-Transfer required, configured by driver, don't touch */ - uint32_t tx_free_desc_num; - uint32_t rx_free_desc_num; - lldesc_t *cur_tx_seg_link; ///< Current TX DMA descriptor used for sct mode. - lldesc_t *cur_rx_seg_link; ///< Current RX DMA descriptor used for sct mode. - lldesc_t *tx_seg_link_tail; ///< Tail of the TX DMA descriptor link - lldesc_t *rx_seg_link_tail; ///< Tail of the RX DMA descriptor link -#endif //#if SOC_SPI_SCT_SUPPORTED - /* Internal parameters, don't touch */ spi_hal_trans_config_t trans_config; ///< Transaction configuration } spi_hal_context_t; @@ -341,92 +321,6 @@ void spi_hal_sct_init_conf_buffer(spi_hal_context_t *hal, uint32_t conf_buffer[S */ void spi_hal_sct_format_conf_buffer(spi_hal_context_t *hal, const spi_hal_seg_config_t *config, const spi_hal_dev_config_t *dev, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]); -/** - * Format tx dma descriptor(s) for a SCT head - * - * @param hal Context of the HAL layer. - * @param conf_buffer Conf buffer - * @param send_buffer TX buffer - * @param buf_len_bytes TX buffer length, in bytes - * @param[out] trans_head SCT dma descriptor head - * @param[out] used_desc_num After formatting, `used_desc_num` number of descriptors are used - * - * @return - * - SPI_HAL_DMA_DESC_LINKED: Successfully format these dma descriptors, and link together - * - SPI_HAL_DMA_DESC_RUN_OUT: Run out of dma descriptors, should alloc more, or wait until enough number of descriptors are recycled (by `spi_hal_sct_tx_dma_desc_recycle`) - */ -spi_hal_dma_desc_status_t spi_hal_sct_new_tx_dma_desc_head(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head, uint32_t *used_desc_num); - -/** - * Format tx dma descriptor(s) for a segment, and linked it to its previous segment - * - * @param hal Context of the HAL layer. 
- * @param conf_buffer Conf buffer - * @param send_buffer TX buffer - * @param buf_len_bytes TX buffer length, in bytes - * @param[out] used_desc_num After formatting, `used_desc_num` number of descriptors are used - * - * @return - * - SPI_HAL_DMA_DESC_LINKED: Successfully format these dma descriptors, and link together - * - SPI_HAL_DMA_DESC_RUN_OUT: Run out of dma descriptors, should alloc more, or wait until enough number of descriptors are recycled (by `spi_hal_sct_tx_dma_desc_recycle`) - */ -spi_hal_dma_desc_status_t spi_hal_sct_link_tx_seg_dma_desc(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num); - -/** - * Recycle used tx dma descriptors (back to available state, NOT a memory free) - * - * @param hal Context of the HAL layer. - * @param recycle_num Number of the to-be-recycled descriptors - */ -void spi_hal_sct_tx_dma_desc_recycle(spi_hal_context_t *hal, uint32_t recycle_num); - -/** - * Format rx dma descriptor(s) for a SCT head - * - * @param hal Context of the HAL layer. - * @param recv_buffer RX buffer - * @param buf_len_bytes RX buffer length, in bytes - * @param[out] trans_head SCT dma descriptor head - * @param[out] used_desc_num After formatting, `used_desc_num` number of descriptors are used - * - * @return - * - SPI_HAL_DMA_DESC_LINKED: Successfully format these dma descriptors, and link together - * - SPI_HAL_DMA_DESC_RUN_OUT: Run out of dma descriptors, should alloc more, or wait until enough number of descriptors are recycled (by `spi_hal_sct_tx_dma_desc_recycle`) - */ -spi_hal_dma_desc_status_t spi_hal_sct_new_rx_dma_desc_head(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head, uint32_t *used_desc_num); - -/** - * Format rx dma descriptor(s) for a segment, and linked it to its previous segment - * - * @param hal Context of the HAL layer. - * @param send_buffer RX buffer - * @param buf_len_bytes RX buffer length, in bytes - * @param[out] used_desc_num After formatting, `used_desc_num` number of descriptors are used - * - * @return - * - SPI_HAL_DMA_DESC_LINKED: Successfully format these dma descriptors, and link together - * - SPI_HAL_DMA_DESC_RUN_OUT: Run out of dma descriptors, should alloc more, or wait until enough number of descriptors are recycled (by `spi_hal_sct_tx_dma_desc_recycle`) - */ -spi_hal_dma_desc_status_t spi_hal_sct_link_rx_seg_dma_desc(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num); - -/** - * Recycle used rx dma descriptors (back to available state, NOT a memory free) - * - * @param hal Context of the HAL layer. - * @param recycle_num Number of the to-be-recycled descriptors - */ -void spi_hal_sct_rx_dma_desc_recycle(spi_hal_context_t *hal, uint32_t recycle_num); - -/** - * Load dma descriptors to dma - * Will do nothing to TX or RX dma, when `tx_seg_head` or `rx_seg_head` is NULL - * - * @param hal Context of the HAL layer. - * @param rx_seg_head Head of the SCT RX dma descriptors - * @param tx_seg_head Head of the SCT TX dma descriptors - */ -void spi_hal_sct_load_dma_link(spi_hal_context_t *hal, lldesc_t *rx_seg_head, lldesc_t *tx_seg_head); - /** * Deinit SCT mode related registers and hal states */ @@ -435,13 +329,22 @@ void spi_hal_sct_deinit(spi_hal_context_t *hal); /** * Set conf_bitslen to HW for sct. 
*/ -#define spi_hal_sct_set_conf_bits_len(hal, conf_len) spi_ll_set_conf_phase_bits_len((hal)->hw, conf_len) +void spi_hal_sct_set_conf_bits_len(spi_hal_context_t *hal, uint32_t conf_len); + +/** + * Clear SPI interrupt bits by mask + */ +void spi_hal_clear_intr_mask(spi_hal_context_t *hal, uint32_t mask); + +/** + * Get SPI interrupt bits status by mask + */ +bool spi_hal_get_intr_mask(spi_hal_context_t *hal, uint32_t mask); /** * Set conf_bitslen base to HW for sct, only supported on s2. */ #define spi_hal_sct_setup_conf_base(hal, conf_base) spi_ll_set_conf_base_bitslen((hal)->hw, conf_base) - #endif //#if SOC_SPI_SCT_SUPPORTED #endif //#if SOC_GPSPI_SUPPORTED diff --git a/components/hal/spi_hal.c b/components/hal/spi_hal.c index eab8594829..8d1dca9006 100644 --- a/components/hal/spi_hal.c +++ b/components/hal/spi_hal.c @@ -53,19 +53,8 @@ void spi_hal_deinit(spi_hal_context_t *hal) } #if SOC_SPI_SCT_SUPPORTED -static void s_sct_reset_dma_link(spi_hal_context_t *hal) -{ - hal->tx_free_desc_num = hal->dmadesc_n; - hal->rx_free_desc_num = hal->dmadesc_n; - hal->cur_tx_seg_link = hal->dmadesc_tx; - hal->cur_rx_seg_link = hal->dmadesc_rx; - hal->tx_seg_link_tail = NULL; - hal->rx_seg_link_tail = NULL; -} - void spi_hal_sct_init(spi_hal_context_t *hal) { - s_sct_reset_dma_link(hal); spi_ll_conf_state_enable(hal->hw, true); spi_ll_set_magic_number(hal->hw, SPI_LL_SCT_MAGIC_NUMBER); spi_ll_disable_int(hal->hw); //trans_done intr enabled in `add device` phase, sct mode should use sct_trans_done only diff --git a/components/hal/spi_hal_iram.c b/components/hal/spi_hal_iram.c index c19d02be0c..59e380b583 100644 --- a/components/hal/spi_hal_iram.c +++ b/components/hal/spi_hal_iram.c @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD + * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD * * SPDX-License-Identifier: Apache-2.0 */ @@ -167,6 +167,18 @@ void spi_hal_fetch_result(const spi_hal_context_t *hal) /*------------------------------------------------------------------------------ * Segmented-Configure-Transfer *----------------------------------------------------------------------------*/ +void spi_hal_clear_intr_mask(spi_hal_context_t *hal, uint32_t mask) { + spi_ll_clear_intr(hal->hw, mask); +} + +bool spi_hal_get_intr_mask(spi_hal_context_t *hal, uint32_t mask) { + return spi_ll_get_intr(hal->hw, mask); +} + +void spi_hal_sct_set_conf_bits_len(spi_hal_context_t *hal, uint32_t conf_len) { + spi_ll_set_conf_phase_bits_len(hal->hw, conf_len); +} + void spi_hal_sct_init_conf_buffer(spi_hal_context_t *hal, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]) { spi_ll_init_conf_buffer(hal->hw, conf_buffer); @@ -189,155 +201,4 @@ void spi_hal_sct_format_conf_buffer(spi_hal_context_t *hal, const spi_hal_seg_co #endif } -void spi_hal_sct_load_dma_link(spi_hal_context_t *hal, lldesc_t *rx_seg_head, lldesc_t *tx_seg_head) -{ - spi_ll_clear_intr(hal->hw, SPI_LL_INTR_SEG_DONE); - - HAL_ASSERT(hal->dma_enabled); - if (rx_seg_head) { - spi_dma_ll_rx_reset(hal->dma_in, hal->rx_dma_chan); - spi_ll_dma_rx_fifo_reset(hal->hw); - spi_ll_infifo_full_clr(hal->hw); - spi_ll_dma_rx_enable(hal->hw, 1); - spi_dma_ll_rx_start(hal->dma_in, hal->rx_dma_chan, rx_seg_head); - } - - if (tx_seg_head) { - spi_dma_ll_tx_reset(hal->dma_out, hal->tx_dma_chan); - spi_ll_dma_tx_fifo_reset(hal->hw); - spi_ll_outfifo_empty_clr(hal->hw); - spi_ll_dma_tx_enable(hal->hw, 1); - spi_dma_ll_tx_start(hal->dma_out, hal->tx_dma_chan, tx_seg_head); - } -} -
-/*----------------------------------------------------------- - * Below hal functions should be in the same spinlock - *-----------------------------------------------------------*/ -/*------------------------- - * TX - *------------------------*/ -void spi_hal_sct_tx_dma_desc_recycle(spi_hal_context_t *hal, uint32_t recycle_num) -{ - hal->tx_free_desc_num += recycle_num; -} - -static void s_sct_prepare_tx_seg(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head) -{ - HAL_ASSERT(hal->tx_free_desc_num >= 1 + lldesc_get_required_num(buf_len_bytes)); - - *trans_head = hal->cur_tx_seg_link; - lldesc_setup_link(hal->cur_tx_seg_link, conf_buffer, SOC_SPI_SCT_BUFFER_NUM_MAX * 4, false); - lldesc_t *conf_buffer_link = hal->cur_tx_seg_link; - hal->tx_free_desc_num -= 1; - - hal->tx_seg_link_tail = hal->cur_tx_seg_link; - hal->cur_tx_seg_link++; - if (hal->cur_tx_seg_link == hal->dmadesc_tx + hal->dmadesc_n) { - //As there is enough space, so we simply point this to the pool head - hal->cur_tx_seg_link = hal->dmadesc_tx; - } - - if(send_buffer && buf_len_bytes) { - lldesc_setup_link(hal->cur_tx_seg_link, send_buffer, buf_len_bytes, false); - STAILQ_NEXT(conf_buffer_link, qe) = hal->cur_tx_seg_link; - for (int i = 0; i < lldesc_get_required_num(buf_len_bytes); i++) { - hal->tx_seg_link_tail = hal->cur_tx_seg_link; - hal->cur_tx_seg_link++; - if (hal->cur_tx_seg_link == hal->dmadesc_tx + hal->dmadesc_n) { - //As there is enough space, so we simply point this to the pool head - hal->cur_tx_seg_link = hal->dmadesc_tx; - } - } - hal->tx_free_desc_num -= lldesc_get_required_num(buf_len_bytes); - } -} - -spi_hal_dma_desc_status_t spi_hal_sct_new_tx_dma_desc_head(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head, uint32_t *used_desc_num) -{ - //1 desc for the conf_buffer, other for data. - if (hal->tx_free_desc_num < 1 + lldesc_get_required_num(buf_len_bytes)) { - return SPI_HAL_DMA_DESC_RUN_OUT; - } - - s_sct_prepare_tx_seg(hal, conf_buffer, send_buffer, buf_len_bytes, trans_head); - *used_desc_num = 1 + lldesc_get_required_num(buf_len_bytes); - - return SPI_HAL_DMA_DESC_LINKED; -} - -spi_hal_dma_desc_status_t spi_hal_sct_link_tx_seg_dma_desc(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num) -{ - //1 desc for the conf_buffer, other for data. - if (hal->tx_free_desc_num < 1 + lldesc_get_required_num(buf_len_bytes)) { - return SPI_HAL_DMA_DESC_RUN_OUT; - } - - if (hal->tx_seg_link_tail) { - //Connect last segment to the current segment, as we're sure the `s_sct_prepare_tx_seg` next won't fail. 
- STAILQ_NEXT(hal->tx_seg_link_tail, qe) = hal->cur_tx_seg_link; - } - - lldesc_t *internal_head = NULL; - s_sct_prepare_tx_seg(hal, conf_buffer, send_buffer, buf_len_bytes, &internal_head); - *used_desc_num += 1 + lldesc_get_required_num(buf_len_bytes); - - return SPI_HAL_DMA_DESC_LINKED; -} - -/*------------------------- - * RX - *------------------------*/ -void spi_hal_sct_rx_dma_desc_recycle(spi_hal_context_t *hal, uint32_t recycle_num) -{ - hal->rx_free_desc_num += recycle_num; -} - -static void s_sct_prepare_rx_seg(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head) -{ - HAL_ASSERT(hal->rx_free_desc_num >= lldesc_get_required_num(buf_len_bytes)); - - *trans_head = hal->cur_rx_seg_link; - lldesc_setup_link(hal->cur_rx_seg_link, recv_buffer, buf_len_bytes, true); - for (int i = 0; i< lldesc_get_required_num(buf_len_bytes); i++) { - hal->rx_seg_link_tail = hal->cur_rx_seg_link; - hal->cur_rx_seg_link++; - if (hal->cur_rx_seg_link == hal->dmadesc_rx + hal->dmadesc_n) { - //As there is enough space, so we simply point this to the pool head - hal->cur_rx_seg_link = hal->dmadesc_rx; - } - } - - hal->rx_free_desc_num -= lldesc_get_required_num(buf_len_bytes); -} - -spi_hal_dma_desc_status_t spi_hal_sct_new_rx_dma_desc_head(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head, uint32_t *used_desc_num) -{ - if (hal->rx_free_desc_num < lldesc_get_required_num(buf_len_bytes)) { - return SPI_HAL_DMA_DESC_RUN_OUT; - } - - s_sct_prepare_rx_seg(hal, recv_buffer, buf_len_bytes, trans_head); - *used_desc_num = lldesc_get_required_num(buf_len_bytes); - - return SPI_HAL_DMA_DESC_LINKED; -} - -spi_hal_dma_desc_status_t spi_hal_sct_link_rx_seg_dma_desc(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num) -{ - if (hal->rx_free_desc_num < lldesc_get_required_num(buf_len_bytes)) { - return SPI_HAL_DMA_DESC_RUN_OUT; - } - - if (hal->rx_seg_link_tail) { - //Connect last segment to the current segment, as we're sure the `s_sct_prepare_tx_seg` next won't fail. - STAILQ_NEXT(hal->rx_seg_link_tail, qe) = hal->cur_rx_seg_link; - } - - lldesc_t *internal_head = NULL; - s_sct_prepare_rx_seg(hal, recv_buffer, buf_len_bytes, &internal_head); - *used_desc_num += lldesc_get_required_num(buf_len_bytes); - - return SPI_HAL_DMA_DESC_LINKED; -} #endif //#if SOC_SPI_SCT_SUPPORTED
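
Note on the new HAL wrappers: the sketch below shows, under stated assumptions, how driver code might call the three functions added to spi_hal_iram.c above (spi_hal_sct_set_conf_bits_len, spi_hal_get_intr_mask, spi_hal_clear_intr_mask). The helper name s_sct_start_and_wait, the polling loop, and the omitted descriptor handling are illustrative assumptions and not part of this patch; only the three wrapper signatures and the SPI_LL_INTR_SEG_DONE mask appear in the code above.

    #include "hal/spi_hal.h"   // spi_hal_context_t and the SCT wrappers from this patch
    #include "hal/spi_ll.h"    // SPI_LL_INTR_SEG_DONE interrupt mask

    /* Hypothetical helper: program the conf-phase bit length, then poll for the
     * segment-done interrupt and acknowledge it. Loading the prepared TX/RX
     * descriptor heads and starting the transfer are omitted here. */
    static void s_sct_start_and_wait(spi_hal_context_t *hal, uint32_t conf_bitlen)
    {
        /* Only written to HW when it does not exceed SOC_SPI_SCT_CONF_BITLEN_MAX,
         * see the ESP32-S2 LL implementation above. */
        spi_hal_sct_set_conf_bits_len(hal, conf_bitlen);

        /* ... load the DMA descriptor heads and trigger the transfer here ... */

        while (!spi_hal_get_intr_mask(hal, SPI_LL_INTR_SEG_DONE)) {
            /* busy-wait; a real driver would handle this in its ISR instead */
        }
        spi_hal_clear_intr_mask(hal, SPI_LL_INTR_SEG_DONE);
    }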