From 3e061d6af40092d1161ddc2c0aa1ddde98b7d65e Mon Sep 17 00:00:00 2001
From: Ioannis Karachalios
Date: Wed, 5 Jun 2024 00:00:39 +0300
Subject: [PATCH] drivers: spi: smartbond: Add async API support

Add support for asynchronous operations. Also add support for DMA
acceleration, selectable via a Kconfig option (enabled by default,
as DMA scales better with transfer size than the interrupt-driven
approach).

Signed-off-by: Ioannis Karachalios
---
 drivers/spi/Kconfig.smartbond |  11 +
 drivers/spi/spi_smartbond.c   | 900 +++++++++++++++++++++++++++++++++-
 2 files changed, 898 insertions(+), 13 deletions(-)

diff --git a/drivers/spi/Kconfig.smartbond b/drivers/spi/Kconfig.smartbond
index 9c003eb915ddcf2..41860ed0ab8b05f 100644
--- a/drivers/spi/Kconfig.smartbond
+++ b/drivers/spi/Kconfig.smartbond
@@ -7,3 +7,14 @@ config SPI_SMARTBOND
 	depends on DT_HAS_RENESAS_SMARTBOND_SPI_ENABLED
 	help
 	  Enables SPI driver for Renesas SmartBond(tm) DA1469x series MCU.
+
+config SPI_SMARTBOND_DMA
+	bool "Renesas Smartbond(tm) SPI with DMA acceleration"
+	default y
+	depends on SPI_ASYNC
+	depends on SPI_SMARTBOND
+	select DMA
+	help
+	  Enables using the DMA engine instead of the interrupt-driven
+	  approach. This acceleration is available only for
+	  asynchronous transfers.
diff --git a/drivers/spi/spi_smartbond.c b/drivers/spi/spi_smartbond.c
index 730a3b37a9db44a..2e9de42a6e563dd 100644
--- a/drivers/spi/spi_smartbond.c
+++ b/drivers/spi/spi_smartbond.c
@@ -18,6 +18,8 @@ LOG_MODULE_REGISTER(spi_smartbond);
 #include
 #include
 #include
+#include
+#include
 #include
 #include
@@ -32,15 +34,66 @@ struct spi_smartbond_cfg {
 	SPI_Type *regs;
 	int periph_clock_config;
 	const struct pinctrl_dev_config *pcfg;
+#ifdef CONFIG_SPI_SMARTBOND_DMA
+	int tx_dma_chan;
+	int rx_dma_chan;
+	uint8_t tx_slot_mux;
+	uint8_t rx_slot_mux;
+	const struct device *tx_dma_ctrl;
+	const struct device *rx_dma_ctrl;
+#endif
+};
+
+enum spi_smartbond_transfer {
+	SPI_SMARTBOND_TRANSFER_TX_ONLY,
+	SPI_SMARTBOND_TRANSFER_RX_ONLY,
+	SPI_SMARTBOND_TRANSFER_TX_RX,
+	SPI_SMARTBOND_TRANSFER_TX_RX_TRUNCATED,
+	SPI_SMARTBOND_TRANSFER_NONE
+};
+
+enum spi_smartbond_dma_channel {
+	SPI_SMARTBOND_DMA_TX_CHANNEL,
+	SPI_SMARTBOND_DMA_RX_CHANNEL
 };
 
 struct spi_smartbond_data {
 	struct spi_context ctx;
 	uint8_t dfs;
+
 #if defined(CONFIG_PM_DEVICE)
 	ATOMIC_DEFINE(pm_policy_state_flag, 1);
 	uint32_t spi_ctrl_reg;
 #endif
+
+#ifdef CONFIG_SPI_SMARTBOND_DMA
+	struct dma_config tx_dma_cfg;
+	struct dma_config rx_dma_cfg;
+	struct dma_block_config tx_dma_block_cfg;
+	struct dma_block_config rx_dma_block_cfg;
+	struct k_sem rx_dma_sync;
+	struct k_sem tx_dma_sync;
+
+	ATOMIC_DEFINE(dma_channel_atomic_flag, 2);
+#endif
+
+#ifdef CONFIG_SPI_ASYNC
+	size_t rx_len;
+	size_t tx_len;
+	size_t transferred;
+	enum spi_smartbond_transfer transfer_mode;
+#endif
+};
+
+enum spi_smartbond_fifo_mode {
+	/* Bi-directional mode */
+	SPI_SMARTBOND_FIFO_MODE_TX_RX,
+	/* TX FIFO single depth, no flow control */
+	SPI_SMARTBOND_FIFO_MODE_RX_ONLY,
+	/* RX FIFO single depth, no flow control */
+	SPI_SMARTBOND_FIFO_MODE_TX_ONLY,
+	SPI_SMARTBOND_FIFO_NONE
 };
 
 static inline void spi_smartbond_enable(const struct spi_smartbond_cfg *cfg, bool enable)
@@ -60,6 +113,364 @@ static inline bool spi_smartbond_isenabled(const struct spi_smartbond_cfg *cfg)
 	       (!(cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_RST_Msk));
 }
 
+static inline void spi_smartbond_write_word(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+	struct spi_smartbond_data *data = dev->data;
+
+	/*
+	 * No need to typecast the register address: whatever the access width,
+	 * the controller automatically generates the necessary clock cycles
+	 * based on the configured data size.
+	 */
+	switch (data->dfs) {
+	case 1:
+		cfg->regs->SPI_RX_TX_REG = *(uint8_t *)data->ctx.tx_buf;
+		break;
+	case 2:
+		cfg->regs->SPI_RX_TX_REG = sys_get_le16(data->ctx.tx_buf);
+		break;
+	case 4:
+		cfg->regs->SPI_RX_TX_REG = sys_get_le32(data->ctx.tx_buf);
+		break;
+	}
+}
+
+static inline void spi_smartbond_write_dummy(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+
+	cfg->regs->SPI_RX_TX_REG = 0x0;
+}
+
+static inline void spi_smartbond_read_word(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+	struct spi_smartbond_data *data = dev->data;
+
+	switch (data->dfs) {
+	case 1:
+		*(uint8_t *)data->ctx.rx_buf = cfg->regs->SPI_RX_TX_REG;
+		break;
+	case 2:
+		sys_put_le16((uint16_t)cfg->regs->SPI_RX_TX_REG, data->ctx.rx_buf);
+		break;
+	case 4:
+		sys_put_le32(cfg->regs->SPI_RX_TX_REG, data->ctx.rx_buf);
+		break;
+	}
+}
+
+static inline void spi_smartbond_read_discard(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+
+	(void)cfg->regs->SPI_RX_TX_REG;
+}
+
+#ifdef CONFIG_SPI_ASYNC
+static inline bool spi_smartbond_is_tx_full(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+
+	return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_TXH_Msk);
+}
+
+static inline bool spi_smartbond_is_busy(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+
+	return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_BUSY_Msk);
+}
+
+static inline uint8_t spi_smartbond_get_fifo_mode(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+
+	return ((cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Msk) >>
+		SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Pos);
+}
+
+static void spi_smartbond_set_fifo_mode(const struct device *dev, enum spi_smartbond_fifo_mode mode)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+	struct spi_smartbond_data *data = dev->data;
+	bool is_enabled = spi_smartbond_isenabled(cfg);
+	enum spi_smartbond_fifo_mode current_mode = spi_smartbond_get_fifo_mode(dev);
+
+	if ((current_mode != mode) || (data->dfs == 4)) {
+		if (current_mode != SPI_SMARTBOND_FIFO_MODE_RX_ONLY) {
+			while (spi_smartbond_is_busy(dev)) {
+				;
+			}
+		}
+		/* The controller should be disabled while the FIFO mode is updated */
+		cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_ON_Msk;
+
+#ifdef CONFIG_SPI_SMARTBOND_DMA
+		/*
+		 * Workaround: the controller cannot generate DMA requests
+		 * when the bus length is 4 bytes or when in RX-only FIFO mode.
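+		 * In that case the FIFO is bypassed (SPI_SMARTBOND_FIFO_NONE) and
+		 * the TX DMA request mode is adjusted accordingly (see the
+		 * SPI_DMA_TXREQ_MODE update below).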
+ */ + if ((data->dfs == 4) || (mode == SPI_SMARTBOND_FIFO_MODE_RX_ONLY)) { + mode = SPI_SMARTBOND_FIFO_NONE; + } +#endif + + cfg->regs->SPI_CTRL_REG = + ((cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Msk) | + ((mode << SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Pos) & + SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Msk)); + + + if (mode != SPI_SMARTBOND_FIFO_NONE) { + cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_DMA_TXREQ_MODE_Msk; + } else { + cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_DMA_TXREQ_MODE_Msk; + } + + if (is_enabled) { + cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_ON_Msk; + } + } +} + +static inline void spi_smartbond_clear_interrupt(const struct device *dev) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + + cfg->regs->SPI_CLEAR_INT_REG = 0x1; +} + +static inline void spi_smartbond_isr_set_status(const struct device *dev, bool status) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + + if (status) { + cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_MINT_Msk; + } else { + cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_MINT_Msk; + } +} + +/* 0 = No RX data available, 1 = data has been transmitted and received */ +static inline bool spi_smartbond_is_rx_data(const struct device *dev) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + + return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_INT_BIT_Msk); +} +#endif + +#ifdef CONFIG_SPI_SMARTBOND_DMA +static int spi_smartbond_dma_tx_channel_request(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + const struct spi_smartbond_cfg *config = dev->config; + + if (!atomic_test_and_set_bit(data->dma_channel_atomic_flag, + SPI_SMARTBOND_DMA_TX_CHANNEL)) { + if (dma_request_channel(config->tx_dma_ctrl, (void *)&config->tx_dma_chan) < 0) { + atomic_clear_bit(data->dma_channel_atomic_flag, + SPI_SMARTBOND_DMA_TX_CHANNEL); + return -EIO; + } + } + + return 0; +} + +#ifdef CONFIG_PM_DEVICE +static void spi_smartbond_dma_tx_channel_release(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + const struct spi_smartbond_cfg *config = dev->config; + + if (atomic_test_and_clear_bit(data->dma_channel_atomic_flag, + SPI_SMARTBOND_DMA_TX_CHANNEL)) { + dma_release_channel(config->tx_dma_ctrl, config->tx_dma_chan); + } +} +#endif + +static int spi_smartbond_dma_rx_channel_request(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + const struct spi_smartbond_cfg *config = dev->config; + + if (!atomic_test_and_set_bit(data->dma_channel_atomic_flag, + SPI_SMARTBOND_DMA_RX_CHANNEL)) { + if (dma_request_channel(config->rx_dma_ctrl, (void *)&config->rx_dma_chan) < 0) { + atomic_clear_bit(data->dma_channel_atomic_flag, + SPI_SMARTBOND_DMA_RX_CHANNEL); + return -EIO; + } + } + + return 0; +} + +#ifdef CONFIG_PM_DEVICE +static void spi_smartbond_dma_rx_channel_release(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + const struct spi_smartbond_cfg *config = dev->config; + + if (atomic_test_and_clear_bit(data->dma_channel_atomic_flag, + SPI_SMARTBOND_DMA_RX_CHANNEL)) { + dma_release_channel(config->rx_dma_ctrl, config->rx_dma_chan); + } +} +#endif + +static void spi_smartbond_tx_dma_cb(const struct device *dma, void *arg, + uint32_t id, int status) +{ + const struct device *dev = arg; + struct spi_smartbond_data *data = dev->data; + struct spi_context *ctx = &data->ctx; + + if (status < 0) { + LOG_WRN("DMA transfer did not complete"); + } + + spi_context_update_tx(ctx, data->dfs, data->tx_len); + k_sem_give(&data->tx_dma_sync); +} + 
+static void spi_smartbond_rx_dma_cb(const struct device *dma, void *arg,
+				    uint32_t id, int status)
+{
+	const struct device *dev = arg;
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+
+	if (status < 0) {
+		LOG_WRN("DMA transfer did not complete");
+	}
+
+	spi_context_update_rx(ctx, data->dfs, data->rx_len);
+	k_sem_give(&data->rx_dma_sync);
+}
+
+#ifdef CONFIG_PM_DEVICE
+static void spi_smartbond_dma_deconfig(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *config = dev->config;
+
+	dma_stop(config->rx_dma_ctrl, config->rx_dma_chan);
+	dma_stop(config->tx_dma_ctrl, config->tx_dma_chan);
+
+	spi_smartbond_dma_rx_channel_release(dev);
+	spi_smartbond_dma_tx_channel_release(dev);
+}
+#endif
+
+static int spi_smartbond_dma_config(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	const struct spi_smartbond_cfg *config = dev->config;
+	struct dma_config *tx = &data->tx_dma_cfg;
+	struct dma_config *rx = &data->rx_dma_cfg;
+	struct dma_block_config *tx_block = &data->tx_dma_block_cfg;
+	struct dma_block_config *rx_block = &data->rx_dma_block_cfg;
+
+	/*
+	 * DMA RX should be assigned an even channel number and
+	 * DMA TX the immediately following (odd) channel.
+	 */
+	if (!(config->tx_dma_chan & 0x1) ||
+	    (config->rx_dma_chan & 0x1) ||
+	    (config->tx_dma_chan != (config->rx_dma_chan + 1))) {
+		LOG_ERR("Invalid RX/TX channel selection");
+		return -EINVAL;
+	}
+
+	if (config->tx_slot_mux != config->rx_slot_mux) {
+		LOG_ERR("TX/RX DMA slots mismatch");
+		return -EINVAL;
+	}
+
+	if (!device_is_ready(config->tx_dma_ctrl) ||
+	    !device_is_ready(config->rx_dma_ctrl)) {
+		LOG_ERR("TX/RX DMA device is not ready");
+		return -ENODEV;
+	}
+
+	if (spi_smartbond_dma_tx_channel_request(dev) < 0) {
+		LOG_ERR("TX DMA channel is already occupied");
+		return -EIO;
+	}
+
+	if (spi_smartbond_dma_rx_channel_request(dev) < 0) {
+		LOG_ERR("RX DMA channel is already occupied");
+		return -EIO;
+	}
+
+	tx->channel_direction = MEMORY_TO_PERIPHERAL;
+	tx->dma_callback = spi_smartbond_tx_dma_cb;
+	tx->user_data = (void *)dev;
+	tx->block_count = 1;
+	tx->head_block = &data->tx_dma_block_cfg;
+	tx->error_callback_dis = 1;
+	tx->dma_slot = config->tx_slot_mux;
+	tx->channel_priority = 2;
+
+	/* Burst mode is not used when DREQ is one */
+	tx->source_burst_length = 1;
+	tx->dest_burst_length = 1;
+	/* Source and destination data size should reflect the DFS value */
+	tx->source_data_size = 0;
+	tx->dest_data_size = 0;
+
+	/* Do not change */
+	tx_block->dest_addr_adj = 0x2;
+	/* Incremental */
+	tx_block->source_addr_adj = 0x0;
+	tx_block->dest_address = (uint32_t)&config->regs->SPI_RX_TX_REG;
+
+	/*
+	 * To be filled when a transaction is requested and
+	 * should reflect the total number of bytes.
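+	 * (the trigger routine computes this as the number of data items
+	 * multiplied by the data frame size).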
+ */ + tx_block->block_size = 0; + /* Should reflect the TX buffer */ + tx_block->source_address = 0; + + rx->channel_direction = PERIPHERAL_TO_MEMORY; + rx->dma_callback = spi_smartbond_rx_dma_cb; + rx->user_data = (void *)dev; + rx->block_count = 1; + rx->head_block = &data->rx_dma_block_cfg; + rx->error_callback_dis = 1; + rx->dma_slot = config->rx_slot_mux; + rx->channel_priority = 2; + + /* Burst mode is not using when DREQ is one */ + rx->source_burst_length = 1; + rx->dest_burst_length = 1; + /* Source and destination data size should reflect DFS value */ + rx->source_data_size = 0; + rx->dest_data_size = 0; + + /* Do not change */ + rx_block->source_addr_adj = 0x2; + /* Incremenetal */ + rx_block->dest_addr_adj = 0x0; + rx_block->source_address = (uint32_t)&config->regs->SPI_RX_TX_REG; + + /* + * To be filled when a transaction is requested and + * should reflect the total number of bytes. + */ + rx_block->block_size = 0; + /* Should reflect the RX buffer */ + rx_block->dest_address = 0; + + return 0; +} +#endif + static inline int spi_smartbond_set_speed(const struct spi_smartbond_cfg *cfg, const uint32_t frequency) { @@ -168,11 +579,6 @@ static int spi_smartbond_configure(const struct spi_smartbond_cfg *cfg, return -ENOTSUP; } - if (spi_cfg->operation & SPI_MODE_LOOP) { - LOG_ERR("Loopback mode is not supported"); - return -ENOTSUP; - } - if (spi_smartbond_isenabled(cfg)) { spi_smartbond_enable(cfg, false); } @@ -208,6 +614,339 @@ static int spi_smartbond_configure(const struct spi_smartbond_cfg *cfg, return 0; } +#ifdef CONFIG_SPI_ASYNC +static void spi_smartbond_write(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + struct spi_context *ctx = &data->ctx; + + while (spi_context_tx_buf_on(ctx)) { + /* Check if TX FIFO is full as otherwise undefined data should be transmitted. */ + if (spi_smartbond_is_tx_full(dev)) { + spi_smartbond_clear_interrupt(dev); + break; + } + /* Send to TX FIFO and update buffer pointer. */ + spi_smartbond_write_word(dev); + spi_context_update_tx(ctx, data->dfs, 1); + } +} + +static void spi_smartbond_transfer(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + struct spi_context *ctx = &data->ctx; + + while (spi_context_rx_buf_on(ctx)) { + /* Zero means that RX FIFO or register is empty */ + if (!spi_smartbond_is_rx_data(dev)) { + break; + } + spi_smartbond_read_word(dev); + spi_smartbond_clear_interrupt(dev); + + /* Update context */ + spi_context_update_rx(ctx, data->dfs, 1); + } + + while (spi_context_tx_buf_on(ctx)) { + /* Check if TX FIFO is full as otherwise undefined data should be transmitted. */ + if (spi_smartbond_is_tx_full(dev)) { + break; + } + spi_smartbond_write_word(dev); + + spi_context_update_tx(ctx, data->dfs, 1); + } +} + +static void spi_smartbond_transfer_truncated(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + struct spi_context *ctx = &data->ctx; + + while (data->rx_len) { + /* Zero means that RX FIFO or register is empty */ + if (!spi_smartbond_is_rx_data(dev)) { + break; + } + spi_smartbond_read_word(dev); + spi_smartbond_clear_interrupt(dev); + + /* Update context */ + spi_context_update_rx(ctx, data->dfs, 1); + data->rx_len--; + data->tranferred++; + } + + while (data->tx_len) { + /* Check if TX FIFO is full as otherwise undefined data should be transmitted. 
+		if (spi_smartbond_is_tx_full(dev)) {
+			break;
+		}
+		spi_smartbond_write_word(dev);
+
+		spi_context_update_tx(ctx, data->dfs, 1);
+		data->tx_len--;
+	}
+}
+
+static void spi_smartbond_read(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+
+	while (spi_context_rx_buf_on(ctx)) {
+		/* Zero means that the RX FIFO or register is empty */
+		if (!spi_smartbond_is_rx_data(dev)) {
+			break;
+		}
+
+		spi_smartbond_read_word(dev);
+		spi_context_update_rx(ctx, data->dfs, 1);
+		spi_smartbond_clear_interrupt(dev);
+	}
+
+	/* Perform dummy accesses to generate the required clock cycles */
+	while (data->tx_len) {
+		if (spi_smartbond_is_tx_full(dev)) {
+			break;
+		}
+		spi_smartbond_write_dummy(dev);
+
+		data->tx_len--;
+	}
+}
+
+static int spi_smartbond_transfer_mode_get(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+
+	if (spi_context_rx_buf_on(ctx) || spi_context_tx_buf_on(ctx)) {
+		if (!spi_context_rx_buf_on(ctx)) {
+			spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_ONLY);
+			return SPI_SMARTBOND_TRANSFER_TX_ONLY;
+		}
+
+		if (!spi_context_tx_buf_on(ctx)) {
+			spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_RX);
+			return SPI_SMARTBOND_TRANSFER_RX_ONLY;
+		}
+
+		if (spi_context_total_rx_len(ctx) == spi_context_total_tx_len(ctx)) {
+			spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_RX);
+			return SPI_SMARTBOND_TRANSFER_TX_RX;
+		}
+
+		spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_RX);
+		return SPI_SMARTBOND_TRANSFER_TX_RX_TRUNCATED;
+	}
+
+	spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_NONE);
+	return SPI_SMARTBOND_TRANSFER_NONE;
+}
+
+static inline void spi_smartbond_transfer_mode_check_and_update(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	enum spi_smartbond_transfer transfer_mode_new = spi_smartbond_transfer_mode_get(dev);
+
+	if (data->transfer_mode != transfer_mode_new) {
+		data->transfer_mode = transfer_mode_new;
+	}
+}
+
+static void spi_smartbond_isr(void *args)
+{
+	struct device *dev = args;
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+
+	switch (data->transfer_mode) {
+	case SPI_SMARTBOND_TRANSFER_RX_ONLY:
+		spi_smartbond_read(dev);
+		break;
+	case SPI_SMARTBOND_TRANSFER_TX_ONLY:
+		spi_smartbond_write(dev);
+		break;
+	case SPI_SMARTBOND_TRANSFER_TX_RX:
+		spi_smartbond_transfer(dev);
+		break;
+	case SPI_SMARTBOND_TRANSFER_TX_RX_TRUNCATED:
+		/* The truncated part is complete, so re-evaluate the transfer mode */
+		if (!data->rx_len && !data->tx_len) {
+			spi_smartbond_transfer_mode_check_and_update(dev);
+			if (data->transfer_mode == SPI_SMARTBOND_TRANSFER_RX_ONLY) {
+				data->tx_len = spi_context_total_rx_len(ctx) - data->transferred;
+				/* Clear in case another truncated transfer should be executed */
+				data->transferred = 0;
+				spi_smartbond_read(dev);
+			} else if (data->transfer_mode == SPI_SMARTBOND_TRANSFER_TX_ONLY) {
+				spi_smartbond_write(dev);
+			}
+		} else {
+			spi_smartbond_transfer_truncated(dev);
+		}
+		break;
+	case SPI_SMARTBOND_TRANSFER_NONE:
+		__fallthrough;
+	default:
+		__ASSERT_MSG_INFO("Invalid transfer mode");
+		break;
+	}
+
+	/* All buffers have been exhausted; signal completion */
+	if (!spi_context_tx_buf_on(ctx) && !spi_context_rx_buf_on(ctx)) {
+		spi_smartbond_isr_set_status(dev, false);
+
+		/* Mark completion to trigger the callback function */
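+		/*
+		 * spi_context_complete() wakes up a blocked caller or, for async
+		 * transfers, invokes the user-supplied callback.
+		 */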
+		spi_context_complete(ctx, dev, 0);
+		spi_context_cs_control(ctx, false);
+		spi_smartbond_pm_policy_state_lock_put(data);
+	}
+}
+
+static void spi_smartbond_isr_trigger(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+
+	data->transfer_mode = spi_smartbond_transfer_mode_get(dev);
+
+	switch (data->transfer_mode) {
+	case SPI_SMARTBOND_TRANSFER_RX_ONLY:
+		data->tx_len = spi_context_total_rx_len(ctx);
+		spi_smartbond_read(dev);
+		break;
+	case SPI_SMARTBOND_TRANSFER_TX_ONLY:
+		spi_smartbond_write(dev);
+		break;
+	case SPI_SMARTBOND_TRANSFER_TX_RX:
+		spi_smartbond_transfer(dev);
+		break;
+	case SPI_SMARTBOND_TRANSFER_TX_RX_TRUNCATED: {
+		size_t min_len = MIN(spi_context_total_rx_len(ctx), spi_context_total_tx_len(ctx));
+
+		data->rx_len = min_len;
+		data->tx_len = min_len;
+		spi_smartbond_transfer_truncated(dev);
+		break;
+	}
+	case SPI_SMARTBOND_TRANSFER_NONE:
+		__fallthrough;
+	default:
+		__ASSERT_MSG_INFO("Invalid transfer mode");
+		break;
+	}
+
+	spi_smartbond_isr_set_status(dev, true);
+}
+#endif
+
+#ifdef CONFIG_SPI_SMARTBOND_DMA
+static uint32_t spi_smartbond_read_dummy_buf;
+
+static int spi_smartbond_dma_trigger(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	const struct spi_smartbond_cfg *config = dev->config;
+	struct spi_context *ctx = &data->ctx;
+	struct dma_config *tx = &data->tx_dma_cfg;
+	struct dma_config *rx = &data->rx_dma_cfg;
+	struct dma_block_config *tx_block = &data->tx_dma_block_cfg;
+	struct dma_block_config *rx_block = &data->rx_dma_block_cfg;
+
+	rx->source_data_size = data->dfs;
+	rx->dest_data_size = data->dfs;
+	tx->source_data_size = data->dfs;
+	tx->dest_data_size = data->dfs;
+
+	data->transfer_mode = spi_smartbond_transfer_mode_get(dev);
+	do {
+		switch (data->transfer_mode) {
+		case SPI_SMARTBOND_TRANSFER_RX_ONLY:
+			data->rx_len = spi_context_max_continuous_chunk(ctx);
+			data->tx_len = data->rx_len;
+
+			rx_block->block_size = data->rx_len * data->dfs;
+			rx_block->dest_address = (uint32_t)ctx->rx_buf;
+			tx_block->block_size = rx_block->block_size;
+			tx_block->source_address = (uint32_t)&spi_smartbond_read_dummy_buf;
+			/* Do not increment */
+			tx_block->source_addr_adj = 0x2;
+
+			if (dma_config(config->tx_dma_ctrl, config->tx_dma_chan, tx) < 0) {
+				LOG_ERR("TX DMA configuration failed");
+				return -EINVAL;
+			}
+			if (dma_config(config->rx_dma_ctrl, config->rx_dma_chan, rx) < 0) {
+				LOG_ERR("RX DMA configuration failed");
+				return -EINVAL;
+			}
+			dma_start(config->rx_dma_ctrl, config->rx_dma_chan);
+			dma_start(config->tx_dma_ctrl, config->tx_dma_chan);
+
+			/* Wait for the current DMA transfer to complete */
+			k_sem_take(&data->tx_dma_sync, K_FOREVER);
+			k_sem_take(&data->rx_dma_sync, K_FOREVER);
+			break;
+		case SPI_SMARTBOND_TRANSFER_TX_ONLY:
+			data->tx_len = spi_context_max_continuous_chunk(ctx);
+			data->rx_len = 0;
+
+			tx_block->block_size = data->tx_len * data->dfs;
+			tx_block->source_address = (uint32_t)ctx->tx_buf;
+			tx_block->source_addr_adj = 0x0;
+
+			if (dma_config(config->tx_dma_ctrl, config->tx_dma_chan, tx) < 0) {
+				LOG_ERR("TX DMA configuration failed");
+				return -EINVAL;
+			}
+			dma_start(config->tx_dma_ctrl, config->tx_dma_chan);
+
+			/* Wait for the current DMA transfer to complete */
+			k_sem_take(&data->tx_dma_sync, K_FOREVER);
+			break;
+		case SPI_SMARTBOND_TRANSFER_TX_RX:
+		case SPI_SMARTBOND_TRANSFER_TX_RX_TRUNCATED:
+			data->rx_len = spi_context_max_continuous_chunk(ctx);
+			data->tx_len = data->rx_len;
+
+			tx_block->block_size = data->tx_len * data->dfs;
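+			/* block_size is in bytes, hence the multiplication by the frame size */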
+ tx_block->source_address = (uint32_t)ctx->tx_buf; + /* Incremental */ + tx_block->source_addr_adj = 0x0; + rx_block->block_size = tx_block->block_size; + rx_block->dest_address = (uint32_t)ctx->rx_buf; + + if (dma_config(config->tx_dma_ctrl, config->tx_dma_chan, tx) < 0) { + LOG_ERR("TX DMA configuration failed"); + return -EINVAL; + } + if (dma_config(config->rx_dma_ctrl, config->rx_dma_chan, rx) < 0) { + LOG_ERR("RX DMA configuration failed"); + return -EINVAL; + } + dma_start(config->rx_dma_ctrl, config->rx_dma_chan); + dma_start(config->tx_dma_ctrl, config->tx_dma_chan); + + k_sem_take(&data->tx_dma_sync, K_FOREVER); + k_sem_take(&data->rx_dma_sync, K_FOREVER); + break; + case SPI_SMARTBOND_TRANSFER_NONE: + __fallthrough; + default: + __ASSERT_MSG_INFO("Invalid transfer mode"); + break; + } + + spi_smartbond_transfer_mode_check_and_update(dev); + } while (data->transfer_mode != SPI_SMARTBOND_TRANSFER_NONE); + + return 0; +} +#endif + static int spi_smartbond_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) @@ -215,44 +954,45 @@ static int spi_smartbond_transceive(const struct device *dev, const struct spi_c const struct spi_smartbond_cfg *cfg = dev->config; struct spi_smartbond_data *data = dev->data; struct spi_context *ctx = &data->ctx; - uint32_t bitmask; int rc; spi_smartbond_pm_policy_state_lock_get(data); spi_context_lock(&data->ctx, false, NULL, NULL, spi_cfg); + rc = spi_smartbond_configure(cfg, data, spi_cfg); if (rc == 0) { spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->dfs); spi_context_cs_control(ctx, true); - bitmask = ~((~0UL) << SPI_WORD_SIZE_GET(data->ctx.config->operation)); while (spi_context_tx_buf_on(ctx) || spi_context_rx_buf_on(ctx)) { if (spi_context_tx_buf_on(ctx)) { - cfg->regs->SPI_RX_TX_REG = (*(uint32_t *)ctx->tx_buf) & bitmask; + spi_smartbond_write_word(dev); spi_context_update_tx(ctx, data->dfs, 1); } else { - cfg->regs->SPI_RX_TX_REG = 0UL; + spi_smartbond_write_dummy(dev); } while (!(cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_INT_BIT_Msk)) { }; if (spi_context_rx_buf_on(ctx)) { - (*(uint32_t *)ctx->rx_buf) = cfg->regs->SPI_RX_TX_REG & bitmask; + spi_smartbond_read_word(dev); spi_context_update_rx(ctx, data->dfs, 1); } else { - (void)cfg->regs->SPI_RX_TX_REG; + spi_smartbond_read_discard(dev); } cfg->regs->SPI_CLEAR_INT_REG = 1UL; } + + spi_context_cs_control(ctx, false); } - spi_context_cs_control(ctx, false); spi_context_release(&data->ctx, rc); spi_smartbond_pm_policy_state_lock_put(data); return rc; } + #ifdef CONFIG_SPI_ASYNC static int spi_smartbond_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, @@ -260,7 +1000,38 @@ static int spi_smartbond_transceive_async(const struct device *dev, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { - return -ENOTSUP; + const struct spi_smartbond_cfg *cfg = dev->config; + struct spi_smartbond_data *data = dev->data; + struct spi_context *ctx = &data->ctx; + int rc; + + spi_context_lock(ctx, true, cb, userdata, spi_cfg); + + rc = spi_smartbond_configure(cfg, data, spi_cfg); + if (rc == 0) { + spi_smartbond_pm_policy_state_lock_get(data); + spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->dfs); + spi_context_cs_control(ctx, true); + +#ifndef CONFIG_SPI_SMARTBOND_DMA + spi_smartbond_isr_trigger(dev); + + /* + * PM constraints will be released within ISR once all transfers + * are exercised along with de-asserting the #CS line. 
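+		 * In the DMA path below, by contrast, spi_smartbond_dma_trigger()
+		 * blocks until all transfers have completed, so completion is
+		 * signaled immediately afterwards.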
+ */ +#else + rc = spi_smartbond_dma_trigger(dev); + /* Mark completion to trigger callback function */ + spi_context_complete(ctx, dev, 0); + spi_context_cs_control(ctx, false); + spi_smartbond_pm_policy_state_lock_put(data); +#endif + } + + spi_context_release(ctx, rc); + + return rc; } #endif @@ -308,6 +1079,14 @@ static int spi_smartbond_resume(const struct device *dev) return rc; } +#ifdef CONFIG_SPI_SMARTBOND_DMA + rc = spi_smartbond_dma_config(dev); + if (rc < 0) { + LOG_ERR("Failed to configure DMA"); + return rc; + } +#endif + spi_context_unlock_unconditionally(&data->ctx); return 0; @@ -331,6 +1110,10 @@ static int spi_smartbond_suspend(const struct device *dev) LOG_WRN("Failed to configure the SPI pins to inactive state"); } +#ifdef CONFIG_SPI_SMARTBOND_DMA + spi_smartbond_dma_deconfig(dev); +#endif + return ret; } @@ -356,11 +1139,51 @@ static int spi_smartbond_pm_action(const struct device *dev, } #endif +#define SPI_SMARTBOND_ISR_CONNECT \ + IRQ_CONNECT(DT_IRQN(DT_NODELABEL(spi)), DT_IRQ(DT_NODELABEL(spi), priority), \ + spi_smartbond_isr, DEVICE_DT_GET(DT_NODELABEL(spi)), 0); \ + irq_enable(DT_IRQN(DT_NODELABEL(spi))); + +#define SPI2_SMARTBOND_ISR_CONNECT \ + IRQ_CONNECT(DT_IRQN(DT_NODELABEL(spi2)), DT_IRQ(DT_NODELABEL(spi2), priority), \ + spi_smartbond_isr, DEVICE_DT_GET(DT_NODELABEL(spi2)), 0); \ + irq_enable(DT_IRQN(DT_NODELABEL(spi2))); + +#if defined(CONFIG_SPI_ASYNC) +static int spi_smartbond_isr_connect(const struct device *dev) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + + switch ((uint32_t)cfg->regs) { + case (uint32_t)SPI: + COND_CODE_1(DT_NODE_HAS_STATUS(DT_NODELABEL(spi), okay), + (SPI_SMARTBOND_ISR_CONNECT), (NULL)); + break; + case (uint32_t)SPI2: + COND_CODE_1(DT_NODE_HAS_STATUS(DT_NODELABEL(spi2), okay), + (SPI2_SMARTBOND_ISR_CONNECT), (NULL)); + break; + default: + return -EINVAL; + } + + return 0; +} +#endif + static int spi_smartbond_init(const struct device *dev) { int ret; struct spi_smartbond_data *data = dev->data; +#ifdef CONFIG_SPI_ASYNC + data->transfer_mode = SPI_SMARTBOND_TRANSFER_NONE; +#ifdef CONFIG_SPI_SMARTBOND_DMA + k_sem_init(&data->tx_dma_sync, 0, 1); + k_sem_init(&data->rx_dma_sync, 0, 1); +#endif +#endif + #ifdef CONFIG_PM_DEVICE_RUNTIME /* Make sure device state is marked as suspended */ pm_device_init_suspended(dev); @@ -373,15 +1196,66 @@ static int spi_smartbond_init(const struct device *dev) #endif spi_context_unlock_unconditionally(&data->ctx); +#ifdef CONFIG_SPI_ASYNC + ret = spi_smartbond_isr_connect(dev); +#endif + return ret; } +#ifdef CONFIG_SPI_SMARTBOND_DMA +#define SPI_SMARTBOND_DMA_TX_INIT(id) \ + .tx_dma_chan = DT_INST_DMAS_CELL_BY_NAME(id, tx, channel), \ + .tx_slot_mux = (uint8_t)DT_INST_DMAS_CELL_BY_NAME(id, tx, config), \ + .tx_dma_ctrl = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)), +#else +#define SPI_SMARTBOND_DMA_TX_INIT(id) +#endif + +#ifdef CONFIG_SPI_SMARTBOND_DMA +#define SPI_SMARTBOND_DMA_RX_INIT(id) \ + .rx_dma_chan = DT_INST_DMAS_CELL_BY_NAME(id, rx, channel), \ + .rx_slot_mux = (uint8_t)DT_INST_DMAS_CELL_BY_NAME(id, rx, config), \ + .rx_dma_ctrl = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)), +#else +#define SPI_SMARTBOND_DMA_RX_INIT(id) +#endif + +#ifdef CONFIG_SPI_SMARTBOND_DMA +#define SPI_SMARTBOND_DMA_TX_INVALIDATE(id) \ + .tx_dma_chan = 255, \ + .tx_slot_mux = 255, \ + .tx_dma_ctrl = NULL, +#else +#define SPI_SMARTBOND_DMA_TX_INVALIDATE(id) +#endif + +#ifdef CONFIG_SPI_SMARTBOND_DMA +#define SPI_SMARTBOND_DMA_RX_INVALIDATE(id) \ + .rx_dma_chan = 255, \ + .rx_slot_mux = 255, \ + 
	.rx_dma_ctrl = NULL,
+#else
+#define SPI_SMARTBOND_DMA_RX_INVALIDATE(id)
+#endif
+
 #define SPI_SMARTBOND_DEVICE(id) \
 	PINCTRL_DT_INST_DEFINE(id); \
 	static const struct spi_smartbond_cfg spi_smartbond_##id##_cfg = { \
 		.regs = (SPI_Type *)DT_INST_REG_ADDR(id), \
 		.periph_clock_config = DT_INST_PROP(id, periph_clock_config), \
 		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \
+		COND_CODE_1(DT_INST_DMAS_HAS_NAME(id, tx), \
+			(SPI_SMARTBOND_DMA_TX_INIT(id)), \
+			(SPI_SMARTBOND_DMA_TX_INVALIDATE(id))) \
+		COND_CODE_1(DT_INST_DMAS_HAS_NAME(id, rx), \
+			(SPI_SMARTBOND_DMA_RX_INIT(id)), \
+			(SPI_SMARTBOND_DMA_RX_INVALIDATE(id))) \
 	}; \
 	static struct spi_smartbond_data spi_smartbond_##id##_data = { \
 		SPI_CONTEXT_INIT_LOCK(spi_smartbond_##id##_data, ctx), \