diff --git a/drivers/spi/Kconfig.smartbond b/drivers/spi/Kconfig.smartbond index 9c003eb915ddcf2..619837500363486 100644 --- a/drivers/spi/Kconfig.smartbond +++ b/drivers/spi/Kconfig.smartbond @@ -7,3 +7,13 @@ config SPI_SMARTBOND depends on DT_HAS_RENESAS_SMARTBOND_SPI_ENABLED help Enables SPI driver for Renesas SmartBond(tm) DA1469x series MCU. + +config SPI_SMARTBOND_DMA + bool "Renesas Smartbond(tm) SPI with DMA acceleration" + default y + depends on SPI_SMARTBOND + select DMA + help + Enables using the DMA engine instead of interrupt-driven + approach. This acceleration is available only for + asynchronous transfers. diff --git a/drivers/spi/spi_smartbond.c b/drivers/spi/spi_smartbond.c index 730a3b37a9db44a..b479f7b71cf8ba4 100644 --- a/drivers/spi/spi_smartbond.c +++ b/drivers/spi/spi_smartbond.c @@ -18,29 +18,81 @@ LOG_MODULE_REGISTER(spi_smartbond); #include #include #include +#include +#include #include #include -#define DIVN_CLK 32000000 /* divN_clk 32MHz */ -#define SCLK_FREQ_2MHZ (DIVN_CLK / 14) /* 2.285714MHz*/ -#define SCLK_FREQ_4MHZ (DIVN_CLK / 8) /* 4MHz */ -#define SCLK_FREQ_8MHZ (DIVN_CLK / 4) /* 8MHz */ -#define SCLK_FREQ_16MHZ (DIVN_CLK / 2) /* 16MHz */ +#define DIVN_CLK 32000000 /* DIVN clock: fixed @32MHz */ +#define SCLK_FREQ_2MHZ (DIVN_CLK / 14) /* 2.285714 MHz*/ +#define SCLK_FREQ_4MHZ (DIVN_CLK / 8) /* 4 MHz */ +#define SCLK_FREQ_8MHZ (DIVN_CLK / 4) /* 8 MHz */ +#define SCLK_FREQ_16MHZ (DIVN_CLK / 2) /* 16 MHz */ + +enum spi_smartbond_transfer { + SPI_SMARTBOND_TRANSFER_TX_ONLY, + SPI_SMARTBOND_TRANSFER_RX_ONLY, + SPI_SMARTBOND_TRANSFER_TX_RX, + SPI_SMARTBOND_TRANSFER_NONE +}; + +enum spi_smartbond_dma_channel { + SPI_SMARTBOND_DMA_TX_CHANNEL, + SPI_SMARTBOND_DMA_RX_CHANNEL +}; + +enum spi_smartbond_fifo_mode { + /* Bi-directional mode */ + SPI_SMARTBOND_FIFO_MODE_TX_RX, + /* TX FIFO single depth, no flow control */ + SPI_SMARTBOND_FIFO_MODE_RX_ONLY, + /* RX FIFO single depth, no flow control */ + SPI_SMARTBOND_FIFO_MODE_TX_ONLY, + SPI_SMARTBOND_FIFO_NONE +}; struct spi_smartbond_cfg { SPI_Type *regs; int periph_clock_config; const struct pinctrl_dev_config *pcfg; +#ifdef CONFIG_SPI_SMARTBOND_DMA + int tx_dma_chan; + int rx_dma_chan; + uint8_t tx_slot_mux; + uint8_t rx_slot_mux; + const struct device *tx_dma_ctrl; + const struct device *rx_dma_ctrl; +#endif }; struct spi_smartbond_data { struct spi_context ctx; uint8_t dfs; + #if defined(CONFIG_PM_DEVICE) ATOMIC_DEFINE(pm_policy_state_flag, 1); uint32_t spi_ctrl_reg; #endif + +#ifdef CONFIG_SPI_SMARTBOND_DMA + struct dma_config tx_dma_cfg; + struct dma_config rx_dma_cfg; + struct dma_block_config tx_dma_block_cfg; + struct dma_block_config rx_dma_block_cfg; + struct k_sem rx_dma_sync; + struct k_sem tx_dma_sync; + + ATOMIC_DEFINE(dma_channel_atomic_flag, 2); + +#endif + +#if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA) + size_t rx_len; + size_t tx_len; + size_t transferred; + enum spi_smartbond_transfer transfer_mode; +#endif }; static inline void spi_smartbond_enable(const struct spi_smartbond_cfg *cfg, bool enable) @@ -60,6 +112,60 @@ static inline bool spi_smartbond_isenabled(const struct spi_smartbond_cfg *cfg) (!(cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_RST_Msk)); } +static inline void spi_smartbond_write_word(const struct device *dev) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + struct spi_smartbond_data *data = dev->data; + + /* + * No need to typecast the register address as the controller will automatically + * generate the necessary clock cycles based on the data 
size. + */ + switch (data->dfs) { + case 1: + cfg->regs->SPI_RX_TX_REG = *(uint8_t *)data->ctx.tx_buf; + break; + case 2: + cfg->regs->SPI_RX_TX_REG = sys_get_le16(data->ctx.tx_buf); + break; + case 4: + cfg->regs->SPI_RX_TX_REG = sys_get_le32(data->ctx.tx_buf); + break; + } +} + +static inline void spi_smartbond_write_dummy(const struct device *dev) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + + cfg->regs->SPI_RX_TX_REG = 0x0; +} + +static inline void spi_smartbond_read_word(const struct device *dev) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + struct spi_smartbond_data *data = dev->data; + + switch (data->dfs) { + case 1: + *(uint8_t *)data->ctx.rx_buf = cfg->regs->SPI_RX_TX_REG; + break; + case 2: + sys_put_le16((uint16_t)cfg->regs->SPI_RX_TX_REG, data->ctx.rx_buf); + break; + case 4: + sys_put_le32(cfg->regs->SPI_RX_TX_REG, data->ctx.rx_buf); + break; + } +} + +static inline void spi_smartbond_read_discard(const struct device *dev) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + + (void)cfg->regs->SPI_RX_TX_REG; +} + static inline int spi_smartbond_set_speed(const struct spi_smartbond_cfg *cfg, const uint32_t frequency) { @@ -208,6 +314,733 @@ static int spi_smartbond_configure(const struct spi_smartbond_cfg *cfg, return 0; } +#if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA) +static inline void spi_smartbond_isr_set_status(const struct device *dev, bool status) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + + if (status) { + cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_MINT_Msk; + } else { + cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_MINT_Msk; + } +} + +static inline bool spi_smartbond_is_busy(const struct device *dev) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + + return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_BUSY_Msk); +} + +static inline void spi_smartbond_clear_interrupt(const struct device *dev) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + + cfg->regs->SPI_CLEAR_INT_REG = 0x1; +} + +/* 0 = No RX data available, 1 = data has been transmitted and received */ +static inline bool spi_smartbond_is_rx_data(const struct device *dev) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + + return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_INT_BIT_Msk); +} + +static inline uint8_t spi_smartbond_get_fifo_mode(const struct device *dev) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + + return ((cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Msk) >> + SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Pos); +} + +static void spi_smartbond_set_fifo_mode(const struct device *dev, enum spi_smartbond_fifo_mode mode) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + bool is_enabled = spi_smartbond_isenabled(cfg); + enum spi_smartbond_fifo_mode current_mode = spi_smartbond_get_fifo_mode(dev); + +#ifdef CONFIG_SPI_SMARTBOND_DMA + struct spi_smartbond_data *data = dev->data; +#endif + + if ((current_mode != mode) +#ifdef CONFIG_SPI_SMARTBOND_DMA + || (data->dfs == 4) +#endif + ) { + if (current_mode != SPI_SMARTBOND_FIFO_MODE_RX_ONLY) { + while (spi_smartbond_is_busy(dev)) { + ; + } + } + /* Controller should be disabled when FIFO mode is updated */ + cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_ON_Msk; + +#ifdef CONFIG_SPI_SMARTBOND_DMA + /* + * Workaround for the controller that cannot generate DMA requests + * for 4-byte bus length. 
+ */ + if (data->dfs == 4) { + mode = SPI_SMARTBOND_FIFO_NONE; + } +#endif + + cfg->regs->SPI_CTRL_REG = + ((cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Msk) | + ((mode << SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Pos) & + SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Msk)); + + + if (mode != SPI_SMARTBOND_FIFO_NONE) { + cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_DMA_TXREQ_MODE_Msk; + } else { + cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_DMA_TXREQ_MODE_Msk; + } + + if (is_enabled) { + cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_ON_Msk; + } + } +} + +static int spi_smartbond_transfer_mode_get(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + struct spi_context *ctx = &data->ctx; + + if (spi_context_rx_buf_on(ctx) || spi_context_tx_buf_on(ctx)) { + /* + * Check only buffers' length as it might happen that current buffer is NULL. + * In such a case the context should be updated and a dummy write/read should + * take place. + */ + if (ctx->rx_len || ctx->tx_len) { + spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_RX); + return SPI_SMARTBOND_TRANSFER_TX_RX; + } + + if (!spi_context_rx_buf_on(ctx)) { + spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_ONLY); + return SPI_SMARTBOND_TRANSFER_TX_ONLY; + } + + if (!spi_context_tx_buf_on(ctx)) { + /* + * Use the TX/RX mode with TX being dummy. Using the RX only mode + * is a bit tricky as the controller should generate clock cycles + * automatically and immediately after the ISR is enabled. + */ + spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_RX); + return SPI_SMARTBOND_TRANSFER_RX_ONLY; + } + } + + /* Return waiting updating the fifo mode */ + return SPI_SMARTBOND_TRANSFER_NONE; +} + +static inline void spi_smartbond_transfer_mode_check_and_update(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + + data->transfer_mode = spi_smartbond_transfer_mode_get(dev); +} +#endif + +#ifdef CONFIG_SPI_ASYNC +static inline bool spi_smartbond_is_tx_full(const struct device *dev) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + + return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_TXH_Msk); +} + +static void spi_smartbond_write(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + struct spi_context *ctx = &data->ctx; + + while (spi_context_tx_buf_on(ctx)) { + /* Check if TX FIFO is full as otherwise undefined data should be transmitted. */ + if (spi_smartbond_is_tx_full(dev)) { + spi_smartbond_clear_interrupt(dev); + break; + } + /* Send to TX FIFO and update buffer pointer. */ + spi_smartbond_write_word(dev); + spi_context_update_tx(ctx, data->dfs, 1); + + /* + * It might happen that a NULL buffer with a non-zero length is provided. + * In that case, the bytes should be consumed. + */ + if (ctx->rx_len && !ctx->rx_buf) { + spi_smartbond_read_discard(dev); + spi_context_update_rx(ctx, data->dfs, 1); + } + } +} + +static void spi_smartbond_transfer(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + struct spi_context *ctx = &data->ctx; + + while (data->rx_len) { + /* Zero means that RX FIFO or register is empty */ + if (!spi_smartbond_is_rx_data(dev)) { + break; + } + + if (ctx->rx_buf) { + spi_smartbond_read_word(dev); + } else { + spi_smartbond_read_discard(dev); + } + spi_context_update_rx(ctx, data->dfs, 1); + + spi_smartbond_clear_interrupt(dev); + + data->rx_len--; + data->transferred++; + } + + while (data->tx_len) { + /* Check if TX FIFO is full as otherwise undefined data should be transmitted. 
*/ + if (spi_smartbond_is_tx_full(dev)) { + break; + } + + if (ctx->tx_buf) { + spi_smartbond_write_word(dev); + } else { + spi_smartbond_write_dummy(dev); + } + spi_context_update_tx(ctx, data->dfs, 1); + + data->tx_len--; + } +} + +static void spi_smartbond_read(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + struct spi_context *ctx = &data->ctx; + + while (spi_context_rx_buf_on(ctx)) { + /* Zero means that RX FIFO or register is empty */ + if (!spi_smartbond_is_rx_data(dev)) { + break; + } + + spi_smartbond_read_word(dev); + spi_context_update_rx(ctx, data->dfs, 1); + spi_smartbond_clear_interrupt(dev); + } + + /* Perform dummy access to generate the required clock cycles */ + while (data->tx_len) { + if (spi_smartbond_is_tx_full(dev)) { + break; + } + spi_smartbond_write_dummy(dev); + + data->tx_len--; + } +} + +static void spi_smartbond_isr_trigger(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + struct spi_context *ctx = &data->ctx; + + data->transfer_mode = spi_smartbond_transfer_mode_get(dev); + + switch (data->transfer_mode) { + case SPI_SMARTBOND_TRANSFER_RX_ONLY: + data->tx_len = spi_context_total_rx_len(ctx); + spi_smartbond_read(dev); + break; + case SPI_SMARTBOND_TRANSFER_TX_ONLY: + spi_smartbond_write(dev); + break; + case SPI_SMARTBOND_TRANSFER_TX_RX: + /* + * Each sub-transfer in the descriptor list should be exercised + * separately as it might happen that a buffer is NULL with + * non-zero length. + */ + data->rx_len = spi_context_max_continuous_chunk(ctx); + data->tx_len = data->rx_len; + spi_smartbond_transfer(dev); + break; + case SPI_SMARTBOND_TRANSFER_NONE: + __fallthrough; + default: + __ASSERT_MSG_INFO("Invalid transfer mode"); + break; + } + + spi_smartbond_isr_set_status(dev, true); +} + +static int spi_smartbond_transceive_async(const struct device *dev, + const struct spi_config *spi_cfg, + const struct spi_buf_set *tx_bufs, + const struct spi_buf_set *rx_bufs, spi_callback_t cb, + void *userdata) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + struct spi_smartbond_data *data = dev->data; + struct spi_context *ctx = &data->ctx; + int rc; + + spi_context_lock(ctx, true, cb, userdata, spi_cfg); + + rc = spi_smartbond_configure(cfg, data, spi_cfg); + if (rc == 0) { + spi_smartbond_pm_policy_state_lock_get(data); + spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->dfs); + spi_context_cs_control(ctx, true); + + /* + * PM constraints will be released within ISR once all transfers + * are exercised along with de-asserting the #CS line. + */ + spi_smartbond_isr_trigger(dev); + } + /* + * Context will actually be released when \sa spi_context_complete + * is called. 
+ */ + spi_context_release(ctx, rc); + + return rc; +} +#endif + +#if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA) +static void spi_smartbond_isr(void *args) +{ +#ifdef CONFIG_SPI_ASYNC + struct device *dev = args; + struct spi_smartbond_data *data = dev->data; + struct spi_context *ctx = &data->ctx; + + switch (data->transfer_mode) { + case SPI_SMARTBOND_TRANSFER_RX_ONLY: + spi_smartbond_read(dev); + break; + case SPI_SMARTBOND_TRANSFER_TX_ONLY: + spi_smartbond_write(dev); + break; + case SPI_SMARTBOND_TRANSFER_TX_RX: + /* Exersice the type of the next sub-transfer */ + if (!data->rx_len && !data->tx_len) { + spi_smartbond_transfer_mode_check_and_update(dev); + + if (data->transfer_mode == SPI_SMARTBOND_TRANSFER_RX_ONLY) { + data->tx_len = spi_context_total_rx_len(ctx) - data->transferred; + /* Clear in case another truncated transfer should be executed */ + data->transferred = 0; + spi_smartbond_read(dev); + } else if (data->transfer_mode == SPI_SMARTBOND_TRANSFER_TX_ONLY) { + spi_smartbond_write(dev); + } else if (data->transfer_mode == SPI_SMARTBOND_TRANSFER_TX_RX) { + data->rx_len = spi_context_max_continuous_chunk(ctx); + data->tx_len = data->rx_len; + spi_smartbond_transfer(dev); + } + } else { + spi_smartbond_transfer(dev); + } + break; + case SPI_SMARTBOND_TRANSFER_NONE: + __fallthrough; + default: + __ASSERT_MSG_INFO("Invalid transfer mode"); + break; + } + + /* All buffers have been exercised, signal completion */ + if (!spi_context_tx_buf_on(ctx) && !spi_context_rx_buf_on(ctx)) { + spi_smartbond_isr_set_status(dev, false); + + /* Mark completion to trigger callback function */ + spi_context_complete(ctx, dev, 0); + + spi_context_cs_control(ctx, false); + spi_smartbond_pm_policy_state_lock_put(data); + } +#endif +} +#endif + +#ifdef CONFIG_SPI_SMARTBOND_DMA +static uint32_t spi_smartbond_read_dummy_buf; + +/* + * Should be used to flush the RX FIFO in case a transaction is requested + * with NULL pointer and non-zero length. In such a case, data will be + * shifted into the RX FIFO (regardless of whether or not the RX mode is + * disabled) which should then be flushed. Otherwise, a next read operation + * will result in fetching old bytes. 
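+ * spi_smartbond_dma_trigger() calls this helper before programming each chunk
+ * and, for TX/RX transfers with a NULL RX buffer, once more afterwards.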
+ */ +static void spi_smartbond_flush_rx_fifo(const struct device *dev) +{ + while (spi_smartbond_is_busy(dev)) { + }; + while (spi_smartbond_is_rx_data(dev)) { + spi_smartbond_read_discard(dev); + spi_smartbond_clear_interrupt(dev); + } +} + +static int spi_smartbond_dma_tx_channel_request(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + const struct spi_smartbond_cfg *config = dev->config; + + if (!atomic_test_and_set_bit(data->dma_channel_atomic_flag, + SPI_SMARTBOND_DMA_TX_CHANNEL)) { + if (dma_request_channel(config->tx_dma_ctrl, (void *)&config->tx_dma_chan) < 0) { + atomic_clear_bit(data->dma_channel_atomic_flag, + SPI_SMARTBOND_DMA_TX_CHANNEL); + return -EIO; + } + } + + return 0; +} + +#ifdef CONFIG_PM_DEVICE +static void spi_smartbond_dma_tx_channel_release(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + const struct spi_smartbond_cfg *config = dev->config; + + if (atomic_test_and_clear_bit(data->dma_channel_atomic_flag, + SPI_SMARTBOND_DMA_TX_CHANNEL)) { + dma_release_channel(config->tx_dma_ctrl, config->tx_dma_chan); + } +} +#endif + +static int spi_smartbond_dma_rx_channel_request(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + const struct spi_smartbond_cfg *config = dev->config; + + if (!atomic_test_and_set_bit(data->dma_channel_atomic_flag, + SPI_SMARTBOND_DMA_RX_CHANNEL)) { + if (dma_request_channel(config->rx_dma_ctrl, (void *)&config->rx_dma_chan) < 0) { + atomic_clear_bit(data->dma_channel_atomic_flag, + SPI_SMARTBOND_DMA_RX_CHANNEL); + return -EIO; + } + } + + return 0; +} + +#ifdef CONFIG_PM_DEVICE +static void spi_smartbond_dma_rx_channel_release(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + const struct spi_smartbond_cfg *config = dev->config; + + if (atomic_test_and_clear_bit(data->dma_channel_atomic_flag, + SPI_SMARTBOND_DMA_RX_CHANNEL)) { + dma_release_channel(config->rx_dma_ctrl, config->rx_dma_chan); + } +} +#endif + +static void spi_smartbond_tx_dma_cb(const struct device *dma, void *arg, + uint32_t id, int status) +{ + const struct device *dev = arg; + struct spi_smartbond_data *data = dev->data; + struct spi_context *ctx = &data->ctx; + + if (status < 0) { + LOG_WRN("DMA transfer did not complete"); + } + + spi_context_update_tx(ctx, data->dfs, data->tx_len); + k_sem_give(&data->tx_dma_sync); +} + +static void spi_smartbond_rx_dma_cb(const struct device *dma, void *arg, + uint32_t id, int status) +{ + const struct device *dev = arg; + struct spi_smartbond_data *data = dev->data; + struct spi_context *ctx = &data->ctx; + + if (status < 0) { + LOG_WRN("DMA transfer did not complete"); + } + + spi_context_update_rx(ctx, data->dfs, data->rx_len); + k_sem_give(&data->rx_dma_sync); +} + +#ifdef CONFIG_PM_DEVICE +static void spi_smartbond_dma_deconfig(const struct device *dev) +{ + const struct spi_smartbond_cfg *config = dev->config; + + dma_stop(config->rx_dma_ctrl, config->rx_dma_chan); + dma_stop(config->tx_dma_ctrl, config->tx_dma_chan); + + spi_smartbond_dma_rx_channel_release(dev); + spi_smartbond_dma_tx_channel_release(dev); +} +#endif + +static int spi_smartbond_dma_config(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + const struct spi_smartbond_cfg *config = dev->config; + struct dma_config *tx = &data->tx_dma_cfg; + struct dma_config *rx = &data->rx_dma_cfg; + struct dma_block_config *tx_block = &data->tx_dma_block_cfg; + struct dma_block_config *rx_block = &data->rx_dma_block_cfg; + + /* + * DMA RX 
should be assigned an even number and + * DMA TX should be assigned the right next + * channel (odd number). + */ + if (!(config->tx_dma_chan & 0x1) || + (config->rx_dma_chan & 0x1) || + (config->tx_dma_chan != (config->rx_dma_chan + 1))) { + LOG_ERR("Invalid RX/TX channel selection"); + return -EINVAL; + } + + if (config->tx_slot_mux != config->rx_slot_mux) { + LOG_ERR("TX/RX DMA slots mismatch"); + return -EINVAL; + } + + if (!device_is_ready(config->tx_dma_ctrl) || + !device_is_ready(config->rx_dma_ctrl)) { + LOG_ERR("TX/RX DMA device is not ready"); + return -ENODEV; + } + + if (spi_smartbond_dma_tx_channel_request(dev) < 0) { + LOG_ERR("TX DMA channel is already occupied"); + return -EIO; + } + + if (spi_smartbond_dma_rx_channel_request(dev) < 0) { + LOG_ERR("RX DMA channel is already occupied"); + return -EIO; + } + + tx->channel_direction = MEMORY_TO_PERIPHERAL; + tx->dma_callback = spi_smartbond_tx_dma_cb; + tx->user_data = (void *)dev; + tx->block_count = 1; + tx->head_block = &data->tx_dma_block_cfg; + tx->error_callback_dis = 1; + tx->dma_slot = config->tx_slot_mux; + tx->channel_priority = 2; + + /* Burst mode is not using when DREQ is one */ + tx->source_burst_length = 1; + tx->dest_burst_length = 1; + /* Source and destination data size should reflect DFS value */ + tx->source_data_size = 0; + tx->dest_data_size = 0; + + /* Do not change */ + tx_block->dest_addr_adj = 0x2; + /* Incremental */ + tx_block->source_addr_adj = 0x0; + tx_block->dest_address = (uint32_t)&config->regs->SPI_RX_TX_REG; + + /* + * To be filled when a transaction is requested and + * should reflect the total number of bytes. + */ + tx_block->block_size = 0; + /* Should reflect the TX buffer */ + tx_block->source_address = 0; + + rx->channel_direction = PERIPHERAL_TO_MEMORY; + rx->dma_callback = spi_smartbond_rx_dma_cb; + rx->user_data = (void *)dev; + rx->block_count = 1; + rx->head_block = &data->rx_dma_block_cfg; + rx->error_callback_dis = 1; + rx->dma_slot = config->rx_slot_mux; + rx->channel_priority = 2; + + /* Burst mode is not using when DREQ is one */ + rx->source_burst_length = 1; + rx->dest_burst_length = 1; + /* Source and destination data size should reflect DFS value */ + rx->source_data_size = 0; + rx->dest_data_size = 0; + + /* Do not change */ + rx_block->source_addr_adj = 0x2; + /* Incremenetal */ + rx_block->dest_addr_adj = 0x0; + rx_block->source_address = (uint32_t)&config->regs->SPI_RX_TX_REG; + + /* + * To be filled when a transaction is requested and + * should reflect the total number of bytes. 
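+	 * (number of words multiplied by the configured DFS, filled in by
+	 * spi_smartbond_dma_trigger()).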
+ */ + rx_block->block_size = 0; + /* Should reflect the RX buffer */ + rx_block->dest_address = 0; + + return 0; +} + +static int spi_smartbond_dma_trigger(const struct device *dev) +{ + struct spi_smartbond_data *data = dev->data; + const struct spi_smartbond_cfg *config = dev->config; + struct spi_context *ctx = &data->ctx; + struct dma_config *tx = &data->tx_dma_cfg; + struct dma_config *rx = &data->rx_dma_cfg; + struct dma_block_config *tx_block = &data->tx_dma_block_cfg; + struct dma_block_config *rx_block = &data->rx_dma_block_cfg; + + rx->source_data_size = data->dfs; + rx->dest_data_size = data->dfs; + tx->source_data_size = data->dfs; + tx->dest_data_size = data->dfs; + + data->transfer_mode = spi_smartbond_transfer_mode_get(dev); + do { + switch (data->transfer_mode) { + case SPI_SMARTBOND_TRANSFER_RX_ONLY: + spi_smartbond_flush_rx_fifo(dev); + + data->rx_len = spi_context_max_continuous_chunk(ctx); + data->tx_len = data->rx_len; + + rx_block->block_size = data->rx_len * data->dfs; + tx_block->block_size = rx_block->block_size; + + rx_block->dest_address = (uint32_t)ctx->rx_buf; + rx_block->dest_addr_adj = 0x0; + tx_block->source_address = (uint32_t)&spi_smartbond_read_dummy_buf; + /* Non-incremental */ + tx_block->source_addr_adj = 0x2; + + if (dma_config(config->tx_dma_ctrl, config->tx_dma_chan, tx) < 0) { + LOG_ERR("TX DMA configuration failed"); + return -EINVAL; + } + if (dma_config(config->rx_dma_ctrl, config->rx_dma_chan, rx) < 0) { + LOG_ERR("RX DMA configuration failed"); + return -EINVAL; + } + dma_start(config->rx_dma_ctrl, config->rx_dma_chan); + dma_start(config->tx_dma_ctrl, config->tx_dma_chan); + + /* Wait for the current DMA transfer to complete */ + k_sem_take(&data->tx_dma_sync, K_FOREVER); + k_sem_take(&data->rx_dma_sync, K_FOREVER); + break; + case SPI_SMARTBOND_TRANSFER_TX_ONLY: + spi_smartbond_flush_rx_fifo(dev); + + data->tx_len = spi_context_max_continuous_chunk(ctx); + data->rx_len = data->tx_len; + + tx_block->block_size = data->tx_len * data->dfs; + tx_block->source_address = (uint32_t)ctx->tx_buf; + tx_block->source_addr_adj = 0x0; + + if (dma_config(config->tx_dma_ctrl, config->tx_dma_chan, tx) < 0) { + LOG_ERR("TX DMA configuration failed"); + return -EINVAL; + } + dma_start(config->tx_dma_ctrl, config->tx_dma_chan); + + /* Wait for the current DMA transfer to complete */ + k_sem_take(&data->tx_dma_sync, K_FOREVER); + break; + case SPI_SMARTBOND_TRANSFER_TX_RX: + spi_smartbond_flush_rx_fifo(dev); + + data->rx_len = spi_context_max_continuous_chunk(ctx); + data->tx_len = data->rx_len; + /* + * DMA block size represents total number of bytes whilist, + * context length is divided by the data size (dfs). 
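+			 * E.g. an 8-word chunk with dfs = 2 programs a 16-byte DMA block.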
+ */ + tx_block->block_size = data->tx_len * data->dfs; + rx_block->block_size = tx_block->block_size; + + if (ctx->tx_buf) { + tx_block->source_address = (uint32_t)ctx->tx_buf; + tx_block->source_addr_adj = 0x0; + } else { + tx_block->source_address = (uint32_t)&spi_smartbond_read_dummy_buf; + tx_block->source_addr_adj = 0x2; + } + + if (ctx->rx_buf) { + rx_block->dest_address = (uint32_t)ctx->rx_buf; + rx_block->dest_addr_adj = 0x0; + } else { + rx_block->dest_address = (uint32_t)&spi_smartbond_read_dummy_buf; + rx_block->dest_addr_adj = 0x2; + } + + if (dma_config(config->tx_dma_ctrl, config->tx_dma_chan, tx) < 0) { + LOG_ERR("TX DMA configuration failed"); + return -EINVAL; + } + if (dma_config(config->rx_dma_ctrl, config->rx_dma_chan, rx) < 0) { + LOG_ERR("RX DMA configuration failed"); + return -EINVAL; + } + dma_start(config->rx_dma_ctrl, config->rx_dma_chan); + dma_start(config->tx_dma_ctrl, config->tx_dma_chan); + + k_sem_take(&data->tx_dma_sync, K_FOREVER); + k_sem_take(&data->rx_dma_sync, K_FOREVER); + + /* + * Regardless of whether or not the RX FIFO is enabled, received + * bytes are pushed into it. As such, the RXI FIFO should be + * flushed so that a next read access retrives the correct bytes + * and not old ones. + */ + if (!ctx->rx_buf) { + spi_smartbond_flush_rx_fifo(dev); + } + break; + case SPI_SMARTBOND_TRANSFER_NONE: + __fallthrough; + default: + __ASSERT_MSG_INFO("Invalid transfer mode"); + break; + } + + spi_smartbond_transfer_mode_check_and_update(dev); + } while (data->transfer_mode != SPI_SMARTBOND_TRANSFER_NONE); + + return 0; +} +#endif + static int spi_smartbond_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) @@ -215,54 +1048,57 @@ static int spi_smartbond_transceive(const struct device *dev, const struct spi_c const struct spi_smartbond_cfg *cfg = dev->config; struct spi_smartbond_data *data = dev->data; struct spi_context *ctx = &data->ctx; - uint32_t bitmask; int rc; spi_smartbond_pm_policy_state_lock_get(data); spi_context_lock(&data->ctx, false, NULL, NULL, spi_cfg); + rc = spi_smartbond_configure(cfg, data, spi_cfg); if (rc == 0) { spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->dfs); spi_context_cs_control(ctx, true); - bitmask = ~((~0UL) << SPI_WORD_SIZE_GET(data->ctx.config->operation)); +#ifdef CONFIG_SPI_SMARTBOND_DMA + rc = spi_smartbond_dma_trigger(dev); + /* Mark completion to trigger callback function */ + spi_context_complete(ctx, dev, 0); +#else while (spi_context_tx_buf_on(ctx) || spi_context_rx_buf_on(ctx)) { if (spi_context_tx_buf_on(ctx)) { - cfg->regs->SPI_RX_TX_REG = (*(uint32_t *)ctx->tx_buf) & bitmask; + spi_smartbond_write_word(dev); spi_context_update_tx(ctx, data->dfs, 1); } else { - cfg->regs->SPI_RX_TX_REG = 0UL; + spi_smartbond_write_dummy(dev); } while (!(cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_INT_BIT_Msk)) { }; if (spi_context_rx_buf_on(ctx)) { - (*(uint32_t *)ctx->rx_buf) = cfg->regs->SPI_RX_TX_REG & bitmask; + spi_smartbond_read_word(dev); spi_context_update_rx(ctx, data->dfs, 1); } else { - (void)cfg->regs->SPI_RX_TX_REG; + spi_smartbond_read_discard(dev); + /* + * It might happen that a NULL buffer with a non-zero length + * is provided. In that case, the bytes should be consumed. 
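+				 * Updating the RX context here allows it to advance to the
+				 * next buffer in the set once the skipped words are counted.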
+ */ + if (ctx->rx_len) { + spi_context_update_rx(ctx, data->dfs, 1); + } } cfg->regs->SPI_CLEAR_INT_REG = 1UL; } +#endif + + spi_context_cs_control(ctx, false); } - spi_context_cs_control(ctx, false); spi_context_release(&data->ctx, rc); spi_smartbond_pm_policy_state_lock_put(data); return rc; } -#ifdef CONFIG_SPI_ASYNC -static int spi_smartbond_transceive_async(const struct device *dev, - const struct spi_config *spi_cfg, - const struct spi_buf_set *tx_bufs, - const struct spi_buf_set *rx_bufs, spi_callback_t cb, - void *userdata) -{ - return -ENOTSUP; -} -#endif static int spi_smartbond_release(const struct device *dev, const struct spi_config *spi_cfg) { @@ -308,6 +1144,14 @@ static int spi_smartbond_resume(const struct device *dev) return rc; } +#ifdef CONFIG_SPI_SMARTBOND_DMA + rc = spi_smartbond_dma_config(dev); + if (rc < 0) { + LOG_ERR("Failed to configure DMA"); + return rc; + } +#endif + spi_context_unlock_unconditionally(&data->ctx); return 0; @@ -331,6 +1175,10 @@ static int spi_smartbond_suspend(const struct device *dev) LOG_WRN("Failed to configure the SPI pins to inactive state"); } +#ifdef CONFIG_SPI_SMARTBOND_DMA + spi_smartbond_dma_deconfig(dev); +#endif + return ret; } @@ -356,11 +1204,51 @@ static int spi_smartbond_pm_action(const struct device *dev, } #endif +#define SPI_SMARTBOND_ISR_CONNECT \ + IRQ_CONNECT(DT_IRQN(DT_NODELABEL(spi)), DT_IRQ(DT_NODELABEL(spi), priority), \ + spi_smartbond_isr, DEVICE_DT_GET(DT_NODELABEL(spi)), 0); \ + irq_enable(DT_IRQN(DT_NODELABEL(spi))); + +#define SPI2_SMARTBOND_ISR_CONNECT \ + IRQ_CONNECT(DT_IRQN(DT_NODELABEL(spi2)), DT_IRQ(DT_NODELABEL(spi2), priority), \ + spi_smartbond_isr, DEVICE_DT_GET(DT_NODELABEL(spi2)), 0); \ + irq_enable(DT_IRQN(DT_NODELABEL(spi2))); + +#if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA) +static int spi_smartbond_isr_connect(const struct device *dev) +{ + const struct spi_smartbond_cfg *cfg = dev->config; + + switch ((uint32_t)cfg->regs) { + case (uint32_t)SPI: + COND_CODE_1(DT_NODE_HAS_STATUS(DT_NODELABEL(spi), okay), + (SPI_SMARTBOND_ISR_CONNECT), (NULL)); + break; + case (uint32_t)SPI2: + COND_CODE_1(DT_NODE_HAS_STATUS(DT_NODELABEL(spi2), okay), + (SPI2_SMARTBOND_ISR_CONNECT), (NULL)); + break; + default: + return -EINVAL; + } + + return 0; +} +#endif + static int spi_smartbond_init(const struct device *dev) { int ret; struct spi_smartbond_data *data = dev->data; +#if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA) + data->transfer_mode = SPI_SMARTBOND_TRANSFER_NONE; +#endif +#ifdef CONFIG_SPI_SMARTBOND_DMA + k_sem_init(&data->tx_dma_sync, 0, 1); + k_sem_init(&data->rx_dma_sync, 0, 1); +#endif + #ifdef CONFIG_PM_DEVICE_RUNTIME /* Make sure device state is marked as suspended */ pm_device_init_suspended(dev); @@ -373,15 +1261,61 @@ static int spi_smartbond_init(const struct device *dev) #endif spi_context_unlock_unconditionally(&data->ctx); +#if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA) + ret = spi_smartbond_isr_connect(dev); +#endif + return ret; } +#ifdef CONFIG_SPI_SMARTBOND_DMA +#define SPI_SMARTBOND_DMA_TX_INIT(id) \ + .tx_dma_chan = DT_INST_DMAS_CELL_BY_NAME(id, tx, channel), \ + .tx_slot_mux = (uint8_t)DT_INST_DMAS_CELL_BY_NAME(id, tx, config), \ + .tx_dma_ctrl = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)), +#else +#define SPI_SMARTBOND_DMA_TX_INIT(id) +#endif + +#ifdef CONFIG_SPI_SMARTBOND_DMA +#define SPI_SMARTBOND_DMA_RX_INIT(id) \ + .rx_dma_chan = DT_INST_DMAS_CELL_BY_NAME(id, rx, channel), \ + .rx_slot_mux = 
(uint8_t)DT_INST_DMAS_CELL_BY_NAME(id, rx, config), \ + .rx_dma_ctrl = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)), +#else +#define SPI_SMARTBOND_DMA_RX_INIT(id) +#endif + +#ifdef CONFIG_SPI_SMARTBOND_DMA +#define SPI_SMARTBOND_DMA_TX_INVALIDATE(id) \ + .tx_dma_chan = 255, \ + .tx_slot_mux = 255, \ + .tx_dma_ctrl = NULL, +#else +#define SPI_SMARTBOND_DMA_TX_INVALIDATE(id) +#endif + +#ifdef CONFIG_SPI_SMARTBOND_DMA +#define SPI_SMARTBOND_DMA_RX_INVALIDATE(id) \ + .rx_dma_chan = 255, \ + .rx_slot_mux = 255, \ + .rx_dma_ctrl = NULL, +#else +#define SPI_SMARTBOND_DMA_RX_INVALIDATE(id) +#endif + #define SPI_SMARTBOND_DEVICE(id) \ PINCTRL_DT_INST_DEFINE(id); \ static const struct spi_smartbond_cfg spi_smartbond_##id##_cfg = { \ .regs = (SPI_Type *)DT_INST_REG_ADDR(id), \ .periph_clock_config = DT_INST_PROP(id, periph_clock_config), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \ + COND_CODE_1(DT_INST_DMAS_HAS_NAME(id, tx), \ + (SPI_SMARTBOND_DMA_TX_INIT(id)), \ + (SPI_SMARTBOND_DMA_TX_INVALIDATE(id))) \ + COND_CODE_1(DT_INST_DMAS_HAS_NAME(id, rx), \ + (SPI_SMARTBOND_DMA_RX_INIT(id)), \ + (SPI_SMARTBOND_DMA_RX_INVALIDATE(id))) \ }; \ static struct spi_smartbond_data spi_smartbond_##id##_data = { \ SPI_CONTEXT_INIT_LOCK(spi_smartbond_##id##_data, ctx), \