diff --git a/drivers/spi/Kconfig.smartbond b/drivers/spi/Kconfig.smartbond
index 9c003eb915ddcf2..41860ed0ab8b05f 100644
--- a/drivers/spi/Kconfig.smartbond
+++ b/drivers/spi/Kconfig.smartbond
@@ -7,3 +7,14 @@ config SPI_SMARTBOND
 	depends on DT_HAS_RENESAS_SMARTBOND_SPI_ENABLED
 	help
 	  Enables SPI driver for Renesas SmartBond(tm) DA1469x series MCU.
+
+config SPI_SMARTBOND_DMA
+	bool "Renesas SmartBond(tm) SPI with DMA acceleration"
+	default y
+	depends on SPI_ASYNC
+	depends on SPI_SMARTBOND
+	select DMA
+	help
+	  Enables using the DMA engine instead of the interrupt-driven
+	  approach. This acceleration is available only for
+	  asynchronous transfers.
diff --git a/drivers/spi/spi_smartbond.c b/drivers/spi/spi_smartbond.c
index 730a3b37a9db44a..6f2e395716f367d 100644
--- a/drivers/spi/spi_smartbond.c
+++ b/drivers/spi/spi_smartbond.c
@@ -18,6 +18,9 @@ LOG_MODULE_REGISTER(spi_smartbond);
 #include
 #include
 #include
+#include <zephyr/drivers/dma.h>
+#include <zephyr/drivers/dma/dma_smartbond.h>
+#include <zephyr/sys/byteorder.h>
 #include
 #include
@@ -32,15 +35,62 @@ struct spi_smartbond_cfg {
 	SPI_Type *regs;
 	int periph_clock_config;
 	const struct pinctrl_dev_config *pcfg;
+#ifdef CONFIG_SPI_SMARTBOND_DMA
+	int tx_dma_chan;
+	int rx_dma_chan;
+#endif
+};
+
+enum spi_smartbond_transfer {
+	SPI_SMARTBOND_TRANSFER_TX_ONLY,
+	SPI_SMARTBOND_TRANSFER_RX_ONLY,
+	SPI_SMARTBOND_TRANSFER_TX_RX,
+	SPI_SMARTBOND_TRANSFER_TX_RX_TRUNCATED,
+	SPI_SMARTBOND_TRANSFER_NONE
+};
+
+enum spi_smartbond_dma_channel {
+	SPI_SMARTBOND_DMA_TX_CHANNEL,
+	SPI_SMARTBOND_DMA_RX_CHANNEL
 };
 
 struct spi_smartbond_data {
 	struct spi_context ctx;
 	uint8_t dfs;
+
 #if defined(CONFIG_PM_DEVICE)
 	ATOMIC_DEFINE(pm_policy_state_flag, 1);
 	uint32_t spi_ctrl_reg;
 #endif
+
+#ifdef CONFIG_SPI_SMARTBOND_DMA
+	const struct device *dma;
+	struct dma_config tx_dma_cfg;
+	struct dma_config rx_dma_cfg;
+	struct dma_block_config tx_dma_block_cfg;
+	struct dma_block_config rx_dma_block_cfg;
+	struct k_sem rx_dma_sync;
+	struct k_sem tx_dma_sync;
+
+	ATOMIC_DEFINE(dma_channel_atomic_flag, 2);
+#endif
+
+#ifdef CONFIG_SPI_ASYNC
+	size_t rx_len;
+	size_t tx_len;
+	enum spi_smartbond_transfer transfer_mode;
+#endif
+};
+
+enum spi_smartbond_fifo_mode {
+	/* Bi-directional mode */
+	SPI_SMARTBOND_FIFO_MODE_TX_RX,
+	/* TX FIFO single depth, no flow control */
+	SPI_SMARTBOND_FIFO_MODE_RX_ONLY,
+	/* RX FIFO single depth, no flow control */
+	SPI_SMARTBOND_FIFO_MODE_TX_ONLY,
+	SPI_SMARTBOND_FIFO_NONE
 };
 
 static inline void spi_smartbond_enable(const struct spi_smartbond_cfg *cfg, bool enable)
@@ -60,6 +110,377 @@ static inline bool spi_smartbond_isenabled(const struct spi_smartbond_cfg *cfg)
 		(!(cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_RST_Msk));
 }
 
+static inline void spi_smartbond_write_word(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+	struct spi_smartbond_data *data = dev->data;
+
+	/*
+	 * No need to typecast the register address as the controller will
+	 * automatically generate the necessary clock cycles based on the
+	 * data size.
+	 */
+	switch (data->dfs) {
+	case 1:
+		cfg->regs->SPI_RX_TX_REG = *(uint8_t *)data->ctx.tx_buf;
+		break;
+	case 2:
+		cfg->regs->SPI_RX_TX_REG = sys_get_le16(data->ctx.tx_buf);
+		break;
+	case 4:
+		cfg->regs->SPI_RX_TX_REG = sys_get_le32(data->ctx.tx_buf);
+		break;
+	}
+}
+
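+/*
+ * A dummy write is what produces the SCLK cycles needed to shift RX data in
+ * when there is no TX payload left to send.
+ */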
+static inline void spi_smartbond_write_dummy(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+
+	cfg->regs->SPI_RX_TX_REG = 0x0;
+}
+
+static inline void spi_smartbond_read_word(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+	struct spi_smartbond_data *data = dev->data;
+
+	switch (data->dfs) {
+	case 1:
+		*(uint8_t *)data->ctx.rx_buf = cfg->regs->SPI_RX_TX_REG;
+		break;
+	case 2:
+		__ASSERT(((uint32_t)data->ctx.rx_buf & 0x1) == 0, "Unaligned RX BUF");
+		*(uint16_t *)data->ctx.rx_buf = cfg->regs->SPI_RX_TX_REG;
+		break;
+	case 4:
+		__ASSERT(((uint32_t)data->ctx.rx_buf & 0x3) == 0, "Unaligned RX BUF");
+		*(uint32_t *)data->ctx.rx_buf = cfg->regs->SPI_RX_TX_REG;
+		break;
+	}
+}
+
+static inline void spi_smartbond_read_discard(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+
+	(void)cfg->regs->SPI_RX_TX_REG;
+}
+
+#ifdef CONFIG_SPI_ASYNC
+static inline bool spi_smartbond_is_tx_full(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+
+	return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_TXH_Msk);
+}
+
+static inline bool spi_smartbond_is_busy(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+
+	return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_BUSY_Msk);
+}
+
+static inline uint8_t spi_smartbond_get_fifo_mode(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+
+	return ((cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Msk) >>
+		SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Pos);
+}
+
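+/*
+ * The FIFO mode selects which direction is FIFO-backed and may only be
+ * changed while the controller is disabled; it also sets the DMA TX request
+ * mode used to work around the missing 4-byte DMA requests.
+ */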
+static void spi_smartbond_set_fifo_mode(const struct device *dev,
+					enum spi_smartbond_fifo_mode mode)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+	struct spi_smartbond_data *data = dev->data;
+	bool is_enabled = spi_smartbond_isenabled(cfg);
+	enum spi_smartbond_fifo_mode current_mode = spi_smartbond_get_fifo_mode(dev);
+
+	if (current_mode != mode) {
+		if (current_mode != SPI_SMARTBOND_FIFO_MODE_RX_ONLY) {
+			while (spi_smartbond_is_busy(dev)) {
+				;
+			}
+		}
+		/* The controller should be disabled when the FIFO mode is updated */
+		cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_ON_Msk;
+
+		/*
+		 * Workaround for the controller being unable to generate DMA
+		 * requests for a 4-byte bus length.
+		 */
+		if (data->dfs == 4) {
+			mode = SPI_SMARTBOND_FIFO_NONE;
+		}
+
+		cfg->regs->SPI_CTRL_REG =
+			((cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Msk) |
+			 ((mode << SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Pos) &
+			  SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Msk));
+
+		if (mode != SPI_SMARTBOND_FIFO_NONE) {
+			cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_DMA_TXREQ_MODE_Msk;
+		} else {
+			cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_DMA_TXREQ_MODE_Msk;
+		}
+
+		if (is_enabled) {
+			cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_ON_Msk;
+		}
+	}
+}
+
+static inline void spi_smartbond_clear_interrupt(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+
+	cfg->regs->SPI_CLEAR_INT_REG = 0x1;
+}
+
+static inline void spi_smartbond_isr_set_status(const struct device *dev, bool status)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+
+	if (status) {
+		cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_MINT_Msk;
+	} else {
+		cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_MINT_Msk;
+	}
+}
+
+/* 0 = no RX data available, 1 = data has been transmitted and received */
+static inline bool spi_smartbond_is_rx_data(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+
+	return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_INT_BIT_Msk);
+}
+#endif
+
+#ifdef CONFIG_SPI_SMARTBOND_DMA
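+/*
+ * Channel ownership is tracked in dma_channel_atomic_flag so that repeated
+ * suspend/resume cycles request each DMA channel from the DMA driver only
+ * once and release it symmetrically.
+ */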
+static int spi_smartbond_dma_tx_channel_request(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	const struct spi_smartbond_cfg *config = dev->config;
+
+	if (!atomic_test_and_set_bit(data->dma_channel_atomic_flag,
+				     SPI_SMARTBOND_DMA_TX_CHANNEL)) {
+		if (dma_request_channel(data->dma, (void *)&config->tx_dma_chan) < 0) {
+			atomic_clear_bit(data->dma_channel_atomic_flag,
+					 SPI_SMARTBOND_DMA_TX_CHANNEL);
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_DEVICE
+static void spi_smartbond_dma_tx_channel_release(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	const struct spi_smartbond_cfg *config = dev->config;
+
+	if (atomic_test_and_clear_bit(data->dma_channel_atomic_flag,
+				      SPI_SMARTBOND_DMA_TX_CHANNEL)) {
+		dma_release_channel(data->dma, config->tx_dma_chan);
+	}
+}
+#endif
+
+static int spi_smartbond_dma_rx_channel_request(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	const struct spi_smartbond_cfg *config = dev->config;
+
+	if (!atomic_test_and_set_bit(data->dma_channel_atomic_flag,
+				     SPI_SMARTBOND_DMA_RX_CHANNEL)) {
+		if (dma_request_channel(data->dma, (void *)&config->rx_dma_chan) < 0) {
+			atomic_clear_bit(data->dma_channel_atomic_flag,
+					 SPI_SMARTBOND_DMA_RX_CHANNEL);
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_DEVICE
+static void spi_smartbond_dma_rx_channel_release(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	const struct spi_smartbond_cfg *config = dev->config;
+
+	if (atomic_test_and_clear_bit(data->dma_channel_atomic_flag,
+				      SPI_SMARTBOND_DMA_RX_CHANNEL)) {
+		dma_release_channel(data->dma, config->rx_dma_chan);
+	}
+}
+#endif
+
+static void spi_smartbond_tx_dma_cb(const struct device *dma, void *arg,
+				    uint32_t id, int status)
+{
+	const struct device *dev = arg;
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+
+	if (status < 0) {
+		LOG_WRN("DMA transfer did not complete");
+	}
+
+	spi_context_update_tx(ctx, data->dfs, data->tx_len);
+	k_sem_give(&data->tx_dma_sync);
+}
+
+static void spi_smartbond_rx_dma_cb(const struct device *dma, void *arg,
+				    uint32_t id, int status)
+{
+	const struct device *dev = arg;
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+
+	if (status < 0) {
+		LOG_WRN("DMA transfer did not complete");
+	}
+
+	spi_context_update_rx(ctx, data->dfs, data->rx_len);
+	k_sem_give(&data->rx_dma_sync);
+}
+
+static int spi_smartbond_dma_trig_mux_get(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *config = dev->config;
+
+	switch ((uint32_t)config->regs) {
+	case (uint32_t)SPI:
+		return DMA_SMARTBOND_TRIG_MUX_SPI;
+	case (uint32_t)SPI2:
+		return DMA_SMARTBOND_TRIG_MUX_SPI2;
+	default:
+		__ASSERT_MSG_INFO("Invalid SPI Instance");
+		return DMA_SMARTBOND_TRIG_MUX_NONE;
+	}
+}
+
+#ifdef CONFIG_PM_DEVICE
+static void spi_smartbond_dma_deconfig(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	const struct spi_smartbond_cfg *config = dev->config;
+
+	dma_stop(data->dma, config->rx_dma_chan);
+	dma_stop(data->dma, config->tx_dma_chan);
+
+	spi_smartbond_dma_rx_channel_release(dev);
+	spi_smartbond_dma_tx_channel_release(dev);
+}
+#endif
+
+static int spi_smartbond_dma_config(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	const struct spi_smartbond_cfg *config = dev->config;
+	struct dma_config *tx = &data->tx_dma_cfg;
+	struct dma_config *rx = &data->rx_dma_cfg;
+	struct dma_block_config *tx_block = &data->tx_dma_block_cfg;
+	struct dma_block_config *rx_block = &data->rx_dma_block_cfg;
+
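+	/*
+	 * TX requests are routed through odd DMA channels and RX requests
+	 * through even ones, so the devicetree assignment is validated
+	 * up front.
+	 */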
+	if (!(config->tx_dma_chan & 0x1)) {
+		LOG_ERR("TX DMA channel should be assigned an odd number");
+		return -EINVAL;
+	}
+
+	if ((config->rx_dma_chan & 0x1)) {
+		LOG_ERR("RX DMA channel should be assigned an even number");
+		return -EINVAL;
+	}
+
+	data->dma = DEVICE_DT_GET(DT_NODELABEL(dma));
+	if (!device_is_ready(data->dma)) {
+		LOG_ERR("DMA device is not ready");
+		return -ENODEV;
+	}
+
+	if (spi_smartbond_dma_tx_channel_request(dev) < 0) {
+		LOG_ERR("TX DMA channel is already occupied");
+		return -EIO;
+	}
+
+	if (spi_smartbond_dma_rx_channel_request(dev) < 0) {
+		LOG_ERR("RX DMA channel is already occupied");
+		return -EIO;
+	}
+
+	tx->channel_direction = MEMORY_TO_PERIPHERAL;
+	tx->dma_callback = spi_smartbond_tx_dma_cb;
+	tx->user_data = (void *)dev;
+	tx->block_count = 1;
+	tx->head_block = &data->tx_dma_block_cfg;
+	tx->error_callback_dis = 1;
+	tx->dma_slot = spi_smartbond_dma_trig_mux_get(dev);
+	if (tx->dma_slot == DMA_SMARTBOND_TRIG_MUX_NONE) {
+		LOG_ERR("Invalid TX DMA trigger mux");
+		return -EIO;
+	}
+	tx->channel_priority = 2;
+
+	/* Burst mode is not used when DREQ is one */
+	tx->source_burst_length = 1;
+	tx->dest_burst_length = 1;
+	/* Source and destination data size should reflect the DFS value */
+	tx->source_data_size = 0;
+	tx->dest_data_size = 0;
+
+	/* Do not change */
+	tx_block->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+	/* Incremental */
+	tx_block->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
+	tx_block->dest_address = (uint32_t)&config->regs->SPI_RX_TX_REG;
+
+	/*
+	 * To be filled when a transaction is requested and
+	 * should reflect the total number of bytes.
+	 */
+	tx_block->block_size = 0;
+	/* Should reflect the TX buffer */
+	tx_block->source_address = 0;
+
+	rx->channel_direction = PERIPHERAL_TO_MEMORY;
+	rx->dma_callback = spi_smartbond_rx_dma_cb;
+	rx->user_data = (void *)dev;
+	rx->block_count = 1;
+	rx->head_block = &data->rx_dma_block_cfg;
+	rx->error_callback_dis = 1;
+	rx->dma_slot = spi_smartbond_dma_trig_mux_get(dev);
+	if (rx->dma_slot == DMA_SMARTBOND_TRIG_MUX_NONE) {
+		LOG_ERR("Invalid RX DMA trigger mux");
+		return -EIO;
+	}
+	rx->channel_priority = 2;
+
+	/* Burst mode is not used when DREQ is one */
+	rx->source_burst_length = 1;
+	rx->dest_burst_length = 1;
+	/* Source and destination data size should reflect the DFS value */
+	rx->source_data_size = 0;
+	rx->dest_data_size = 0;
+
+	/* Do not change */
+	rx_block->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+	/* Incremental */
+	rx_block->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
+	rx_block->source_address = (uint32_t)&config->regs->SPI_RX_TX_REG;
+
+	/*
+	 * To be filled when a transaction is requested and
+	 * should reflect the total number of bytes.
+	 */
+	rx_block->block_size = 0;
+	/* Should reflect the RX buffer */
+	rx_block->dest_address = 0;
+
+	return 0;
+}
+#endif
+
 static inline int spi_smartbond_set_speed(const struct spi_smartbond_cfg *cfg,
 					  const uint32_t frequency)
 {
@@ -168,11 +589,6 @@ static int spi_smartbond_configure(const struct spi_smartbond_cfg *cfg,
 		return -ENOTSUP;
 	}
 
-	if (spi_cfg->operation & SPI_MODE_LOOP) {
-		LOG_ERR("Loopback mode is not supported");
-		return -ENOTSUP;
-	}
-
 	if (spi_smartbond_isenabled(cfg)) {
 		spi_smartbond_enable(cfg, false);
 	}
@@ -208,6 +624,348 @@ static int spi_smartbond_configure(const struct spi_smartbond_cfg *cfg,
 	return 0;
 }
 
+#ifdef CONFIG_SPI_ASYNC
+static void spi_smartbond_write(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+
+	while (spi_context_tx_buf_on(ctx)) {
+		/* Check if the TX FIFO is full, as otherwise undefined data would be transmitted. */
+		if (spi_smartbond_is_tx_full(dev)) {
+			spi_smartbond_clear_interrupt(dev);
+			break;
+		}
+		/* Send to the TX FIFO and update the buffer pointer. */
+		spi_smartbond_write_word(dev);
+		spi_context_update_tx(ctx, data->dfs, 1);
+	}
+}
+
+static void spi_smartbond_read(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+
+	while (spi_context_rx_buf_on(ctx)) {
+		/* Zero means that the RX FIFO or register is empty */
+		if (!spi_smartbond_is_rx_data(dev)) {
+			break;
+		}
+
+		spi_smartbond_read_word(dev);
+		spi_context_update_rx(ctx, data->dfs, 1);
+		spi_smartbond_clear_interrupt(dev);
+	}
+
+	/* Perform dummy accesses to generate the required clock cycles */
+	while (data->rx_len) {
+		if (spi_smartbond_is_tx_full(dev)) {
+			break;
+		}
+		spi_smartbond_write_dummy(dev);
+		data->rx_len--;
+	}
+}
+
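+/*
+ * Full-duplex service routine: drain whatever the RX FIFO holds, then top
+ * up the TX FIFO; it is re-entered until both buffer sets are exhausted.
+ */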
+static void spi_smartbond_transfer(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+
+	while (spi_context_rx_buf_on(ctx)) {
+		/* Zero means that the RX FIFO or register is empty */
+		if (!spi_smartbond_is_rx_data(dev)) {
+			break;
+		}
+		spi_smartbond_read_word(dev);
+		spi_smartbond_clear_interrupt(dev);
+
+		/* Update context */
+		spi_context_update_rx(ctx, data->dfs, 1);
+	}
+
+	while (spi_context_tx_buf_on(ctx)) {
+		/* Check if the TX FIFO is full, as otherwise undefined data would be transmitted. */
+		if (spi_smartbond_is_tx_full(dev)) {
+			break;
+		}
+		spi_smartbond_write_word(dev);
+
+		spi_context_update_tx(ctx, data->dfs, 1);
+	}
+}
+
+#ifndef CONFIG_SPI_SMARTBOND_DMA
+static void spi_smartbond_transfer_truncated(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+
+	while (data->rx_len) {
+		/* Zero means that the RX FIFO or register is empty */
+		if (!spi_smartbond_is_rx_data(dev)) {
+			break;
+		}
+		spi_smartbond_read_word(dev);
+		spi_smartbond_clear_interrupt(dev);
+
+		/* Update context */
+		spi_context_update_rx(ctx, data->dfs, 1);
+		data->rx_len--;
+	}
+
+	while (data->tx_len) {
+		/* Check if the TX FIFO is full, as otherwise undefined data would be transmitted. */
+		if (spi_smartbond_is_tx_full(dev)) {
+			break;
+		}
+		spi_smartbond_write_word(dev);
+
+		spi_context_update_tx(ctx, data->dfs, 1);
+		data->tx_len--;
+	}
+}
+#endif
+
+static int spi_smartbond_transfer_mode_get(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+
+	if (spi_context_rx_buf_on(ctx) || spi_context_tx_buf_on(ctx)) {
+		if (!spi_context_rx_buf_on(ctx)) {
+			spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_ONLY);
+			return SPI_SMARTBOND_TRANSFER_TX_ONLY;
+		}
+
+		if (!spi_context_tx_buf_on(ctx)) {
+			spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_RX_ONLY);
+			return SPI_SMARTBOND_TRANSFER_RX_ONLY;
+		}
+
+		spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_RX);
+
+		if (spi_context_total_rx_len(ctx) == spi_context_total_tx_len(ctx)) {
+			return SPI_SMARTBOND_TRANSFER_TX_RX;
+		}
+
+		return SPI_SMARTBOND_TRANSFER_TX_RX_TRUNCATED;
+	}
+
+	spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_NONE);
+	return SPI_SMARTBOND_TRANSFER_NONE;
+}
+
+static inline void spi_smartbond_transfer_mode_check_and_update(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+
+	data->transfer_mode = spi_smartbond_transfer_mode_get(dev);
+}
+
+#ifndef CONFIG_SPI_SMARTBOND_DMA
+static void spi_smartbond_isr(void *args)
+{
+	const struct device *dev = args;
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+
+	switch (data->transfer_mode) {
+	case SPI_SMARTBOND_TRANSFER_RX_ONLY:
+		/* data->rx_len is primed once by spi_smartbond_isr_trigger() */
+		spi_smartbond_read(dev);
+		break;
+	case SPI_SMARTBOND_TRANSFER_TX_ONLY:
+		spi_smartbond_write(dev);
+		break;
+	case SPI_SMARTBOND_TRANSFER_TX_RX:
+		spi_smartbond_transfer(dev);
+		break;
+	case SPI_SMARTBOND_TRANSFER_TX_RX_TRUNCATED:
+		spi_smartbond_transfer_truncated(dev);
+		/* The truncated part is done, so the transfer mode should be re-evaluated */
+		if (!data->rx_len && !data->tx_len) {
+			spi_smartbond_transfer_mode_check_and_update(dev);
+		}
+		break;
+	case SPI_SMARTBOND_TRANSFER_NONE:
+		__fallthrough;
+	default:
+		__ASSERT_MSG_INFO("Invalid transfer mode");
+		break;
+	}
+
+	/* All buffers have been exercised, signal completion */
+	if (!spi_context_tx_buf_on(ctx) && !spi_context_rx_buf_on(ctx)) {
+		/* Unblock the waiting task */
+		spi_context_complete(ctx, dev, 0);
+		spi_smartbond_isr_set_status(dev, false);
+		spi_smartbond_pm_policy_state_lock_put(data);
+	}
+}
+
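+/*
+ * Kicks off an interrupt-driven transfer from thread context: the FIFOs are
+ * pre-filled here and the SPI interrupt then keeps them serviced until the
+ * buffers are exhausted.
+ */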
+static void spi_smartbond_isr_trigger(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+
+	data->transfer_mode = spi_smartbond_transfer_mode_get(dev);
+
+	switch (data->transfer_mode) {
+	case SPI_SMARTBOND_TRANSFER_RX_ONLY:
+		data->rx_len = spi_context_total_rx_len(ctx);
+		spi_smartbond_read(dev);
+		break;
+	case SPI_SMARTBOND_TRANSFER_TX_ONLY:
+		spi_smartbond_write(dev);
+		break;
+	case SPI_SMARTBOND_TRANSFER_TX_RX:
+		spi_smartbond_transfer(dev);
+		break;
+	case SPI_SMARTBOND_TRANSFER_TX_RX_TRUNCATED: {
+		size_t min_len = MIN(spi_context_total_rx_len(ctx),
+				     spi_context_total_tx_len(ctx));
+
+		data->rx_len = min_len;
+		data->tx_len = min_len;
+		spi_smartbond_transfer_truncated(dev);
+		break;
+	}
+	case SPI_SMARTBOND_TRANSFER_NONE:
+		__fallthrough;
+	default:
+		__ASSERT_MSG_INFO("Invalid transfer mode");
+		break;
+	}
+
+	spi_smartbond_isr_set_status(dev, true);
+}
+#endif
+#endif
+
+#ifdef CONFIG_SPI_SMARTBOND_DMA
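+/*
+ * Fixed source word for RX-only transfers: the TX channel replays it with
+ * address increment disabled, purely to keep the bus clocking while the RX
+ * channel captures data.
+ */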
+static uint32_t spi_smartbond_read_dummy_buf;
+
+static int spi_smartbond_dma_trigger(const struct device *dev)
+{
+	struct spi_smartbond_data *data = dev->data;
+	const struct spi_smartbond_cfg *config = dev->config;
+	struct spi_context *ctx = &data->ctx;
+	struct dma_config *tx = &data->tx_dma_cfg;
+	struct dma_config *rx = &data->rx_dma_cfg;
+	struct dma_block_config *tx_block = &data->tx_dma_block_cfg;
+	struct dma_block_config *rx_block = &data->rx_dma_block_cfg;
+
+	rx->source_data_size = data->dfs;
+	rx->dest_data_size = data->dfs;
+	tx->source_data_size = data->dfs;
+	tx->dest_data_size = data->dfs;
+
+	data->transfer_mode = spi_smartbond_transfer_mode_get(dev);
+	do {
+		switch (data->transfer_mode) {
+		case SPI_SMARTBOND_TRANSFER_RX_ONLY:
+			data->rx_len = spi_context_max_continuous_chunk(ctx);
+			data->tx_len = data->rx_len;
+
+			if (data->rx_len == 1) {
+				while (spi_context_rx_buf_on(ctx)) {
+					spi_smartbond_read(dev);
+				}
+			} else {
+				rx_block->block_size = data->rx_len * data->dfs;
+				rx_block->dest_address = (uint32_t)ctx->rx_buf;
+				tx_block->block_size = rx_block->block_size;
+				tx_block->source_address = (uint32_t)&spi_smartbond_read_dummy_buf;
+				/* Do not increment */
+				tx_block->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+
+				if (dma_config(data->dma, config->tx_dma_chan, tx) < 0) {
+					LOG_ERR("TX DMA configuration failed");
+					return -EINVAL;
+				}
+				if (dma_config(data->dma, config->rx_dma_chan, rx) < 0) {
+					LOG_ERR("RX DMA configuration failed");
+					return -EINVAL;
+				}
+				dma_start(data->dma, config->rx_dma_chan);
+				dma_start(data->dma, config->tx_dma_chan);
+
+				/* Wait for the current DMA transfer to complete */
+				k_sem_take(&data->tx_dma_sync, K_FOREVER);
+				k_sem_take(&data->rx_dma_sync, K_FOREVER);
+			}
+			break;
+		case SPI_SMARTBOND_TRANSFER_TX_ONLY:
+			data->tx_len = spi_context_max_continuous_chunk(ctx);
+
+			if (data->tx_len == 1) {
+				while (spi_context_tx_buf_on(ctx)) {
+					spi_smartbond_write(dev);
+				}
+			} else {
+				tx_block->block_size = data->tx_len * data->dfs;
+				tx_block->source_address = (uint32_t)ctx->tx_buf;
+				/* Incremental */
+				tx_block->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
+
+				if (dma_config(data->dma, config->tx_dma_chan, tx) < 0) {
+					LOG_ERR("TX DMA configuration failed");
+					return -EINVAL;
+				}
+				dma_start(data->dma, config->tx_dma_chan);
+
+				/* Wait for the current DMA transfer to complete */
+				k_sem_take(&data->tx_dma_sync, K_FOREVER);
+			}
+			break;
+		case SPI_SMARTBOND_TRANSFER_TX_RX:
+		case SPI_SMARTBOND_TRANSFER_TX_RX_TRUNCATED:
+			data->rx_len = spi_context_max_continuous_chunk(ctx);
+			data->tx_len = data->rx_len;
+
+			if (data->rx_len == 1) {
+				while (spi_context_rx_on(ctx) || spi_context_tx_on(ctx)) {
+					spi_smartbond_transfer(dev);
+				}
+			} else {
+				tx_block->block_size = data->tx_len * data->dfs;
+				tx_block->source_address = (uint32_t)ctx->tx_buf;
+				/* Incremental */
+				tx_block->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
+				rx_block->block_size = tx_block->block_size;
+				rx_block->dest_address = (uint32_t)ctx->rx_buf;
+
+				if (dma_config(data->dma, config->tx_dma_chan, tx) < 0) {
+					LOG_ERR("TX DMA configuration failed");
+					return -EINVAL;
+				}
+				if (dma_config(data->dma, config->rx_dma_chan, rx) < 0) {
+					LOG_ERR("RX DMA configuration failed");
+					return -EINVAL;
+				}
+				dma_start(data->dma, config->rx_dma_chan);
+				dma_start(data->dma, config->tx_dma_chan);
+
+				k_sem_take(&data->tx_dma_sync, K_FOREVER);
+				k_sem_take(&data->rx_dma_sync, K_FOREVER);
+			}
+			break;
+		case SPI_SMARTBOND_TRANSFER_NONE:
+			__fallthrough;
+		default:
+			__ASSERT_MSG_INFO("Invalid transfer mode");
+			break;
+		}
+
+		spi_smartbond_transfer_mode_check_and_update(dev);
+	} while (data->transfer_mode != SPI_SMARTBOND_TRANSFER_NONE);
+
+	return 0;
+}
+#endif
+
 static int spi_smartbond_transceive(const struct device *dev, const struct spi_config *spi_cfg,
 				    const struct spi_buf_set *tx_bufs,
 				    const struct spi_buf_set *rx_bufs)
@@ -215,33 +973,32 @@ static int spi_smartbond_transceive(const struct device *dev, const struct spi_config *spi_cfg,
 	const struct spi_smartbond_cfg *cfg = dev->config;
 	struct spi_smartbond_data *data = dev->data;
 	struct spi_context *ctx = &data->ctx;
-	uint32_t bitmask;
 	int rc;
 
 	spi_smartbond_pm_policy_state_lock_get(data);
 	spi_context_lock(&data->ctx, false, NULL, NULL, spi_cfg);
+
 	rc = spi_smartbond_configure(cfg, data, spi_cfg);
 	if (rc == 0) {
 		spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->dfs);
 		spi_context_cs_control(ctx, true);
 
-		bitmask = ~((~0UL) << SPI_WORD_SIZE_GET(data->ctx.config->operation));
 		while (spi_context_tx_buf_on(ctx) || spi_context_rx_buf_on(ctx)) {
 			if (spi_context_tx_buf_on(ctx)) {
-				cfg->regs->SPI_RX_TX_REG = (*(uint32_t *)ctx->tx_buf) & bitmask;
+				spi_smartbond_write_word(dev);
 				spi_context_update_tx(ctx, data->dfs, 1);
 			} else {
-				cfg->regs->SPI_RX_TX_REG = 0UL;
+				spi_smartbond_write_dummy(dev);
 			}
 			while (!(cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_INT_BIT_Msk)) {
 			};
 			if (spi_context_rx_buf_on(ctx)) {
-				(*(uint32_t *)ctx->rx_buf) = cfg->regs->SPI_RX_TX_REG & bitmask;
+				spi_smartbond_read_word(dev);
 				spi_context_update_rx(ctx, data->dfs, 1);
 			} else {
-				(void)cfg->regs->SPI_RX_TX_REG;
+				spi_smartbond_read_discard(dev);
 			}
 			cfg->regs->SPI_CLEAR_INT_REG = 1UL;
 		}
@@ -253,6 +1010,7 @@ static int spi_smartbond_transceive(const struct device *dev, const struct spi_config *spi_cfg,
 	return rc;
 }
 
+
 #ifdef CONFIG_SPI_ASYNC
 static int spi_smartbond_transceive_async(const struct device *dev,
 					  const struct spi_config *spi_cfg,
@@ -260,7 +1018,35 @@ static int spi_smartbond_transceive_async(const struct device *dev,
 					  const struct spi_buf_set *rx_bufs,
 					  spi_callback_t cb,
 					  void *userdata)
 {
-	return -ENOTSUP;
+	const struct spi_smartbond_cfg *cfg = dev->config;
+	struct spi_smartbond_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+	int rc;
+
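+	/*
+	 * With plain interrupts the call returns as soon as the ISR machinery
+	 * is armed; the DMA build instead blocks on the channel semaphores and
+	 * signals completion before returning.
+	 */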
+	spi_context_lock(ctx, true, cb, userdata, spi_cfg);
+
+	rc = spi_smartbond_configure(cfg, data, spi_cfg);
+	if (rc == 0) {
+		spi_smartbond_pm_policy_state_lock_get(data);
+		spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->dfs);
+		spi_context_cs_control(ctx, true);
+
+#ifndef CONFIG_SPI_SMARTBOND_DMA
+		spi_smartbond_isr_trigger(dev);
+		rc = spi_context_wait_for_completion(ctx);
+#else
+		rc = spi_smartbond_dma_trigger(dev);
+		/* Mark completion to trigger the callback function */
+		spi_context_complete(ctx, dev, 0);
+
+		spi_smartbond_pm_policy_state_lock_put(data);
+#endif
+	}
+	spi_context_cs_control(ctx, false);
+
+	spi_context_release(ctx, rc);
+
+	return rc;
 }
 #endif
 
@@ -308,6 +1094,14 @@ static int spi_smartbond_resume(const struct device *dev)
 		return rc;
 	}
 
+#ifdef CONFIG_SPI_SMARTBOND_DMA
+	rc = spi_smartbond_dma_config(dev);
+	if (rc < 0) {
+		LOG_ERR("Failed to configure DMA");
+		return rc;
+	}
+#endif
+
 	spi_context_unlock_unconditionally(&data->ctx);
 
 	return 0;
@@ -331,6 +1125,10 @@ static int spi_smartbond_suspend(const struct device *dev)
 		LOG_WRN("Failed to configure the SPI pins to inactive state");
 	}
 
+#ifdef CONFIG_SPI_SMARTBOND_DMA
+	spi_smartbond_dma_deconfig(dev);
+#endif
+
 	return ret;
 }
 
@@ -356,11 +1154,57 @@ static int spi_smartbond_pm_action(const struct device *dev,
 }
 #endif
 
+#if defined(CONFIG_SPI_ASYNC)
+#if DT_NODE_HAS_STATUS(DT_NODELABEL(spi), okay) && !defined(CONFIG_SPI_SMARTBOND_DMA)
+#define SPI_SMARTBOND_ISR_CONNECT \
+	IRQ_CONNECT(DT_IRQN(DT_NODELABEL(spi)), DT_IRQ(DT_NODELABEL(spi), priority), \
+		    spi_smartbond_isr, DEVICE_DT_GET(DT_NODELABEL(spi)), 0); \
+	irq_enable(DT_IRQN(DT_NODELABEL(spi)));
+#else
+#define SPI_SMARTBOND_ISR_CONNECT
+#endif
+
+#if DT_NODE_HAS_STATUS(DT_NODELABEL(spi2), okay) && !defined(CONFIG_SPI_SMARTBOND_DMA)
+#define SPI2_SMARTBOND_ISR_CONNECT \
+	IRQ_CONNECT(DT_IRQN(DT_NODELABEL(spi2)), DT_IRQ(DT_NODELABEL(spi2), priority), \
+		    spi_smartbond_isr, DEVICE_DT_GET(DT_NODELABEL(spi2)), 0); \
+	irq_enable(DT_IRQN(DT_NODELABEL(spi2)));
+#else
+#define SPI2_SMARTBOND_ISR_CONNECT
+#endif
+
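+/*
+ * IRQ lines can only be wired at build time, so each devicetree instance
+ * gets its own IRQ_CONNECT macro and the matching one is selected at run
+ * time from the instance's register base.
+ */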
+static int spi_smartbond_isr_connect(const struct device *dev)
+{
+	const struct spi_smartbond_cfg *cfg = dev->config;
+
+	switch ((uint32_t)cfg->regs) {
+	case (uint32_t)SPI:
+		SPI_SMARTBOND_ISR_CONNECT
+		break;
+	case (uint32_t)SPI2:
+		SPI2_SMARTBOND_ISR_CONNECT
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#endif
+
 static int spi_smartbond_init(const struct device *dev)
 {
 	int ret;
 	struct spi_smartbond_data *data = dev->data;
 
+#ifdef CONFIG_SPI_ASYNC
+	data->transfer_mode = SPI_SMARTBOND_TRANSFER_NONE;
+#ifdef CONFIG_SPI_SMARTBOND_DMA
+	k_sem_init(&data->tx_dma_sync, 0, 1);
+	k_sem_init(&data->rx_dma_sync, 0, 1);
+#endif
+#endif
+
 #ifdef CONFIG_PM_DEVICE_RUNTIME
 	/* Make sure device state is marked as suspended */
 	pm_device_init_suspended(dev);
@@ -373,15 +1217,28 @@ static int spi_smartbond_init(const struct device *dev)
 #endif
 
 	spi_context_unlock_unconditionally(&data->ctx);
 
+#ifdef CONFIG_SPI_ASYNC
+	ret = spi_smartbond_isr_connect(dev);
+#endif
+
 	return ret;
 }
 
+#ifdef CONFIG_SPI_SMARTBOND_DMA
+#define SPI_SMARTBOND_DMA_CHAN_INIT(id) \
+	.tx_dma_chan = DT_INST_PROP_OR(id, tx_dma_channel, 0), \
+	.rx_dma_chan = DT_INST_PROP_OR(id, rx_dma_channel, 0),
+#else
+#define SPI_SMARTBOND_DMA_CHAN_INIT(id)
+#endif
+
 #define SPI_SMARTBOND_DEVICE(id) \
 	PINCTRL_DT_INST_DEFINE(id); \
 	static const struct spi_smartbond_cfg spi_smartbond_##id##_cfg = { \
 		.regs = (SPI_Type *)DT_INST_REG_ADDR(id), \
 		.periph_clock_config = DT_INST_PROP(id, periph_clock_config), \
 		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \
+		SPI_SMARTBOND_DMA_CHAN_INIT(id) \
 	}; \
 	static struct spi_smartbond_data spi_smartbond_##id##_data = { \
 		SPI_CONTEXT_INIT_LOCK(spi_smartbond_##id##_data, ctx), \
diff --git a/dts/bindings/spi/renesas,smartbond-spi.yaml b/dts/bindings/spi/renesas,smartbond-spi.yaml
index bd2b29f1862e96d..42ff6f319afbe22 100644
--- a/dts/bindings/spi/renesas,smartbond-spi.yaml
+++ b/dts/bindings/spi/renesas,smartbond-spi.yaml
@@ -18,3 +18,17 @@ properties:
     type: int
     description: Peripheral clock register configuration (COM domain)
     required: true
+
+  tx-dma-channel:
+    type: int
+    description: |
+      DMA channel to be reserved for SPI TX transfers.
+      A TX channel must be assigned an odd number,
+      i.e. one of [1, 3, 5, 7].
+
+  rx-dma-channel:
+    type: int
+    description: |
+      DMA channel to be reserved for SPI RX transfers.
+      An RX channel must be assigned an even number,
+      i.e. one of [0, 2, 4, 6].
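
For reference, a minimal overlay sketch of how a board might reserve the new
channels (the node label and channel numbers are illustrative, not mandated by
the binding):

&spi {
	status = "okay";
	tx-dma-channel = <1>;
	rx-dma-channel = <0>;
};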