From e175978218486f74b2e4bb0c9286e70e32eca5cb Mon Sep 17 00:00:00 2001
From: "Mike J. Chen"
Date: Tue, 27 Jun 2023 18:48:14 -0700
Subject: [PATCH] drivers: dma: dma_lpc: fix bug with transfer size/width

Fix for bug:
https://github.com/zephyrproject-rtos/zephyr/issues/59802

The DMA controller only supports one transfer size, but the Zephyr DMA
driver API allows specifying a source_data_size and dest_data_size that
might differ. An old version of this driver always used dest_data_size
as the transfer size (the variable is called "width"), but a recent
change made the driver use the MIN of the source and dest data sizes.
The MIN choice breaks the I2S driver, which always sets
source_data_size to 1 while dest_data_size is typically 4 (e.g. for
two-channel 16-bit PCM data). So the old driver worked because it used
dest_data_size, and the new driver broke I2S because MIN picked the
source_data_size of 1.

To prevent confusion, change the DMA driver to assert that
source_data_size and dest_data_size are the same.

Also assert that the source_address and dest_address of each
block_config are properly aligned for the transfer size, since that is
a documented requirement of the DMA controller.

Also rename max_xfer to max_xfer_bytes to make the units clear, and use
this value in the many places that compare block_size in bytes, instead
of converting block_size to words by dividing by width and then
comparing against NXP_LPC_DMA_MAX_XFER.

Signed-off-by: Mike J. Chen
---
 drivers/dma/dma_mcux_lpc.c | 56 ++++++++++++++++++++++++--------------
 1 file changed, 36 insertions(+), 20 deletions(-)

diff --git a/drivers/dma/dma_mcux_lpc.c b/drivers/dma/dma_mcux_lpc.c
index 65716cc5262a7c1..6be791361cec892 100644
--- a/drivers/dma/dma_mcux_lpc.c
+++ b/drivers/dma/dma_mcux_lpc.c
@@ -115,7 +115,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
 	uint32_t xfer_config = 0U;
 	dma_descriptor_t *next_descriptor = NULL;
 	uint32_t width = data->width;
-	uint32_t max_xfer = NXP_LPC_DMA_MAX_XFER * width;
+	uint32_t max_xfer_bytes = NXP_LPC_DMA_MAX_XFER * width;
 	bool setup_extra_descriptor = false;
 	uint8_t enable_interrupt;
 	uint8_t reload;
@@ -135,7 +135,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
 			return -ENOMEM;
 		}
 		/* Do we need to queue additional DMA descriptors for this block */
-		if ((local_block.block_size / width > NXP_LPC_DMA_MAX_XFER) ||
+		if ((local_block.block_size > max_xfer_bytes) ||
 			(local_block.next_block != NULL)) {
 			/* Allocate DMA descriptors */
 			next_descriptor =
@@ -183,7 +183,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
 		}

 		/* Fire an interrupt after the whole block has been transferred */
-		if (local_block.block_size / width > NXP_LPC_DMA_MAX_XFER) {
+		if (local_block.block_size > max_xfer_bytes) {
 			enable_interrupt = 0;
 		} else {
 			enable_interrupt = 1;
@@ -201,7 +201,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
 						width,
 						src_inc,
 						dest_inc,
-						MIN(local_block.block_size, max_xfer));
+						MIN(local_block.block_size, max_xfer_bytes));

 		DMA_SetupDescriptor(data->curr_descriptor,
 				    xfer_config,
@@ -211,13 +211,13 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,

 		data->curr_descriptor = next_descriptor;

-		if (local_block.block_size / width > NXP_LPC_DMA_MAX_XFER) {
-			local_block.block_size -= max_xfer;
+		if (local_block.block_size > max_xfer_bytes) {
+			local_block.block_size -= max_xfer_bytes;
 			if (src_inc) {
-				local_block.source_address += max_xfer;
+				local_block.source_address += max_xfer_bytes;
 			}
 			if (dest_inc) {
-				local_block.dest_address += max_xfer;
+				local_block.dest_address += max_xfer_bytes;
 			}
 		} else {
 			local_block.block_size = 0;
@@ -241,7 +241,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
 						width,
 						src_inc,
 						dest_inc,
-						MIN(local_block.block_size, max_xfer));
+						MIN(local_block.block_size, max_xfer_bytes));
 		/* Mark this as invalid */
 		xfer_config &= ~DMA_CHANNEL_XFERCFG_CFGVALID_MASK;
 		DMA_SetupDescriptor(data->curr_descriptor,
@@ -271,7 +271,7 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
 	uint8_t src_inc, dst_inc;
 	bool is_periph = true;
 	uint8_t width;
-	uint32_t max_xfer;
+	uint32_t max_xfer_bytes;
 	uint8_t reload = 0;

 	if (NULL == dev || NULL == config) {
@@ -281,8 +281,14 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
 	dev_config = dev->config;
 	dma_data = dev->data;
 	block_config = config->head_block;
-	width = MIN(config->source_data_size, config->dest_data_size);
-	max_xfer = NXP_LPC_DMA_MAX_XFER * width;
+	/* The DMA controller deals with just one transfer
+	 * size, though the API provides separate sizes
+	 * for source and dest. So assert that the source
+	 * and dest sizes are the same.
+	 */
+	assert(config->dest_data_size == config->source_data_size);
+	width = config->dest_data_size;
+	max_xfer_bytes = NXP_LPC_DMA_MAX_XFER * width;

 	/*
 	 * Check if circular mode is requested.
@@ -461,12 +467,12 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
 	k_spin_unlock(&configuring_otrigs, otrigs_key);

 	/* Check if we need to queue DMA descriptors */
-	if ((block_config->block_size / width > NXP_LPC_DMA_MAX_XFER) ||
+	if ((block_config->block_size > max_xfer_bytes) ||
 		(block_config->next_block != NULL)) {
 		/* Allocate a DMA descriptor */
 		data->curr_descriptor = data->dma_descriptor_table;

-		if (block_config->block_size / width > NXP_LPC_DMA_MAX_XFER) {
+		if (block_config->block_size > max_xfer_bytes) {
 			/* Disable interrupt as this is not the entire data.
 			 * Reload for the descriptor
 			 */
@@ -474,7 +480,7 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
 							width,
 							src_inc,
 							dst_inc,
-							MIN(block_config->block_size, max_xfer));
+							max_xfer_bytes);
 		} else {
 			/* Enable interrupt and reload for the descriptor
 			 */
@@ -482,7 +488,7 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
 							width,
 							src_inc,
 							dst_inc,
-							MIN(block_config->block_size, max_xfer));
+							block_config->block_size);
 		}
 	} else {
 		/* Enable interrupt for the descriptor */
@@ -492,6 +498,9 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
 						dst_inc,
 						block_config->block_size);
 	}
+	/* DMA controller requires that the address be aligned to transfer size */
+	assert(block_config->source_address == ROUND_UP(block_config->source_address, width));
+	assert(block_config->dest_address == ROUND_UP(block_config->dest_address, width));

 	DMA_SubmitChannelTransferParameter(p_handle,
 					   xfer_config,
@@ -501,7 +510,7 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,

 	/* Start queuing DMA descriptors */
 	if (data->curr_descriptor) {
-		if ((block_config->block_size / width > NXP_LPC_DMA_MAX_XFER)) {
+		if (block_config->block_size > max_xfer_bytes) {
 			/* Queue additional DMA descriptors because the amount of data to
 			 * be transferred is greater that the DMA descriptors max XFERCOUNT.
 			 */
@@ -509,16 +518,17 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,

 			if (src_inc) {
 				local_block.source_address = block_config->source_address
-							     + max_xfer;
+							     + max_xfer_bytes;
 			} else {
 				local_block.source_address = block_config->source_address;
 			}
 			if (dst_inc) {
-				local_block.dest_address = block_config->dest_address + max_xfer;
+				local_block.dest_address = block_config->dest_address
+							   + max_xfer_bytes;
 			} else {
 				local_block.dest_address = block_config->dest_address;
 			}
-			local_block.block_size = block_config->block_size - max_xfer;
+			local_block.block_size = block_config->block_size - max_xfer_bytes;
 			local_block.next_block = block_config->next_block;

 			local_block.source_reload_en = reload;
@@ -532,6 +542,12 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
 	while (block_config != NULL) {
 		block_config->source_reload_en = reload;

+		/* DMA controller requires that the address be aligned to transfer size */
+		assert(block_config->source_address ==
+		       ROUND_UP(block_config->source_address, width));
+		assert(block_config->dest_address ==
+		       ROUND_UP(block_config->dest_address, width));
+
 		if (dma_mcux_lpc_queue_descriptors(data, block_config, src_inc, dst_inc)) {
 			return -ENOMEM;
 		}
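
Note for users of this driver: with these asserts in place, a dma_config
passed in must use equal source and dest data sizes and width-aligned
addresses. Below is a minimal sketch of a conforming memory-to-peripheral
configuration; the buffer, function, and parameter names are hypothetical,
while the struct fields and dma_config() come from Zephyr's public DMA API
(include/zephyr/drivers/dma.h):

  #include <zephyr/drivers/dma.h>

  /* Hypothetical buffer of two-channel 16-bit PCM frames. Declaring it as
   * uint32_t keeps the source address aligned to the 4-byte transfer
   * width, satisfying the new alignment asserts.
   */
  static uint32_t pcm_buf[256];

  static int start_pcm_tx(const struct device *dma_dev, uint32_t channel,
                          uint32_t periph_fifo_addr)
  {
          struct dma_block_config blk = {
                  .source_address = (uint32_t)pcm_buf,
                  /* Peripheral FIFO address must also be 4-byte aligned. */
                  .dest_address = periph_fifo_addr,
                  .block_size = sizeof(pcm_buf), /* in bytes */
          };
          struct dma_config cfg = {
                  .channel_direction = MEMORY_TO_PERIPHERAL,
                  /* Must be equal now; the controller has one transfer width. */
                  .source_data_size = 4,
                  .dest_data_size = 4,
                  .block_count = 1,
                  .head_block = &blk,
          };

          return dma_config(dma_dev, channel, &cfg);
  }

With this patch, the mismatched sizes described above (source_data_size of 1
with dest_data_size of 4) trip the first assert instead of silently
programming a 1-byte transfer width.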