drivers: dma: dma_lpc: fix bug with transfer size/width
Fix for bug:
#59802

The DMA controller only supports one transfer size, but
the Zephyr DMA driver API allows specifying a source_data_size
and dest_data_size which might be different. An old version
of this driver always used dest_data_size for the transfer
size (the variable is called "width"), but a recent change
made the driver use the MIN of the source and dest data
sizes. The MIN choice breaks the I2S driver, which always
sets source_data_size to 1 while dest_data_size is typically
4 (e.g. for two-channel 16-bit PCM data). So the old driver
worked because it used dest_data_size, and the new driver
broke I2S because MIN picked the source_data_size of 1.
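
To illustrate the failure mode, a hypothetical configuration
sketch (the buffer, FIFO address, and sizes are made up; the
fields are Zephyr's struct dma_config / struct dma_block_config):

    #include <zephyr/drivers/dma.h>

    static uint8_t tx_buf[256];     /* hypothetical source buffer */
    #define FIFO_ADDR 0x40097000UL  /* hypothetical peripheral FIFO */

    struct dma_block_config blk = {
            .source_address = (uint32_t)tx_buf,
            .dest_address = (uint32_t)FIFO_ADDR,
            .block_size = 256,      /* in bytes */
    };
    struct dma_config cfg = {
            .head_block = &blk,
            .source_data_size = 1,  /* what the I2S driver passed */
            .dest_data_size = 4,    /* two-channel 16-bit PCM frame */
    };

    /* Old driver:    width = cfg.dest_data_size -> 4 (worked)
     * Recent change: width = MIN(1, 4)          -> 1 (broke I2S)
     * This patch:    assert the two sizes match, so a mismatch
     * like this fails loudly at configure time.
     */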

To prevent confusion, change the DMA driver to assert that
source_data_size and dest_data_size are the same.

Also assert that the source_address and dest_address of
each block_config are properly aligned for the transfer size,
since that is a documented requirement of the DMA controller.
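
For example, with a transfer size of 4, a source_address of
0x20000002 must be rejected, since ROUND_UP(0x20000002, 4) is
0x20000004. For the 1/2/4-byte widths this controller uses, the
same test can be written as a mask check (a sketch, not code
from this patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* True iff addr is a multiple of width; width must be a
     * power of two (1, 2 or 4 for this controller).
     */
    static inline bool dma_addr_is_aligned(uint32_t addr, uint32_t width)
    {
            return (addr & (width - 1U)) == 0U;
    }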

Also rename max_xfer to max_xfer_bytes to make the units
clear, and use this value in the many places that compare
block_size in bytes, instead of converting block_size to
words by dividing by width and then comparing against
NXP_LPC_DMA_MAX_XFER.
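
Comparing in bytes also behaves better when block_size is not
an exact multiple of width, because the old division truncated.
A small sketch of the two forms, with block_size and width as
in the driver:

    uint32_t max_xfer_bytes = NXP_LPC_DMA_MAX_XFER * width;

    /* Before: truncating division could under-count. E.g. with
     * width == 4 and block_size == max_xfer_bytes + 1,
     * block_size / width == NXP_LPC_DMA_MAX_XFER, so the
     * oversized block was not split.
     */
    bool split_old = (block_size / width) > NXP_LPC_DMA_MAX_XFER;

    /* After: units match and nothing truncates. */
    bool split_new = block_size > max_xfer_bytes;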

Signed-off-by: Mike J. Chen <[email protected]>
mjchen0 committed Jul 17, 2023
1 parent 006554f commit fcf0b63
56 changes: 36 additions & 20 deletions drivers/dma/dma_mcux_lpc.c
@@ -115,7 +115,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
 	uint32_t xfer_config = 0U;
 	dma_descriptor_t *next_descriptor = NULL;
 	uint32_t width = data->width;
-	uint32_t max_xfer = NXP_LPC_DMA_MAX_XFER * width;
+	uint32_t max_xfer_bytes = NXP_LPC_DMA_MAX_XFER * width;
 	bool setup_extra_descriptor = false;
 	uint8_t enable_interrupt;
 	uint8_t reload;
@@ -135,7 +135,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
 			return -ENOMEM;
 		}
 		/* Do we need to queue additional DMA descriptors for this block */
-		if ((local_block.block_size / width > NXP_LPC_DMA_MAX_XFER) ||
+		if ((local_block.block_size > max_xfer_bytes) ||
 		    (local_block.next_block != NULL)) {
 			/* Allocate DMA descriptors */
 			next_descriptor =
@@ -183,7 +183,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
 		}

 		/* Fire an interrupt after the whole block has been transferred */
-		if (local_block.block_size / width > NXP_LPC_DMA_MAX_XFER) {
+		if (local_block.block_size > max_xfer_bytes) {
 			enable_interrupt = 0;
 		} else {
 			enable_interrupt = 1;
@@ -201,7 +201,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
 					       width,
 					       src_inc,
 					       dest_inc,
-					       MIN(local_block.block_size, max_xfer));
+					       MIN(local_block.block_size, max_xfer_bytes));

 		DMA_SetupDescriptor(data->curr_descriptor,
 				    xfer_config,
@@ -211,13 +211,13 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
 		data->curr_descriptor = next_descriptor;

-		if (local_block.block_size / width > NXP_LPC_DMA_MAX_XFER) {
-			local_block.block_size -= max_xfer;
+		if (local_block.block_size > max_xfer_bytes) {
+			local_block.block_size -= max_xfer_bytes;
 			if (src_inc) {
-				local_block.source_address += max_xfer;
+				local_block.source_address += max_xfer_bytes;
 			}
 			if (dest_inc) {
-				local_block.dest_address += max_xfer;
+				local_block.dest_address += max_xfer_bytes;
 			}
 		} else {
 			local_block.block_size = 0;
@@ -241,7 +241,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
 					       width,
 					       src_inc,
 					       dest_inc,
-					       MIN(local_block.block_size, max_xfer));
+					       MIN(local_block.block_size, max_xfer_bytes));
 		/* Mark this as invalid */
 		xfer_config &= ~DMA_CHANNEL_XFERCFG_CFGVALID_MASK;
 		DMA_SetupDescriptor(data->curr_descriptor,
@@ -270,7 +270,7 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
 	uint8_t src_inc, dst_inc;
 	bool is_periph = true;
 	uint8_t width;
-	uint32_t max_xfer;
+	uint32_t max_xfer_bytes;
 	uint8_t reload = 0;

 	if (NULL == dev || NULL == config) {
@@ -280,8 +280,14 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
 	dev_config = dev->config;
 	dma_data = dev->data;
 	block_config = config->head_block;
-	width = MIN(config->source_data_size, config->dest_data_size);
-	max_xfer = NXP_LPC_DMA_MAX_XFER * width;
+	/* The DMA controller deals with just one transfer
+	 * size, though the API provides separate sizes
+	 * for source and dest. So assert that the source
+	 * and dest sizes are the same.
+	 */
+	assert(config->dest_data_size == config->source_data_size);
+	width = config->dest_data_size;
+	max_xfer_bytes = NXP_LPC_DMA_MAX_XFER * width;

 	/*
 	 * Check if circular mode is requested.
@@ -454,28 +460,28 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
 	k_spin_unlock(&configuring_otrigs, otrigs_key);

 	/* Check if we need to queue DMA descriptors */
-	if ((block_config->block_size / width > NXP_LPC_DMA_MAX_XFER) ||
+	if ((block_config->block_size > max_xfer_bytes) ||
 	    (block_config->next_block != NULL)) {
 		/* Allocate a DMA descriptor */
 		data->curr_descriptor = data->dma_descriptor_table;

-		if (block_config->block_size / width > NXP_LPC_DMA_MAX_XFER) {
+		if (block_config->block_size > max_xfer_bytes) {
 			/* Disable interrupt as this is not the entire data.
 			 * Reload for the descriptor
 			 */
 			xfer_config = DMA_CHANNEL_XFER(1UL, 0UL, 0UL, 0UL,
 						       width,
 						       src_inc,
 						       dst_inc,
-						       MIN(block_config->block_size, max_xfer));
+						       max_xfer_bytes);
 		} else {
 			/* Enable interrupt and reload for the descriptor
 			 */
 			xfer_config = DMA_CHANNEL_XFER(1UL, 0UL, 1UL, 0UL,
 						       width,
 						       src_inc,
 						       dst_inc,
-						       MIN(block_config->block_size, max_xfer));
+						       block_config->block_size);
 		}
 	} else {
 		/* Enable interrupt for the descriptor */
@@ -485,6 +491,9 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
 					       dst_inc,
 					       block_config->block_size);
 	}
+	/* DMA controller requires that the address be aligned to transfer size */
+	assert(block_config->source_address == ROUND_UP(block_config->source_address, width));
+	assert(block_config->dest_address == ROUND_UP(block_config->dest_address, width));

 	DMA_SubmitChannelTransferParameter(p_handle,
 					   xfer_config,
@@ -494,24 +503,25 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,

 	/* Start queuing DMA descriptors */
 	if (data->curr_descriptor) {
-		if ((block_config->block_size / width > NXP_LPC_DMA_MAX_XFER)) {
+		if (block_config->block_size > max_xfer_bytes) {
 			/* Queue additional DMA descriptors because the amount of data to
 			 * be transferred is greater that the DMA descriptors max XFERCOUNT.
 			 */
 			struct dma_block_config local_block = { 0 };

 			if (src_inc) {
 				local_block.source_address = block_config->source_address
-							     + max_xfer;
+							     + max_xfer_bytes;
 			} else {
 				local_block.source_address = block_config->source_address;
 			}
 			if (dst_inc) {
-				local_block.dest_address = block_config->dest_address + max_xfer;
+				local_block.dest_address = block_config->dest_address
+							   + max_xfer_bytes;
 			} else {
 				local_block.dest_address = block_config->dest_address;
 			}
-			local_block.block_size = block_config->block_size - max_xfer;
+			local_block.block_size = block_config->block_size - max_xfer_bytes;
 			local_block.next_block = block_config->next_block;
 			local_block.source_reload_en = reload;
@@ -525,6 +535,12 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
 	while (block_config != NULL) {
 		block_config->source_reload_en = reload;

+		/* DMA controller requires that the address be aligned to transfer size */
+		assert(block_config->source_address ==
+		       ROUND_UP(block_config->source_address, width));
+		assert(block_config->dest_address ==
+		       ROUND_UP(block_config->dest_address, width));
+
 		if (dma_mcux_lpc_queue_descriptors(data, block_config, src_inc, dst_inc)) {
 			return -ENOMEM;
 		}
