fix bugs in mcux i2s and dma #59832

Merged 3 commits on Jul 19, 2023
56 changes: 36 additions & 20 deletions drivers/dma/dma_mcux_lpc.c
@@ -115,7 +115,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
uint32_t xfer_config = 0U;
dma_descriptor_t *next_descriptor = NULL;
uint32_t width = data->width;
- uint32_t max_xfer = NXP_LPC_DMA_MAX_XFER * width;
+ uint32_t max_xfer_bytes = NXP_LPC_DMA_MAX_XFER * width;
bool setup_extra_descriptor = false;
uint8_t enable_interrupt;
uint8_t reload;
@@ -135,7 +135,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
return -ENOMEM;
}
/* Do we need to queue additional DMA descriptors for this block */
- if ((local_block.block_size / width > NXP_LPC_DMA_MAX_XFER) ||
+ if ((local_block.block_size > max_xfer_bytes) ||
(local_block.next_block != NULL)) {
/* Allocate DMA descriptors */
next_descriptor =
@@ -183,7 +183,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
}

/* Fire an interrupt after the whole block has been transferred */
- if (local_block.block_size / width > NXP_LPC_DMA_MAX_XFER) {
+ if (local_block.block_size > max_xfer_bytes) {
enable_interrupt = 0;
} else {
enable_interrupt = 1;
@@ -201,7 +201,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
width,
src_inc,
dest_inc,
- MIN(local_block.block_size, max_xfer));
+ MIN(local_block.block_size, max_xfer_bytes));

DMA_SetupDescriptor(data->curr_descriptor,
xfer_config,
@@ -211,13 +211,13 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,

data->curr_descriptor = next_descriptor;

- if (local_block.block_size / width > NXP_LPC_DMA_MAX_XFER) {
- local_block.block_size -= max_xfer;
+ if (local_block.block_size > max_xfer_bytes) {
+ local_block.block_size -= max_xfer_bytes;
if (src_inc) {
- local_block.source_address += max_xfer;
+ local_block.source_address += max_xfer_bytes;
}
if (dest_inc) {
- local_block.dest_address += max_xfer;
+ local_block.dest_address += max_xfer_bytes;
}
} else {
local_block.block_size = 0;
@@ -241,7 +241,7 @@ static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
width,
src_inc,
dest_inc,
- MIN(local_block.block_size, max_xfer));
+ MIN(local_block.block_size, max_xfer_bytes));
/* Mark this as invalid */
xfer_config &= ~DMA_CHANNEL_XFERCFG_CFGVALID_MASK;
DMA_SetupDescriptor(data->curr_descriptor,
@@ -270,7 +270,7 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
uint8_t src_inc, dst_inc;
bool is_periph = true;
uint8_t width;
- uint32_t max_xfer;
+ uint32_t max_xfer_bytes;
uint8_t reload = 0;

if (NULL == dev || NULL == config) {
@@ -280,8 +280,14 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
dev_config = dev->config;
dma_data = dev->data;
block_config = config->head_block;
- width = MIN(config->source_data_size, config->dest_data_size);
- max_xfer = NXP_LPC_DMA_MAX_XFER * width;
+ /* The DMA controller deals with just one transfer
+  * size, though the API provides separate sizes
+  * for source and dest. So assert that the source
+  * and dest sizes are the same.
+  */
+ assert(config->dest_data_size == config->source_data_size);
Collaborator:
This assert may cause failures in some of the other drivers that use this driver. I would suggest removing it and updating the comment.

Contributor Author:
If the caller has dest_data_size == source_data_size, the assert won't trip; it also won't trip if asserts are not enabled.

If the caller has different values for src and dest sizes, then it's a problem they need to think about and fix. The old driver (prior to the change that added chaining for long transfers) used MIN; the driver that supports long transfers used MAX. That change of behavior already broke existing drivers silently (e.g. the I2S usage). I think it's better to detect such problems as soon as possible, instead of silently doing something the caller might not expect and possibly setting the size to the opposite of what they intended.
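
To make the silent divergence concrete, here is a minimal standalone sketch (not from this PR) using hypothetical source/dest sizes of 2 and 4 bytes:

#include <stdio.h>

#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))

int main(void)
{
	/* Hypothetical sizes chosen only to show the divergence. */
	unsigned int src_size = 2, dst_size = 4;

	/* Pre-chaining driver behavior: width = MIN -> 2-byte beats. */
	printf("MIN width: %u\n", MIN(src_size, dst_size));
	/* Long-transfer rework behavior: width = MAX -> 4-byte beats. */
	printf("MAX width: %u\n", MAX(src_size, dst_size));
	/* Same caller config, twice the bytes moved per beat. */
	return 0;
}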

Collaborator:
I know we will have to update the SPI driver to address this. Let me know if you would like me to send you a patch.

Contributor Author:
I can add that. I thought I had checked the SPI driver, since we use that too, but I think we didn't catch it because our usage of SPI always uses a 1-byte dest_data_size (src_data_size is fixed at 1).

Member:
I think the UART driver will also be fine, because its destination and source data sizes will both always be 1.
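
For callers, the practical upshot of the assert is that both sizes must be set to the same value. Below is a minimal sketch against Zephyr's DMA API; the device handle, channel number, and buffers are hypothetical:

#include <zephyr/drivers/dma.h>

static uint8_t src_buf[512];
static uint8_t dst_buf[512];

int start_mem_to_mem(const struct device *dma_dev, uint32_t channel)
{
	struct dma_block_config blk = {
		.source_address = (uint32_t)src_buf,
		.dest_address = (uint32_t)dst_buf,
		.block_size = sizeof(src_buf),
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 4,	/* must match dest_data_size... */
		.dest_data_size = 4,	/* ...or the new assert fires */
		.block_count = 1,
		.head_block = &blk,
	};

	return dma_config(dma_dev, channel, &cfg);
}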

+ width = config->dest_data_size;
+ max_xfer_bytes = NXP_LPC_DMA_MAX_XFER * width;

/*
* Check if circular mode is requested.
@@ -454,28 +460,28 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
k_spin_unlock(&configuring_otrigs, otrigs_key);

/* Check if we need to queue DMA descriptors */
- if ((block_config->block_size / width > NXP_LPC_DMA_MAX_XFER) ||
+ if ((block_config->block_size > max_xfer_bytes) ||
(block_config->next_block != NULL)) {
/* Allocate a DMA descriptor */
data->curr_descriptor = data->dma_descriptor_table;

- if (block_config->block_size / width > NXP_LPC_DMA_MAX_XFER) {
+ if (block_config->block_size > max_xfer_bytes) {
/* Disable interrupt as this is not the entire data.
* Reload for the descriptor
*/
xfer_config = DMA_CHANNEL_XFER(1UL, 0UL, 0UL, 0UL,
width,
src_inc,
dst_inc,
- MIN(block_config->block_size, max_xfer));
+ max_xfer_bytes);
} else {
/* Enable interrupt and reload for the descriptor
*/
xfer_config = DMA_CHANNEL_XFER(1UL, 0UL, 1UL, 0UL,
width,
src_inc,
dst_inc,
- MIN(block_config->block_size, max_xfer));
+ block_config->block_size);
}
} else {
/* Enable interrupt for the descriptor */
@@ -485,6 +491,9 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
dst_inc,
block_config->block_size);
}
+ /* DMA controller requires that the address be aligned to transfer size */
Collaborator (@mmahadevan108, Jul 17, 2023):
I am not aware of such an alignment requirement. Were you seeing an issue?
There is a requirement for the DMA descriptors, which this driver addresses.

Contributor Author:
In the RT500 Reference Manual, section 16.4.2.4 says:

16.4.2.4 Address alignment for data transfers
Transfers of 16 bit width require an address alignment to a multiple of 2 bytes. Transfers of 32 bit width require an address alignment to a multiple of 4 bytes. Transfers of 8 bit width can be at any address
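
A small sketch of that rule as a runtime check, using the same ROUND_UP idiom the driver itself uses (function and buffer names are illustrative):

#include <stdint.h>
#include <assert.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/util.h>

/* RM 16.4.2.4: 8-bit transfers may use any address, 16-bit transfers
 * need 2-byte alignment, and 32-bit transfers need 4-byte alignment.
 */
static inline void check_xfer_alignment(uint32_t addr, uint8_t width)
{
	assert(addr == ROUND_UP(addr, width));
}

/* Declaring buffers with __aligned(4) keeps them legal for any width. */
static uint8_t i2s_rx_buf[256] __aligned(4);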

+ assert(block_config->source_address == ROUND_UP(block_config->source_address, width));
+ assert(block_config->dest_address == ROUND_UP(block_config->dest_address, width));

DMA_SubmitChannelTransferParameter(p_handle,
xfer_config,
@@ -494,24 +503,25 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,

/* Start queuing DMA descriptors */
if (data->curr_descriptor) {
- if ((block_config->block_size / width > NXP_LPC_DMA_MAX_XFER)) {
+ if (block_config->block_size > max_xfer_bytes) {
/* Queue additional DMA descriptors because the amount of data to
* be transferred is greater than the DMA descriptor's max XFERCOUNT.
*/
struct dma_block_config local_block = { 0 };

if (src_inc) {
- local_block.source_address = block_config->source_address + max_xfer;
+ local_block.source_address = block_config->source_address + max_xfer_bytes;
} else {
local_block.source_address = block_config->source_address;
}
if (dst_inc) {
- local_block.dest_address = block_config->dest_address + max_xfer;
+ local_block.dest_address = block_config->dest_address + max_xfer_bytes;
} else {
local_block.dest_address = block_config->dest_address;
}
- local_block.block_size = block_config->block_size - max_xfer;
+ local_block.block_size = block_config->block_size - max_xfer_bytes;
local_block.next_block = block_config->next_block;
local_block.source_reload_en = reload;

@@ -525,6 +535,12 @@ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
while (block_config != NULL) {
block_config->source_reload_en = reload;

+ /* DMA controller requires that the address be aligned to transfer size */
+ assert(block_config->source_address ==
+ ROUND_UP(block_config->source_address, width));
+ assert(block_config->dest_address ==
+ ROUND_UP(block_config->dest_address, width));

if (dma_mcux_lpc_queue_descriptors(data, block_config, src_inc, dst_inc)) {
return -ENOMEM;
}
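
As a standalone illustration of the descriptor-splitting arithmetic performed above (the max-transfer constant is assumed here; the driver's real limit is NXP_LPC_DMA_MAX_XFER):

#include <stdio.h>

int main(void)
{
	const unsigned int max_xfer = 1024;	/* assumed XFERCOUNT limit */
	const unsigned int width = 2;		/* bytes per transfer */
	const unsigned int max_xfer_bytes = max_xfer * width;	/* 2048 */
	unsigned int block_size = 4100;		/* total bytes to move */

	/* Each full descriptor moves max_xfer_bytes; the remainder goes
	 * in a final descriptor, which is the one that interrupts.
	 */
	while (block_size > max_xfer_bytes) {
		printf("chained descriptor: %u bytes\n", max_xfer_bytes);
		block_size -= max_xfer_bytes;
	}
	printf("final descriptor: %u bytes\n", block_size);	/* 4 */
	return 0;
}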