From bb8c5e721a9eecafaa6a65a753af928ed2a69334 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Chru=C5=9Bci=C5=84ski?= Date: Thu, 4 Jul 2024 14:18:54 +0200 Subject: [PATCH] drivers: serial: nrfx_uarte: Rework driver to support new features MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rework the driver to support a new way of asynchronous RX handling. Previously RX was handled in two modes: using the RXDRDY interrupt for byte counting, or TIMER + PPI. Both modes had flaws. The RXDRDY interrupt mode could miscalculate the amount of received bytes when the interrupt was not handled on time. Data was not lost, but it was not reported on time, which could lead to issues. The PPI+TIMER mode required additional resources, thus it was not the default mode. Often the user was not aware of that option and experienced driver RX faults. The new RX mode switches buffers when there is no new data (RXDRDY event not set for a given amount of time). It does not require additional resources to get precise byte counting. Additionally, this is in line with the new UARTE feature (RX frame timeout) which is present in nRF54X devices. The behavior of the driver is the same for legacy devices and the new ones. For legacy devices periodic k_timer interrupts are used to check if there are any new bytes; this is not needed when the RX frame timeout is present. The improved RX mode is enabled by default (CONFIG_UART_NRFX_UARTE_ENHANCED_RX=y) but the legacy modes are still available, though not recommended. Note that the new RX mode behaves a bit differently because a timeout always triggers a switch of buffers, which means that there will be no UART_RX_RDY events with a non-zero offset. It also means that every UART_RX_RDY will be followed by UART_RX_BUF_RELEASED (a usage sketch follows the diff below). Driver structures are reworked and asynchronous data is split into two structures: tx and rx data. Additionally, support for DMM is added to asynchronous RX. For TX a fixed-size, statically allocated buffer is used. The same goes for the single-byte buffers used for polling and interrupt driven mode (and the RX FIFO flush buffer). After the rework, the driver is recommended for all platforms as it performs much better and takes much less code than the second UART shim available for Nordic devices. Signed-off-by: Krzysztof Chruściński --- drivers/serial/Kconfig.nrfx | 17 +- drivers/serial/Kconfig.nrfx_uart_instance | 1 + drivers/serial/uart_nrfx_uarte.c | 983 +++++++++++++--------- 3 files changed, 620 insertions(+), 381 deletions(-) diff --git a/drivers/serial/Kconfig.nrfx b/drivers/serial/Kconfig.nrfx index 61cf8a29c080cc2..650404fceecb381 100644 --- a/drivers/serial/Kconfig.nrfx +++ b/drivers/serial/Kconfig.nrfx @@ -31,10 +31,19 @@ config UART_NRFX_UARTE config UART_NRFX_UARTE_LEGACY_SHIM bool "Legacy UARTE shim" depends on UART_NRFX_UARTE - # New shim takes more ROM. Until it is fixed use legacy shim. - depends on RISCV || !SOC_SERIES_NRF54HX - default y if !SOC_SERIES_NRF54LX - default n + default y +config UART_NRFX_UARTE_ENHANCED_RX + bool "Enhanced RX handling" + depends on UART_ASYNC_API + depends on UART_NRFX_UARTE_LEGACY_SHIM + default y if !(UART_0_NRF_HW_ASYNC || UART_1_NRF_HW_ASYNC || UART_2_NRF_HW_ASYNC) + help + Enable an RX handling mode which switches buffers on timeout. This is an + enhancement compared to the other two modes (default and hardware assisted). + The default mode could miscount bytes when the interrupt was not handled on + time, and the hardware assisted mode requires a TIMER peripheral instance + and a PPI channel for accurate byte counting.
config UART_ASYNC_TX_CACHE_SIZE int "TX cache buffer size" diff --git a/drivers/serial/Kconfig.nrfx_uart_instance b/drivers/serial/Kconfig.nrfx_uart_instance index aae78e9ca6b454f..8766db310ac8ccb 100644 --- a/drivers/serial/Kconfig.nrfx_uart_instance +++ b/drivers/serial/Kconfig.nrfx_uart_instance @@ -68,6 +68,7 @@ config UART_$(nrfx_uart_num)_NRF_ASYNC_LOW_POWER depends on HAS_HW_NRF_UARTE$(nrfx_uart_num) depends on UART_ASYNC_API depends on UART_NRFX_UARTE_LEGACY_SHIM + default y help When enabled, UARTE is enabled before each TX or RX usage and disabled when not used. Disabling UARTE while in idle allows to achieve lowest diff --git a/drivers/serial/uart_nrfx_uarte.c b/drivers/serial/uart_nrfx_uarte.c index ed73f9aee28c2b8..6a73d1fc9dfaece 100644 --- a/drivers/serial/uart_nrfx_uarte.c +++ b/drivers/serial/uart_nrfx_uarte.c @@ -8,37 +8,29 @@ * @brief Driver for Nordic Semiconductor nRF UARTE */ +#include #include +#include #include -#include -#include #include -#include -#include -#include #include #include - +#include +#include +#include +#include +#include #include -LOG_MODULE_REGISTER(uart_nrfx_uarte, CONFIG_UART_LOG_LEVEL); - -#include - -/* Generalize PPI or DPPI channel management */ -#if defined(PPI_PRESENT) -#include -#define gppi_channel_t nrf_ppi_channel_t -#define gppi_channel_alloc nrfx_ppi_channel_alloc -#define gppi_channel_enable nrfx_ppi_channel_enable -#elif defined(DPPI_PRESENT) -#include -#define gppi_channel_t uint8_t -#define gppi_channel_alloc nrfx_dppi_channel_alloc -#define gppi_channel_enable nrfx_dppi_channel_enable +LOG_MODULE_REGISTER(uart_nrfx_uarte, 0); +#ifdef CONFIG_HAS_NORDIC_DMM +#include #else -#error "No PPI or DPPI" +#define DMM_MEMORY_SECTION(...) +#define DMM_IS_REG_CACHEABLE(...) false #endif +#define RX_FLUSH_WORKAROUND 1 + #define UARTE(idx) DT_NODELABEL(uart##idx) #define UARTE_HAS_PROP(idx, prop) DT_NODE_HAS_PROP(UARTE(idx), prop) #define UARTE_PROP(idx, prop) DT_PROP(UARTE(idx), prop) @@ -78,7 +70,7 @@ LOG_MODULE_REGISTER(uart_nrfx_uarte, CONFIG_UART_LOG_LEVEL); #define IS_HW_ASYNC(unused, prefix, i, _) IS_ENABLED(CONFIG_UART_##prefix##i##_NRF_HW_ASYNC) #if UARTE_FOR_EACH_INSTANCE(IS_HW_ASYNC, (||), (0)) -#define UARTE_HW_ASYNC 1 +#define UARTE_ANY_HW_ASYNC 1 #endif /* Determine if any instance is using enhanced poll_out feature. */ @@ -89,12 +81,16 @@ LOG_MODULE_REGISTER(uart_nrfx_uarte, CONFIG_UART_LOG_LEVEL); #define UARTE_ENHANCED_POLL_OUT 1 #endif -#define INSTANCE_HAS_PROP(unused, prefix, i, prop) UARTE_HAS_PROP(prefix##i, prop) +#define INSTANCE_HAS_PROP(unused, prefix, i, prop) UARTE_PROP(prefix##i, prop) #if UARTE_FOR_EACH_INSTANCE(INSTANCE_HAS_PROP, (+), (0), short_endtx_stoptx) #define UARTE_HAS_ENDTX_STOPTX_SHORT 1 #endif +#if UARTE_FOR_EACH_INSTANCE(INSTANCE_HAS_PROP, (+), (0), frame_timeout) +#define UARTE_HAS_FRAME_TIMEOUT 1 +#endif + /* * RX timeout is divided into time slabs, this define tells how many divisions * should be made. More divisions - higher timeout accuracy and processor usage. 
@@ -105,44 +101,56 @@ LOG_MODULE_REGISTER(uart_nrfx_uarte, CONFIG_UART_LOG_LEVEL); #define UARTE_HW_RX_FIFO_SIZE 5 #ifdef UARTE_ANY_ASYNC -struct uarte_async_cb { - uart_callback_t user_callback; - void *user_data; - const uint8_t *tx_buf; - volatile size_t tx_size; +struct uarte_async_tx { + struct k_timer timer; + const uint8_t *buf; + volatile size_t len; const uint8_t *xfer_buf; size_t xfer_len; + size_t cache_offset; + volatile int amount; + bool pending; +}; - size_t tx_cache_offset; - - struct k_timer tx_timeout_timer; - - uint8_t *rx_buf; - size_t rx_buf_len; - size_t rx_offset; - uint8_t *rx_next_buf; - size_t rx_next_buf_len; - uint32_t rx_total_byte_cnt; /* Total number of bytes received */ - uint32_t rx_total_user_byte_cnt; /* Total number of bytes passed to user */ - int32_t rx_timeout; /* Timeout set by user */ - int32_t rx_timeout_slab; /* rx_timeout divided by RX_TIMEOUT_DIV */ - int32_t rx_timeout_left; /* Current time left until user callback */ - struct k_timer rx_timeout_timer; +struct uarte_async_rx { + struct k_timer timer; +#ifdef CONFIG_HAS_NORDIC_DMM + uint8_t *usr_buf; + uint8_t *next_usr_buf; +#endif + uint8_t *buf; + size_t buf_len; + size_t offset; + uint8_t *next_buf; + size_t next_buf_len; +#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX + uint32_t idle_cnt; + k_timeout_t timeout; +#else + uint32_t total_byte_cnt; /* Total number of bytes received */ + uint32_t total_user_byte_cnt; /* Total number of bytes passed to user */ + int32_t timeout; /* Timeout set by user */ + int32_t timeout_slab; /* rx_timeout divided by RX_TIMEOUT_DIV */ + int32_t timeout_left; /* Current time left until user callback */ union { - gppi_channel_t ppi; + uint8_t ppi; uint32_t cnt; - } rx_cnt; - volatile int tx_amount; - - atomic_t low_power_mask; - uint8_t rx_flush_buffer[UARTE_HW_RX_FIFO_SIZE]; - uint8_t rx_flush_cnt; - volatile bool rx_enabled; - volatile bool discard_rx_fifo; - bool pending_tx; + } cnt; /* Flag to ensure that RX timeout won't be executed during ENDRX ISR */ volatile bool is_in_irq; +#endif + uint8_t flush_cnt; + volatile bool enabled; + volatile bool discard_fifo; + volatile bool aborted; +}; + +struct uarte_async_cb { + uart_callback_t user_callback; + void *user_data; + struct uarte_async_tx tx; + struct uarte_async_rx rx; }; #endif /* UARTE_ANY_ASYNC */ @@ -163,7 +171,6 @@ struct uarte_nrfx_int_driven { /* Device data structure */ struct uarte_nrfx_data { - const struct device *dev; #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE struct uart_config uart_config; #endif @@ -174,13 +181,15 @@ struct uarte_nrfx_data { struct uarte_async_cb *async; #endif atomic_val_t poll_out_lock; - uint8_t *char_out; - uint8_t *rx_data; - gppi_channel_t ppi_ch_endtx; +#ifdef UARTE_ENHANCED_POLL_OUT + uint8_t ppi_ch_endtx; +#endif + atomic_t flags; }; -#define UARTE_LOW_POWER_TX BIT(0) -#define UARTE_LOW_POWER_RX BIT(1) +#define UARTE_FLAG_LOW_POWER_TX BIT(0) +#define UARTE_FLAG_LOW_POWER_RX BIT(1) +#define UARTE_FLAG_TRIG_RXTO BIT(2) /* If enabled, pins are managed when going to low power mode. */ #define UARTE_CFG_FLAG_GPIO_MGMT BIT(0) @@ -191,6 +200,9 @@ struct uarte_nrfx_data { /* If enabled then TIMER and PPI is used for byte counting. */ #define UARTE_CFG_FLAG_HW_BYTE_COUNTING BIT(2) +/* If enabled then the UARTE instance uses buffers located in cacheable memory. */ +#define UARTE_CFG_FLAG_CACHEABLE BIT(3) + /* If enabled then UARTE peripheral is disabled when not used. This allows * to achieve lowest power consumption in idle.
*/ @@ -229,14 +241,27 @@ struct uarte_nrfx_config { uint32_t flags; bool disable_rx; const struct pinctrl_dev_config *pcfg; -#ifndef CONFIG_UART_USE_RUNTIME_CONFIGURE - nrf_uarte_baudrate_t baudrate; - nrf_uarte_config_t hw_config; +#ifdef CONFIG_HAS_NORDIC_DMM + void *mem_reg; #endif +#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE + /* Bigger than 1 in case of high speed instances. Baudrate is adjusted by that ratio. */ + uint32_t baudrate_div; +#else +#ifdef UARTE_HAS_FRAME_TIMEOUT + uint32_t baudrate; +#endif + nrf_uarte_baudrate_t nrf_baudrate; + nrf_uarte_config_t hw_config; +#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ + #ifdef UARTE_ANY_ASYNC nrfx_timer_t timer; uint8_t *tx_cache; + uint8_t *rx_flush_buf; #endif + uint8_t *poll_out_byte; + uint8_t *poll_in_byte; }; static inline NRF_UARTE_Type *get_uarte_instance(const struct device *dev) @@ -349,6 +374,7 @@ static int baudrate_set(const struct device *dev, uint32_t baudrate) { nrf_uarte_baudrate_t nrf_baudrate; /* calculated baudrate divisor */ NRF_UARTE_Type *uarte = get_uarte_instance(dev); + const struct uarte_nrfx_config *config = dev->config; switch (baudrate) { case 300: @@ -421,6 +447,8 @@ static int baudrate_set(const struct device *dev, uint32_t baudrate) return -EINVAL; } + /* Baudrate needs to be adjusted for instances that are clocked by the higher clock. */ + nrf_baudrate = nrf_baudrate / config->baudrate_div; nrf_uarte_baudrate_set(uarte, nrf_baudrate); return 0; @@ -488,6 +516,9 @@ static int uarte_nrfx_configure(const struct device *dev, return -ENOTSUP; } +#ifdef UARTE_HAS_FRAME_TIMEOUT + uarte_cfg.frame_timeout = NRF_UARTE_FRAME_TIMEOUT_EN; +#endif nrf_uarte_configure(get_uarte_instance(dev), &uarte_cfg); data->uart_config = *cfg; @@ -562,7 +593,6 @@ static int wait_tx_ready(const struct device *dev) return key; } -#if defined(UARTE_ANY_ASYNC) || defined(CONFIG_PM_DEVICE) static int pins_state_change(const struct device *dev, bool on) { const struct uarte_nrfx_config *config = dev->config; @@ -574,40 +604,42 @@ static int pins_state_change(const struct device *dev, bool on) return 0; } -#endif #ifdef UARTE_ANY_ASYNC /* Using Macro instead of static inline function to handle NO_OPTIMIZATIONS case * where static inline fails on linking. */ -#define HW_RX_COUNTING_ENABLED(config) \ - (IS_ENABLED(UARTE_HW_ASYNC) ? (config->flags & UARTE_CFG_FLAG_HW_BYTE_COUNTING) : false) +#define HW_RX_COUNTING_ENABLED(config) \ + (IS_ENABLED(UARTE_ANY_HW_ASYNC) ? \ + (config->flags & UARTE_CFG_FLAG_HW_BYTE_COUNTING) : false) #endif /* UARTE_ANY_ASYNC */ -static int uarte_enable(const struct device *dev, uint32_t mask) +static int uarte_enable(const struct device *dev, uint32_t act_mask, uint32_t sec_mask) { -#ifdef UARTE_ANY_ASYNC - const struct uarte_nrfx_config *config = dev->config; struct uarte_nrfx_data *data = dev->data; + int err; - if (data->async) { - bool disabled = data->async->low_power_mask == 0; - int ret; + if (atomic_or(&data->flags, act_mask) & sec_mask) { + /* Second direction already enabled so UARTE is enabled. 
*/ + return 0; + } - data->async->low_power_mask |= mask; - ret = pins_state_change(dev, true); - if (ret < 0) { - return ret; - } + err = pins_state_change(dev, true); + if (err < 0) { + return err; + } +#ifdef UARTE_ANY_ASYNC + if (data->async) { + const struct uarte_nrfx_config *config = dev->config; - if (HW_RX_COUNTING_ENABLED(config) && disabled) { + if (HW_RX_COUNTING_ENABLED(config)) { const nrfx_timer_t *timer = &config->timer; nrfx_timer_enable(timer); - for (int i = 0; i < data->async->rx_flush_cnt; i++) { + for (int i = 0; i < data->async->rx.flush_cnt; i++) { nrfx_timer_increment(timer); } } @@ -639,7 +671,7 @@ static void tx_start(const struct device *dev, const uint8_t *buf, size_t len) nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_TXSTOPPED); if (config->flags & UARTE_CFG_FLAG_LOW_POWER) { - (void)uarte_enable(dev, UARTE_LOW_POWER_TX); + (void)uarte_enable(dev, UARTE_FLAG_LOW_POWER_TX, UARTE_FLAG_LOW_POWER_RX); nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); } @@ -647,30 +679,39 @@ static void tx_start(const struct device *dev, const uint8_t *buf, size_t len) } #if defined(UARTE_ANY_ASYNC) || defined(CONFIG_PM_DEVICE) -static void uart_disable(const struct device *dev) +static void uarte_disable_locked(const struct device *dev, uint32_t dis_mask, uint32_t sec_mask) { -#ifdef UARTE_ANY_ASYNC - const struct uarte_nrfx_config *config = dev->config; struct uarte_nrfx_data *data = dev->data; + data->flags &= ~dis_mask; + if (data->flags & sec_mask) { + return; + } + +#if defined(UARTE_ANY_ASYNC) && !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) + const struct uarte_nrfx_config *config = dev->config; + if (data->async && HW_RX_COUNTING_ENABLED(config)) { nrfx_timer_disable(&config->timer); /* Timer/counter value is reset when disabled. 
*/ - data->async->rx_total_byte_cnt = 0; - data->async->rx_total_user_byte_cnt = 0; + data->async->rx.total_byte_cnt = 0; + data->async->rx.total_user_byte_cnt = 0; } #endif + (void)pins_state_change(dev, false); nrf_uarte_disable(get_uarte_instance(dev)); } #endif #ifdef UARTE_ANY_ASYNC -static void timer_handler(nrf_timer_event_t event_type, void *p_context) { } static void rx_timeout(struct k_timer *timer); static void tx_timeout(struct k_timer *timer); +#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) +static void timer_handler(nrf_timer_event_t event_type, void *p_context) { } + static int uarte_nrfx_rx_counting_init(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; @@ -681,6 +722,8 @@ static int uarte_nrfx_rx_counting_init(const struct device *dev) if (HW_RX_COUNTING_ENABLED(cfg)) { nrfx_timer_config_t tmr_config = NRFX_TIMER_DEFAULT_CONFIG( NRF_TIMER_BASE_FREQUENCY_GET(cfg->timer.p_reg)); + uint32_t evt_addr = nrf_uarte_event_address_get(uarte, NRF_UARTE_EVENT_RXDRDY); + uint32_t tsk_addr = nrfx_timer_task_address_get(&cfg->timer, NRF_TIMER_TASK_COUNT); tmr_config.mode = NRF_TIMER_MODE_COUNTER; tmr_config.bit_width = NRF_TIMER_BIT_WIDTH_32; @@ -692,11 +735,10 @@ static int uarte_nrfx_rx_counting_init(const struct device *dev) "switching to software byte counting."); return -EINVAL; } else { - nrfx_timer_enable(&cfg->timer); nrfx_timer_clear(&cfg->timer); } - ret = gppi_channel_alloc(&data->async->rx_cnt.ppi); + ret = nrfx_gppi_channel_alloc(&data->async->rx.cnt.ppi); if (ret != NRFX_SUCCESS) { LOG_ERR("Failed to allocate PPI Channel, " "switching to software byte counting."); @@ -704,27 +746,8 @@ static int uarte_nrfx_rx_counting_init(const struct device *dev) return -EINVAL; } -#if CONFIG_HAS_HW_NRF_PPI - ret = nrfx_ppi_channel_assign( - data->async->rx_cnt.ppi, - nrf_uarte_event_address_get(uarte, - NRF_UARTE_EVENT_RXDRDY), - nrfx_timer_task_address_get(&cfg->timer, - NRF_TIMER_TASK_COUNT)); - - if (ret != NRFX_SUCCESS) { - return -EIO; - } -#else - nrf_uarte_publish_set(uarte, - NRF_UARTE_EVENT_RXDRDY, - data->async->rx_cnt.ppi); - nrf_timer_subscribe_set(cfg->timer.p_reg, - NRF_TIMER_TASK_COUNT, - data->async->rx_cnt.ppi); - -#endif - ret = gppi_channel_enable(data->async->rx_cnt.ppi); + nrfx_gppi_channel_endpoints_setup(data->async->rx.cnt.ppi, evt_addr, tsk_addr); + nrfx_gppi_channels_enable(BIT(data->async->rx.cnt.ppi)); if (ret != NRFX_SUCCESS) { return -EIO; } @@ -734,24 +757,30 @@ static int uarte_nrfx_rx_counting_init(const struct device *dev) return 0; } +#endif /* !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) */ static int uarte_nrfx_init(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; NRF_UARTE_Type *uarte = get_uarte_instance(dev); - + static const uint32_t rx_int_mask = + NRF_UARTE_INT_ENDRX_MASK | + NRF_UARTE_INT_RXSTARTED_MASK | + NRF_UARTE_INT_ERROR_MASK | + NRF_UARTE_INT_RXTO_MASK | + (IS_ENABLED(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) ? 
NRF_UARTE_INT_RXDRDY_MASK : 0); + +#if defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) + nrf_uarte_int_enable(uarte, NRF_UARTE_INT_RXDRDY_MASK); +#else int ret = uarte_nrfx_rx_counting_init(dev); if (ret != 0) { return ret; } +#endif - data->async->low_power_mask = UARTE_LOW_POWER_TX; - nrf_uarte_int_enable(uarte, - NRF_UARTE_INT_ENDRX_MASK | - NRF_UARTE_INT_RXSTARTED_MASK | - NRF_UARTE_INT_ERROR_MASK | - NRF_UARTE_INT_RXTO_MASK); + nrf_uarte_int_enable(uarte, rx_int_mask); nrf_uarte_enable(uarte); /** @@ -770,10 +799,10 @@ static int uarte_nrfx_init(const struct device *dev) nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO); } - k_timer_init(&data->async->rx_timeout_timer, rx_timeout, NULL); - k_timer_user_data_set(&data->async->rx_timeout_timer, data); - k_timer_init(&data->async->tx_timeout_timer, tx_timeout, NULL); - k_timer_user_data_set(&data->async->tx_timeout_timer, data); + k_timer_init(&data->async->rx.timer, rx_timeout, NULL); + k_timer_user_data_set(&data->async->rx.timer, (void *)dev); + k_timer_init(&data->async->tx.timer, tx_timeout, NULL); + k_timer_user_data_set(&data->async->tx.timer, (void *)dev); return 0; } @@ -786,11 +815,11 @@ static void start_tx_locked(const struct device *dev, struct uarte_nrfx_data *da { if (!is_tx_ready(dev)) { /* Active poll out, postpone until it is completed. */ - data->async->pending_tx = true; + data->async->tx.pending = true; } else { - data->async->pending_tx = false; - data->async->tx_amount = -1; - tx_start(dev, data->async->xfer_buf, data->async->xfer_len); + data->async->tx.pending = false; + data->async->tx.amount = -1; + tx_start(dev, data->async->tx.xfer_buf, data->async->tx.xfer_len); } } @@ -803,7 +832,7 @@ static bool setup_tx_cache(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; const struct uarte_nrfx_config *config = dev->config; - size_t remaining = data->async->tx_size - data->async->tx_cache_offset; + size_t remaining = data->async->tx.len - data->async->tx.cache_offset; if (!remaining) { return false; @@ -811,9 +840,9 @@ static bool setup_tx_cache(const struct device *dev) size_t len = MIN(remaining, CONFIG_UART_ASYNC_TX_CACHE_SIZE); - data->async->xfer_len = len; - data->async->xfer_buf = config->tx_cache; - memcpy(config->tx_cache, &data->async->tx_buf[data->async->tx_cache_offset], len); + data->async->tx.xfer_len = len; + data->async->tx.xfer_buf = config->tx_cache; + memcpy(config->tx_cache, &data->async->tx.buf[data->async->tx.cache_offset], len); return true; } @@ -839,20 +868,20 @@ static int uarte_nrfx_tx(const struct device *dev, const uint8_t *buf, unsigned int key = irq_lock(); - if (data->async->tx_size) { + if (data->async->tx.len) { irq_unlock(key); return -EBUSY; } - data->async->tx_size = len; - data->async->tx_buf = buf; + data->async->tx.len = len; + data->async->tx.buf = buf; nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); - if (nrfx_is_in_ram(buf)) { - data->async->xfer_buf = buf; - data->async->xfer_len = len; + if (nrf_dma_accessible_check(uarte, buf)) { + data->async->tx.xfer_buf = buf; + data->async->tx.xfer_len = len; } else { - data->async->tx_cache_offset = 0; + data->async->tx.cache_offset = 0; (void)setup_tx_cache(dev); } @@ -861,7 +890,7 @@ static int uarte_nrfx_tx(const struct device *dev, const uint8_t *buf, irq_unlock(key); if (has_hwfc(dev) && timeout != SYS_FOREVER_US) { - k_timer_start(&data->async->tx_timeout_timer, K_USEC(timeout), K_NO_WAIT); + k_timer_start(&data->async->tx.timer, K_USEC(timeout), K_NO_WAIT); } return 0; } @@ -871,12 +900,12 @@ static 
int uarte_nrfx_tx_abort(const struct device *dev) struct uarte_nrfx_data *data = dev->data; NRF_UARTE_Type *uarte = get_uarte_instance(dev); - if (data->async->tx_buf == NULL) { + if (data->async->tx.buf == NULL) { return -EFAULT; } - data->async->pending_tx = false; - k_timer_stop(&data->async->tx_timeout_timer); + data->async->tx.pending = false; + k_timer_stop(&data->async->tx.timer); nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX); return 0; @@ -896,25 +925,22 @@ static void notify_uart_rx_rdy(const struct device *dev, size_t len) struct uarte_nrfx_data *data = dev->data; struct uart_event evt = { .type = UART_RX_RDY, - .data.rx.buf = data->async->rx_buf, + .data.rx.buf = data->async->rx.buf, .data.rx.len = len, - .data.rx.offset = data->async->rx_offset + .data.rx.offset = data->async->rx.offset }; user_callback(dev, &evt); } -static void rx_buf_release(const struct device *dev, uint8_t **buf) +static void rx_buf_release(const struct device *dev, uint8_t *buf) { - if (*buf) { - struct uart_event evt = { - .type = UART_RX_BUF_RELEASED, - .data.rx_buf.buf = *buf, - }; + struct uart_event evt = { + .type = UART_RX_BUF_RELEASED, + .data.rx_buf.buf = buf, + }; - user_callback(dev, &evt); - *buf = NULL; - } + user_callback(dev, &evt); } static void notify_rx_disable(const struct device *dev) @@ -926,11 +952,22 @@ static void notify_rx_disable(const struct device *dev) user_callback(dev, (struct uart_event *)&evt); } +#ifdef UARTE_HAS_FRAME_TIMEOUT +static uint32_t us_to_bauds(uint32_t baudrate, int32_t timeout) +{ + uint32_t divisor = 1000000 / timeout; + uint32_t bauds = baudrate / divisor; + + return MIN(bauds, UARTE_FRAMETIMEOUT_COUNTERTOP_Msk); +} +#endif + static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout) { struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *rdata = &data->async->rx; const struct uarte_nrfx_config *cfg = dev->config; NRF_UARTE_Type *uarte = get_uarte_instance(dev); int ret = 0; @@ -944,36 +981,81 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, * for the RXTO event after a call to uart_rx_disable() to discard * data from the UARTE internal RX FIFO. */ - if (data->async->rx_enabled || data->async->discard_rx_fifo) { + if (rdata->enabled || rdata->discard_fifo) { return -EBUSY; } - data->async->rx_timeout = timeout; - data->async->rx_timeout_slab = timeout / RX_TIMEOUT_DIV; +#ifdef CONFIG_HAS_NORDIC_DMM + uint8_t *dma_buf; + + ret = dmm_buffer_in_prepare(cfg->mem_reg, buf, len, (void **)&dma_buf); + if (ret < 0) { + return ret; + } + + rdata->usr_buf = buf; + buf = dma_buf; +#endif - data->async->rx_buf = buf; - data->async->rx_buf_len = len; - data->async->rx_offset = 0; - data->async->rx_next_buf = NULL; - data->async->rx_next_buf_len = 0; +#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX +#ifdef UARTE_HAS_FRAME_TIMEOUT + if (timeout && (timeout != SYS_FOREVER_US)) { + uint32_t baudrate = COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, + (data->uart_config.baudrate), (cfg->baudrate)); + + rdata->timeout = K_USEC(timeout); + nrf_uarte_frame_timeout_set(uarte, us_to_bauds(baudrate, timeout)); + nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_FRAME_TIMEOUT_STOPRX); + rdata->idle_cnt = 1; + } else { + rdata->timeout = K_NO_WAIT; + } +#else + rdata->timeout = (timeout && timeout == SYS_FOREVER_US) ? 
+ K_NO_WAIT : K_USEC(timeout / RX_TIMEOUT_DIV); + rdata->idle_cnt = RX_TIMEOUT_DIV - 1; +#endif +#else + rdata->timeout = timeout; + rdata->timeout_slab = timeout / RX_TIMEOUT_DIV; +#endif + + rdata->buf = buf; + rdata->buf_len = len; + rdata->offset = 0; + rdata->next_buf = NULL; + rdata->next_buf_len = 0; if (cfg->flags & UARTE_CFG_FLAG_LOW_POWER) { - if (data->async->rx_flush_cnt) { - int cpy_len = MIN(len, data->async->rx_flush_cnt); + if (rdata->flush_cnt) { + int cpy_len = MIN(len, rdata->flush_cnt); - memcpy(buf, data->async->rx_flush_buffer, cpy_len); + memcpy(buf, cfg->rx_flush_buf, cpy_len); buf += cpy_len; len -= cpy_len; +#ifdef CONFIG_HAS_NORDIC_DMM + if (cfg->flags & UARTE_CFG_FLAG_CACHEABLE) { + sys_cache_data_flush_range(buf, cpy_len); + } +#endif /* If flush content filled whole new buffer complete the * request and indicate rx being disabled. */ if (!len) { - data->async->rx_flush_cnt -= cpy_len; - notify_uart_rx_rdy(dev, cpy_len); - rx_buf_release(dev, &data->async->rx_buf); - notify_rx_disable(dev); + rdata->flush_cnt -= cpy_len; + memmove(cfg->rx_flush_buf, &cfg->rx_flush_buf[cpy_len], + rdata->flush_cnt); + atomic_or(&data->flags, UARTE_FLAG_TRIG_RXTO); + NRFX_IRQ_PENDING_SET(nrfx_get_irq_number(uarte)); return 0; + } else { +#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX + if (!K_TIMEOUT_EQ(rdata->timeout, K_NO_WAIT)) { + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY); + k_timer_start(&rdata->timer, rdata->timeout, K_NO_WAIT); + } +#endif } } } @@ -983,11 +1065,11 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); - data->async->rx_enabled = true; + rdata->enabled = true; if (cfg->flags & UARTE_CFG_FLAG_LOW_POWER) { unsigned int key = irq_lock(); - ret = uarte_enable(dev, UARTE_LOW_POWER_RX); + ret = uarte_enable(dev, UARTE_FLAG_LOW_POWER_RX, UARTE_FLAG_LOW_POWER_TX); irq_unlock(key); } @@ -1000,17 +1082,39 @@ static int uarte_nrfx_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len) { struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *rdata = &data->async->rx; int err; NRF_UARTE_Type *uarte = get_uarte_instance(dev); unsigned int key = irq_lock(); - if (data->async->rx_buf == NULL) { + if (rdata->buf == NULL) { err = -EACCES; - } else if (data->async->rx_next_buf == NULL) { - data->async->rx_next_buf = buf; - data->async->rx_next_buf_len = len; + } else if (rdata->next_buf == NULL) { +#ifdef CONFIG_HAS_NORDIC_DMM + uint8_t *dma_buf; + const struct uarte_nrfx_config *config = dev->config; + + err = dmm_buffer_in_prepare(config->mem_reg, buf, len, (void **)&dma_buf); + if (err < 0) { + return err; + } + rdata->next_usr_buf = buf; + buf = dma_buf; +#endif + rdata->next_buf = buf; + rdata->next_buf_len = len; nrf_uarte_rx_buffer_set(uarte, buf, len); - nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); + /* If the buffer is shorter than the RX FIFO then there is a risk that, + * due to interrupt handling latency, the ENDRX event is not handled on + * time and, due to the ENDRX_STARTRX short, data starts to be overwritten. + * In that case the short is not enabled and the ENDRX event handler + * manually starts RX for that buffer. Thanks to the RX FIFO there is a + * 5 byte period for doing that. If the interrupt latency is higher and + * there is no HWFC, data will be lost or corrupted in either case.
+ */ + if (len >= UARTE_HW_RX_FIFO_SIZE) { + nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); + } err = 0; } else { err = -EBUSY; @@ -1040,19 +1144,20 @@ static int uarte_nrfx_callback_set(const struct device *dev, static int uarte_nrfx_rx_disable(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *rdata = &data->async->rx; NRF_UARTE_Type *uarte = get_uarte_instance(dev); - if (data->async->rx_buf == NULL) { + if (rdata->buf == NULL) { return -EFAULT; } - if (data->async->rx_next_buf != NULL) { + if (rdata->next_buf != NULL) { nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); } - k_timer_stop(&data->async->rx_timeout_timer); - data->async->rx_enabled = false; - data->async->discard_rx_fifo = true; + k_timer_stop(&rdata->timer); + rdata->enabled = false; + rdata->discard_fifo = true; nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); @@ -1061,8 +1166,8 @@ static int uarte_nrfx_rx_disable(const struct device *dev) static void tx_timeout(struct k_timer *timer) { - struct uarte_nrfx_data *data = k_timer_user_data_get(timer); - (void) uarte_nrfx_tx_abort(data->dev); + const struct device *dev = k_timer_user_data_get(timer); + (void) uarte_nrfx_tx_abort(dev); } /** @@ -1075,12 +1180,30 @@ static void tx_timeout(struct k_timer *timer) */ static void rx_timeout(struct k_timer *timer) { - struct uarte_nrfx_data *data = k_timer_user_data_get(timer); - const struct device *dev = data->dev; + const struct device *dev = k_timer_user_data_get(timer); + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *rdata = &data->async->rx; + +#if CONFIG_UART_NRFX_UARTE_ENHANCED_RX + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + + if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) { + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY); + rdata->idle_cnt = RX_TIMEOUT_DIV - 1; + } else { + rdata->idle_cnt--; + if (rdata->idle_cnt == 0) { + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); + return; + } + } + + k_timer_start(&rdata->timer, rdata->timeout, K_NO_WAIT); +#else /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */ const struct uarte_nrfx_config *cfg = dev->config; uint32_t read; - if (data->async->is_in_irq) { + if (rdata->is_in_irq) { return; } @@ -1093,21 +1216,20 @@ static void rx_timeout(struct k_timer *timer) if (HW_RX_COUNTING_ENABLED(cfg)) { read = nrfx_timer_capture(&cfg->timer, 0); } else { - read = data->async->rx_cnt.cnt; + read = rdata->cnt.cnt; } /* Check if data was received since last function call */ - if (read != data->async->rx_total_byte_cnt) { - data->async->rx_total_byte_cnt = read; - data->async->rx_timeout_left = data->async->rx_timeout; + if (read != rdata->total_byte_cnt) { + rdata->total_byte_cnt = read; + rdata->timeout_left = rdata->timeout; } /* Check if there is data that was not sent to user yet * Note though that 'len' is a count of data bytes received, but not * necessarily the amount available in the current buffer */ - int32_t len = data->async->rx_total_byte_cnt - - data->async->rx_total_user_byte_cnt; + int32_t len = rdata->total_byte_cnt - rdata->total_user_byte_cnt; if (!HW_RX_COUNTING_ENABLED(cfg) && (len < 0)) { @@ -1116,7 +1238,7 @@ static void rx_timeout(struct k_timer *timer) * At this point, the number of received bytes is at least * equal to what was reported to the user. 
*/ - data->async->rx_cnt.cnt = data->async->rx_total_user_byte_cnt; + rdata->cnt.cnt = rdata->total_user_byte_cnt; len = 0; } @@ -1128,37 +1250,34 @@ static void rx_timeout(struct k_timer *timer) */ bool clipped = false; - if (len + data->async->rx_offset > data->async->rx_buf_len) { - len = data->async->rx_buf_len - data->async->rx_offset; + if (len + rdata->offset > rdata->buf_len) { + len = rdata->buf_len - rdata->offset; clipped = true; } if (len > 0) { - if (clipped || - (data->async->rx_timeout_left - < data->async->rx_timeout_slab)) { + if (clipped || (rdata->timeout_left < rdata->timeout_slab)) { /* rx_timeout us elapsed since last receiving */ - if (data->async->rx_buf != NULL) { + if (rdata->buf != NULL) { notify_uart_rx_rdy(dev, len); - data->async->rx_offset += len; - data->async->rx_total_user_byte_cnt += len; + rdata->offset += len; + rdata->total_user_byte_cnt += len; } } else { - data->async->rx_timeout_left -= - data->async->rx_timeout_slab; + rdata->timeout_left -= rdata->timeout_slab; } /* If there's nothing left to report until the buffers are * switched then the timer can be stopped */ if (clipped) { - k_timer_stop(&data->async->rx_timeout_timer); + k_timer_stop(&rdata->timer); } } nrf_uarte_int_enable(get_uarte_instance(dev), NRF_UARTE_INT_ENDRX_MASK); - +#endif /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */ } #define UARTE_ERROR_FROM_MASK(mask) \ @@ -1176,6 +1295,7 @@ static void error_isr(const struct device *dev) .type = UART_RX_STOPPED, .data.rx_stop.reason = UARTE_ERROR_FROM_MASK(err), }; + user_callback(dev, &evt); (void) uarte_nrfx_rx_disable(dev); } @@ -1183,37 +1303,58 @@ static void error_isr(const struct device *dev) static void rxstarted_isr(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *rdata = &data->async->rx; struct uart_event evt = { .type = UART_RX_BUF_REQUEST, }; - user_callback(dev, &evt); - if (data->async->rx_timeout != SYS_FOREVER_US) { - data->async->rx_timeout_left = data->async->rx_timeout; - k_timer_start(&data->async->rx_timeout_timer, - K_USEC(data->async->rx_timeout_slab), - K_USEC(data->async->rx_timeout_slab)); + +#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + + if (!K_TIMEOUT_EQ(rdata->timeout, K_NO_WAIT)) { + nrf_uarte_int_enable(uarte, NRF_UARTE_INT_RXDRDY_MASK); } +#else + if (rdata->timeout != SYS_FOREVER_US) { + k_timeout_t timeout = K_USEC(rdata->timeout_slab); + + rdata->timeout_left = rdata->timeout; + k_timer_start(&rdata->timer, timeout, timeout); + } +#endif + + user_callback(dev, &evt); } static void endrx_isr(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *rdata = &data->async->rx; NRF_UARTE_Type *uarte = get_uarte_instance(dev); - data->async->is_in_irq = true; +#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) + rdata->is_in_irq = true; +#endif /* ensure rx timer is stopped - it will be restarted in RXSTARTED * handler if needed */ - k_timer_stop(&data->async->rx_timeout_timer); + k_timer_stop(&rdata->timer); /* this is the amount that the EasyDMA controller has copied into the * buffer */ - const int rx_amount = nrf_uarte_rx_amount_get(uarte) + - data->async->rx_flush_cnt; + const int rx_amount = nrf_uarte_rx_amount_get(uarte) + rdata->flush_cnt; - data->async->rx_flush_cnt = 0; +#ifdef CONFIG_HAS_NORDIC_DMM + const struct uarte_nrfx_config *config = dev->config; + int err = dmm_buffer_in_release(config->mem_reg, rdata->usr_buf, rx_amount, rdata->buf); + + (void)err; + 
__ASSERT_NO_MSG(err == 0); + rdata->buf = rdata->usr_buf; +#endif + rdata->flush_cnt = 0; /* The 'rx_offset' can be bigger than 'rx_amount', so the length * of data we report back to the user may need to be clipped. @@ -1222,56 +1363,59 @@ static void endrx_isr(const struct device *dev) * here to handle this buffer. (The next buffer is now already active * because of the ENDRX_STARTRX shortcut) */ - int rx_len = rx_amount - data->async->rx_offset; + int rx_len = rx_amount - rdata->offset; if (rx_len < 0) { rx_len = 0; } - data->async->rx_total_user_byte_cnt += rx_len; +#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) + rdata->total_user_byte_cnt += rx_len; +#endif /* Only send the RX_RDY event if there is something to send */ if (rx_len > 0) { notify_uart_rx_rdy(dev, rx_len); } - if (!data->async->rx_enabled) { - data->async->is_in_irq = false; - return; - } - - rx_buf_release(dev, &data->async->rx_buf); - - /* If there is a next buffer, then STARTRX will have already been - * invoked by the short (the next buffer will be filling up already) - * and here we just do the swap of which buffer the driver is following, - * the next rx_timeout() will update the rx_offset. - */ - unsigned int key = irq_lock(); - - if (data->async->rx_next_buf) { - data->async->rx_buf = data->async->rx_next_buf; - data->async->rx_buf_len = data->async->rx_next_buf_len; - data->async->rx_next_buf = NULL; - data->async->rx_next_buf_len = 0; - - data->async->rx_offset = 0; - /* Check is based on assumption that ISR handler handles - * ENDRX before RXSTARTED so if short was set on time, RXSTARTED - * event will be set. + rx_buf_release(dev, rdata->buf); + rdata->buf = rdata->next_buf; + rdata->buf_len = rdata->next_buf_len; +#ifdef CONFIG_HAS_NORDIC_DMM + rdata->usr_buf = rdata->next_usr_buf; +#endif + rdata->next_buf = NULL; + rdata->next_buf_len = 0; + rdata->offset = 0; + + if (rdata->enabled) { + /* If there is a next buffer, then STARTRX will have already been + * invoked by the short (the next buffer will be filling up already) + * and here we just do the swap of which buffer the driver is following, + * the next rx_timeout() will update the rx_offset. */ - if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) { - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); + unsigned int key = irq_lock(); + + if (rdata->buf) { + /* Check is based on assumption that ISR handler handles + * ENDRX before RXSTARTED so if short was set on time, RXSTARTED + * event will be set. + */ + if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) { + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); + } + /* Remove the short until the subsequent next buffer is setup */ + nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); + } else { + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); } - /* Remove the short until the subsequent next buffer is setup */ - nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); - } else { - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); - } - irq_unlock(key); + irq_unlock(key); } - data->async->is_in_irq = false; +#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) + rdata->is_in_irq = false; +#endif } /* Function for flushing internal RX fifo. Function can be called in case @@ -1297,26 +1441,23 @@ static void endrx_isr(const struct device *dev) * * @return number of bytes flushed from the fifo.
*/ -static uint8_t rx_flush(const struct device *dev, uint8_t *buf, uint32_t len) + +static uint8_t rx_flush(const struct device *dev, uint8_t *buf) { /* Flushing RX fifo requires buffer bigger than 4 bytes to empty fifo*/ - static const uint8_t dirty; + static const uint8_t dirty = 0xAA; NRF_UARTE_Type *uarte = get_uarte_instance(dev); - uint32_t prev_rx_amount = nrf_uarte_rx_amount_get(uarte); - uint8_t tmp_buf[UARTE_HW_RX_FIFO_SIZE]; - uint8_t *flush_buf = buf ? buf : tmp_buf; - size_t flush_len = buf ? len : sizeof(tmp_buf); - - if (buf) { - memset(buf, dirty, len); - flush_buf = buf; - flush_len = len; + uint32_t prev_rx_amount; + uint32_t rx_amount; + + if (IS_ENABLED(RX_FLUSH_WORKAROUND)) { + memset(buf, dirty, UARTE_HW_RX_FIFO_SIZE); + prev_rx_amount = nrf_uarte_rx_amount_get(uarte); } else { - flush_buf = tmp_buf; - flush_len = sizeof(tmp_buf); + prev_rx_amount = 0; } - nrf_uarte_rx_buffer_set(uarte, flush_buf, flush_len); + nrf_uarte_rx_buffer_set(uarte, buf, UARTE_HW_RX_FIFO_SIZE); /* Final part of handling RXTO event is in ENDRX interrupt * handler. ENDRX is generated as a result of FLUSHRX task. */ @@ -1326,18 +1467,22 @@ static uint8_t rx_flush(const struct device *dev, uint8_t *buf, uint32_t len) /* empty */ } nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); - if (!buf) { - return nrf_uarte_rx_amount_get(uarte); + rx_amount = nrf_uarte_rx_amount_get(uarte); + if (!buf || !IS_ENABLED(RX_FLUSH_WORKAROUND)) { + return rx_amount; } - uint32_t rx_amount = nrf_uarte_rx_amount_get(uarte); - if (rx_amount != prev_rx_amount) { return rx_amount; } - for (int i = 0; i < flush_len; i++) { + if (rx_amount > UARTE_HW_RX_FIFO_SIZE) { + return 0; + } + + for (int i = 0; i < rx_amount; i++) { if (buf[i] != dirty) { return rx_amount; } @@ -1346,29 +1491,6 @@ static uint8_t rx_flush(const struct device *dev, uint8_t *buf, uint32_t len) return 0; } -static void async_uart_release(const struct device *dev, uint32_t dir_mask) -{ - struct uarte_nrfx_data *data = dev->data; - unsigned int key = irq_lock(); - - data->async->low_power_mask &= ~dir_mask; - if (!data->async->low_power_mask) { - if (dir_mask == UARTE_LOW_POWER_RX) { - data->async->rx_flush_cnt = - rx_flush(dev, data->async->rx_flush_buffer, - sizeof(data->async->rx_flush_buffer)); - } - - uart_disable(dev); - int err = pins_state_change(dev, false); - - (void)err; - __ASSERT_NO_MSG(err == 0); - } - - irq_unlock(key); -} - /* This handler is called when the receiver is stopped. If rx was aborted * data from fifo is flushed. */ @@ -1376,9 +1498,16 @@ static void rxto_isr(const struct device *dev) { const struct uarte_nrfx_config *config = dev->config; struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *rdata = &data->async->rx; - rx_buf_release(dev, &data->async->rx_buf); - rx_buf_release(dev, &data->async->rx_next_buf); + if (rdata->buf) { +#ifdef CONFIG_HAS_NORDIC_DMM + (void)dmm_buffer_in_release(config->mem_reg, rdata->usr_buf, 0, rdata->buf); + rdata->buf = rdata->usr_buf; +#endif + rx_buf_release(dev, rdata->buf); + rdata->buf = NULL; + } /* This point can be reached in two cases: * 1. RX is disabled because all provided RX buffers have been filled. @@ -1388,14 +1517,39 @@ static void rxto_isr(const struct device *dev) * In the second case, additionally, data from the UARTE internal RX * FIFO need to be discarded. 
*/ - data->async->rx_enabled = false; - if (data->async->discard_rx_fifo) { - data->async->discard_rx_fifo = false; - (void)rx_flush(dev, NULL, 0); + rdata->enabled = false; + if (config->flags & UARTE_CFG_FLAG_LOW_POWER) { + if (rdata->discard_fifo) { + rdata->discard_fifo = false; +#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) + if (HW_RX_COUNTING_ENABLED(config)) { + uint8_t buf[UARTE_HW_RX_FIFO_SIZE]; + + /* Flushed bytes need to be included because TIMER+PPI got RXDRDY + * events and counted those bytes. + */ + rdata->total_user_byte_cnt += rx_flush(dev, buf); + (void)buf; + } +#endif + } else { + rdata->flush_cnt = rx_flush(dev, config->rx_flush_buf); + } } +#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX + NRF_UARTE_Type *uarte = get_uarte_instance(dev); +#ifdef UARTE_HAS_FRAME_TIMEOUT + nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_FRAME_TIMEOUT_STOPRX); +#endif + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY); +#endif + if (config->flags & UARTE_CFG_FLAG_LOW_POWER) { - async_uart_release(dev, UARTE_LOW_POWER_RX); + uint32_t key = irq_lock(); + + uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_RX, UARTE_FLAG_LOW_POWER_TX); + irq_unlock(key); } notify_rx_disable(dev); @@ -1410,20 +1564,22 @@ static void txstopped_isr(const struct device *dev) if (config->flags & UARTE_CFG_FLAG_LOW_POWER) { nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); - async_uart_release(dev, UARTE_LOW_POWER_TX); + key = irq_lock(); + uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_TX, UARTE_FLAG_LOW_POWER_RX); + irq_unlock(key); - if (!data->async->tx_size) { + if (!data->async->tx.len) { return; } } - if (!data->async->tx_buf) { + if (!data->async->tx.buf) { return; } key = irq_lock(); - size_t amount = (data->async->tx_amount >= 0) ? - data->async->tx_amount : nrf_uarte_tx_amount_get(uarte); + size_t amount = (data->async->tx.amount >= 0) ? - data->async->tx.amount : nrf_uarte_tx_amount_get(uarte); irq_unlock(key); @@ -1431,7 +1587,7 @@ * was called when there was ongoing uart_poll_out. Handling * TXSTOPPED interrupt means that uart_poll_out has completed. */ - if (data->async->pending_tx) { + if (data->async->tx.pending) { key = irq_lock(); start_tx_locked(dev, data); irq_unlock(key); @@ -1439,12 +1595,12 @@ } /* Cache buffer is used because tx_buf wasn't in RAM. */ - if (data->async->tx_buf != data->async->xfer_buf) { + if (data->async->tx.buf != data->async->tx.xfer_buf) { /* In that case setup next chunk. If that was the last chunk * fall back to reporting TX_DONE. */ - if (amount == data->async->xfer_len) { - data->async->tx_cache_offset += amount; + if (amount == data->async->tx.xfer_len) { + data->async->tx.cache_offset += amount; if (setup_tx_cache(dev)) { key = irq_lock(); start_tx_locked(dev, data); @@ -1452,53 +1608,79 @@ return; } - /* Amount is already included in tx_cache_offset. */ - amount = data->async->tx_cache_offset; + /* Amount is already included in cache_offset. */ + amount = data->async->tx.cache_offset; } else { - /* TX was aborted, include cache_offset in amount.
*/ + amount += data->async->tx.cache_offset; } } - k_timer_stop(&data->async->tx_timeout_timer); + k_timer_stop(&data->async->tx.timer); struct uart_event evt = { - .data.tx.buf = data->async->tx_buf, + .data.tx.buf = data->async->tx.buf, .data.tx.len = amount, }; - if (amount == data->async->tx_size) { + if (amount == data->async->tx.len) { evt.type = UART_TX_DONE; } else { evt.type = UART_TX_ABORTED; } nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); - data->async->tx_buf = NULL; - data->async->tx_size = 0; + data->async->tx.buf = NULL; + data->async->tx.len = 0; user_callback(dev, &evt); } +static void rxdrdy_isr(const struct device *dev) +{ + struct uarte_nrfx_data *data = dev->data; + +#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + + data->async->rx.idle_cnt = RX_TIMEOUT_DIV - 1; + k_timer_start(&data->async->rx.timer, data->async->rx.timeout, K_NO_WAIT); + nrf_uarte_int_disable(uarte, NRF_UARTE_INT_RXDRDY_MASK); +#else + data->async->rx.cnt.cnt++; +#endif + +} + +static bool event_check_clear(NRF_UARTE_Type *uarte, nrf_uarte_event_t event, + uint32_t int_mask, uint32_t int_en_mask) +{ + if (nrf_uarte_event_check(uarte, event) && (int_mask & int_en_mask)) { + nrf_uarte_event_clear(uarte, event); + return true; + } + + return false; +} + static void uarte_nrfx_isr_async(const void *arg) { const struct device *dev = arg; NRF_UARTE_Type *uarte = get_uarte_instance(dev); const struct uarte_nrfx_config *config = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *rdata = &data->async->rx; + uint32_t imask = nrf_uarte_int_enable_check(uarte, UINT32_MAX); if (!HW_RX_COUNTING_ENABLED(config) && - && nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) { - nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY); - data->async->rx_cnt.cnt++; - return; + event_check_clear(uarte, NRF_UARTE_EVENT_RXDRDY, NRF_UARTE_INT_RXDRDY_MASK, imask)) { + rxdrdy_isr(dev); } - if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ERROR)) { - nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ERROR); + if (event_check_clear(uarte, NRF_UARTE_EVENT_ERROR, NRF_UARTE_INT_ERROR_MASK, imask)) { error_isr(dev); } - if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX) - && nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDRX_MASK)) { + if (event_check_clear(uarte, NRF_UARTE_EVENT_ENDRX, NRF_UARTE_INT_ENDRX_MASK, imask)) { nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); endrx_isr(dev); } @@ -1539,6 +1721,24 @@ static void uarte_nrfx_isr_async(const void *arg) NRF_UARTE_INT_TXSTOPPED_MASK)) { txstopped_isr(dev); } + + if (atomic_and(&data->flags, ~UARTE_FLAG_TRIG_RXTO) & UARTE_FLAG_TRIG_RXTO) { +#ifdef CONFIG_HAS_NORDIC_DMM + int ret; + + ret = dmm_buffer_in_release(config->mem_reg, rdata->usr_buf, + rdata->buf_len, rdata->buf); + + (void)ret; + __ASSERT_NO_MSG(ret == 0); + rdata->buf = rdata->usr_buf; +#endif + notify_uart_rx_rdy(dev, rdata->buf_len); + rx_buf_release(dev, rdata->buf); + rdata->buf_len = 0; + rdata->buf = NULL; + notify_rx_disable(dev); + } } #endif /* UARTE_ANY_ASYNC */ @@ -1553,11 +1753,12 @@ static void uarte_nrfx_isr_async(const void *arg) */ static int uarte_nrfx_poll_in(const struct device *dev, unsigned char *c) { - - const struct uarte_nrfx_data *data = dev->data; + const struct uarte_nrfx_config *config = dev->config; NRF_UARTE_Type *uarte = get_uarte_instance(dev); #ifdef UARTE_ANY_ASYNC + struct uarte_nrfx_data *data = dev->data; + if (data->async) { return -ENOTSUP; } @@ -1567,7 +1768,7 @@ static int 
uarte_nrfx_poll_in(const struct device *dev, unsigned char *c) return -1; } - *c = *data->rx_data; + *c = *config->poll_in_byte; /* clear the interrupt */ nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); @@ -1584,7 +1785,7 @@ static int uarte_nrfx_poll_in(const struct device *dev, unsigned char *c) */ static void uarte_nrfx_poll_out(const struct device *dev, unsigned char c) { - struct uarte_nrfx_data *data = dev->data; + const struct uarte_nrfx_config *config = dev->config; bool isr_mode = k_is_in_isr() || k_is_pre_kernel(); unsigned int key; @@ -1593,11 +1794,12 @@ static void uarte_nrfx_poll_out(const struct device *dev, unsigned char c) key = irq_lock(); if (is_tx_ready(dev)) { #if UARTE_ANY_ASYNC - if (data->async && data->async->tx_size && - data->async->tx_amount < 0) { - data->async->tx_amount = - nrf_uarte_tx_amount_get( - get_uarte_instance(dev)); + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + struct uarte_nrfx_data *data = dev->data; + + if (data->async && data->async->tx.len && + data->async->tx.amount < 0) { + data->async->tx.amount = nrf_uarte_tx_amount_get(uarte); } #endif break; @@ -1610,8 +1812,8 @@ static void uarte_nrfx_poll_out(const struct device *dev, unsigned char c) key = wait_tx_ready(dev); } - *data->char_out = c; - tx_start(dev, data->char_out, 1); + *config->poll_out_byte = c; + tx_start(dev, config->poll_out_byte, 1); irq_unlock(key); } @@ -1654,14 +1856,14 @@ static int uarte_nrfx_fifo_read(const struct device *dev, { int num_rx = 0; NRF_UARTE_Type *uarte = get_uarte_instance(dev); - const struct uarte_nrfx_data *data = dev->data; + const struct uarte_nrfx_config *config = dev->config; if (size > 0 && nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) { /* Clear the interrupt */ nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); /* Receive a character */ - rx_data[num_rx++] = *data->rx_data; + rx_data[num_rx++] = *config->poll_in_byte; nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); } @@ -1818,12 +2020,13 @@ static const struct uart_driver_api uart_nrfx_uarte_driver_api = { #endif /* UARTE_INTERRUPT_DRIVEN */ }; +#ifdef UARTE_ENHANCED_POLL_OUT static int endtx_stoptx_ppi_init(NRF_UARTE_Type *uarte, struct uarte_nrfx_data *data) { nrfx_err_t ret; - ret = gppi_channel_alloc(&data->ppi_ch_endtx); + ret = nrfx_gppi_channel_alloc(&data->ppi_ch_endtx); if (ret != NRFX_SUCCESS) { LOG_ERR("Failed to allocate PPI Channel"); return -EIO; @@ -1836,18 +2039,21 @@ static int endtx_stoptx_ppi_init(NRF_UARTE_Type *uarte, return 0; } +#endif static int uarte_instance_init(const struct device *dev, uint8_t interrupts_active) { int err; NRF_UARTE_Type *uarte = get_uarte_instance(dev); - struct uarte_nrfx_data *data = dev->data; const struct uarte_nrfx_config *cfg = dev->config; - nrf_uarte_disable(uarte); +#if defined(CONFIG_UART_USE_RUNTIME_CONFIGURE) || defined(UARTE_ENHANCED_POLL_OUT) || \ + defined(UARTE_ANY_ASYNC) + struct uarte_nrfx_data *data = dev->data; +#endif - data->dev = dev; + nrf_uarte_disable(uarte); #ifdef CONFIG_ARCH_POSIX /* For simulation the DT provided peripheral address needs to be corrected */ @@ -1865,24 +2071,25 @@ static int uarte_instance_init(const struct device *dev, return err; } #else - if (cfg->baudrate == 0) { + if (cfg->nrf_baudrate == 0) { return -EINVAL; } - nrf_uarte_baudrate_set(uarte, cfg->baudrate); + nrf_uarte_baudrate_set(uarte, cfg->nrf_baudrate); nrf_uarte_configure(uarte, &cfg->hw_config); #endif - if (IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT)) { - nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDTX_STOPTX); - } 
- else if (IS_ENABLED(UARTE_ENHANCED_POLL_OUT) && - cfg->flags & UARTE_CFG_FLAG_PPI_ENDTX) { +#ifdef UARTE_HAS_ENDTX_STOPTX_SHORT + nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDTX_STOPTX); +#endif + +#ifdef UARTE_ENHANCED_POLL_OUT + if (cfg->flags & UARTE_CFG_FLAG_PPI_ENDTX) { err = endtx_stoptx_ppi_init(uarte, data); if (err < 0) { return err; } } - +#endif #ifdef UARTE_ANY_ASYNC if (data->async) { @@ -1899,7 +2106,7 @@ static int uarte_instance_init(const struct device *dev, if (!cfg->disable_rx) { nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); - nrf_uarte_rx_buffer_set(uarte, data->rx_data, 1); + nrf_uarte_rx_buffer_set(uarte, cfg->poll_in_byte, 1); nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); } } @@ -1909,19 +2116,11 @@ static int uarte_instance_init(const struct device *dev, nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDTX_MASK); } - if (cfg->flags & UARTE_CFG_FLAG_LOW_POWER) { - nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); - } - /* Set TXSTOPPED event by requesting fake (zero-length) transfer. * Pointer to RAM variable (data->tx_buffer) is set because otherwise * such operation may result in HardFault or RAM corruption. */ - nrf_uarte_tx_buffer_set(uarte, data->char_out, 0); - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX); - - /* switch off transmitter to save an energy */ - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX); + tx_start(dev, cfg->poll_out_byte, 0); return 0; } @@ -2024,8 +2223,8 @@ static int uarte_nrfx_pm_action(const struct device *dev, /* Entering inactive state requires the device to have no * active asynchronous calls. */ - __ASSERT_NO_MSG(!data->async->rx_enabled); - __ASSERT_NO_MSG(!data->async->tx_size); + __ASSERT_NO_MSG(!data->async->rx.enabled); + __ASSERT_NO_MSG(!data->async->tx.len); } #endif @@ -2055,7 +2254,8 @@ static int uarte_nrfx_pm_action(const struct device *dev, } wait_for_tx_stopped(dev); - uart_disable(dev); + uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_TX, UARTE_FLAG_LOW_POWER_RX); + uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_RX, UARTE_FLAG_LOW_POWER_TX); ret = pins_state_change(dev, false); if (ret < 0) { @@ -2090,6 +2290,28 @@ static int uarte_nrfx_pm_action(const struct device *dev, #define UARTE_DISABLE_RX_INIT(node_id) \ .disable_rx = DT_PROP(node_id, disable_rx) +#define UARTE_MEM_REGION(idx) DT_PHANDLE(UARTE(idx), memory_regions) + +#define UARTE_GET_MEM_ATTR(idx)\ + COND_CODE_1(UARTE_HAS_PROP(idx, memory_regions), \ + (COND_CODE_1(DT_NODE_HAS_PROP(UARTE_MEM_REGION(idx), zephyr_memory_attr), \ + (DT_PROP(UARTE_MEM_REGION(idx), zephyr_memory_attr)), \ + (0))), \ + (0)) + +#define UARTE_IS_CACHEABLE(idx) DMM_IS_REG_CACHEABLE(UARTE(idx)) + +#define UARTE_GET_BAUDRATE_DIV(idx) \ + COND_CODE_0(UARTE_HAS_PROP(idx, base_frequency), (1), \ + (UARTE_PROP(idx, base_frequency) / 16)) + +/* When calculating the baudrate we need to take into account that high speed + * instances must have the baudrate adjusted by the ratio between the UARTE + * clocking frequency and 16 MHz. + */ +#define UARTE_GET_BAUDRATE(idx) \ + (NRF_BAUDRATE(UARTE_PROP(idx, current_speed)) / UARTE_GET_BAUDRATE_DIV(idx)) + + /* Macro for setting nRF specific configuration structures.
*/ #define UARTE_NRF_CONFIG(idx) { \ .hwfc = (UARTE_PROP(idx, hw_flow_control) == \ @@ -2100,6 +2322,8 @@ static int uarte_nrfx_pm_action(const struct device *dev, IF_ENABLED(UARTE_HAS_STOP_CONFIG, (.stop = NRF_UARTE_STOP_ONE,))\ IF_ENABLED(UARTE_ODD_PARITY_ALLOWED, \ (.paritytype = NRF_UARTE_PARITYTYPE_EVEN,)) \ + IF_ENABLED(UARTE_HAS_FRAME_TIMEOUT, \ + (.frame_timeout = NRF_UARTE_FRAME_TIMEOUT_EN,)) \ } /* Macro for setting zephyr specific configuration structures. */ @@ -2122,14 +2346,14 @@ static int uarte_nrfx_pm_action(const struct device *dev, IF_ENABLED(CONFIG_UART_##idx##_ASYNC, ( \ static uint8_t \ uarte##idx##_tx_cache[CONFIG_UART_ASYNC_TX_CACHE_SIZE] \ - UARTE_MEMORY_SECTION(idx); \ + DMM_MEMORY_SECTION(UARTE(idx)); \ + static uint8_t uarte##idx##_flush_buf[UARTE_HW_RX_FIFO_SIZE] \ + DMM_MEMORY_SECTION(UARTE(idx)); \ struct uarte_async_cb uarte##idx##_async; \ )) \ - static uint8_t uarte##idx##_char_out UARTE_MEMORY_SECTION(idx); \ - static uint8_t uarte##idx##_rx_data UARTE_MEMORY_SECTION(idx); \ + static uint8_t uarte##idx##_poll_out_byte DMM_MEMORY_SECTION(UARTE(idx));\ + static uint8_t uarte##idx##_poll_in_byte DMM_MEMORY_SECTION(UARTE(idx)); \ static struct uarte_nrfx_data uarte_##idx##_data = { \ - .char_out = &uarte##idx##_char_out, \ - .rx_data = &uarte##idx##_rx_data, \ IF_ENABLED(CONFIG_UART_USE_RUNTIME_CONFIGURE, \ (.uart_config = UARTE_CONFIG(idx),)) \ IF_ENABLED(CONFIG_UART_##idx##_ASYNC, \ @@ -2138,11 +2362,16 @@ static int uarte_nrfx_pm_action(const struct device *dev, (.int_driven = &uarte##idx##_int_driven,)) \ }; \ static const struct uarte_nrfx_config uarte_##idx##z_config = { \ - COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, (), \ - (.baudrate = NRF_BAUDRATE(UARTE_PROP(idx, current_speed)), \ + COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, \ + (.baudrate_div = UARTE_GET_BAUDRATE_DIV(idx),), \ + (IF_ENABLED(UARTE_HAS_FRAME_TIMEOUT, \ + (.baudrate = UARTE_PROP(idx, current_speed),)) \ + .nrf_baudrate = UARTE_GET_BAUDRATE(idx), \ .hw_config = UARTE_NRF_CONFIG(idx),)) \ .pcfg = PINCTRL_DT_DEV_CONFIG_GET(UARTE(idx)), \ .uarte_regs = _CONCAT(NRF_UARTE, idx), \ + IF_ENABLED(CONFIG_HAS_NORDIC_DMM, \ + (.mem_reg = DMM_DEV_TO_REG(UARTE(idx)),)) \ .flags = \ (IS_ENABLED(CONFIG_UART_##idx##_GPIO_MANAGEMENT) ? \ UARTE_CFG_FLAG_GPIO_MGMT : 0) | \ @@ -2150,10 +2379,15 @@ static int uarte_nrfx_pm_action(const struct device *dev, UARTE_CFG_FLAG_PPI_ENDTX : 0) | \ (IS_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC) ? \ UARTE_CFG_FLAG_HW_BYTE_COUNTING : 0) | \ + (!IS_ENABLED(CONFIG_HAS_NORDIC_DMM) ? 0 : \ + (UARTE_IS_CACHEABLE(idx) ? 
UARTE_CFG_FLAG_CACHEABLE : 0)) | \ USE_LOW_POWER(idx), \ UARTE_DISABLE_RX_INIT(UARTE(idx)), \ + .poll_out_byte = &uarte##idx##_poll_out_byte, \ + .poll_in_byte = &uarte##idx##_poll_in_byte, \ IF_ENABLED(CONFIG_UART_##idx##_ASYNC, \ - (.tx_cache = uarte##idx##_tx_cache,)) \ + (.tx_cache = uarte##idx##_tx_cache, \ + .rx_flush_buf = uarte##idx##_flush_buf,)) \ IF_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC, \ (.timer = NRFX_TIMER_INSTANCE( \ CONFIG_UART_##idx##_NRF_HW_ASYNC_TIMER),)) \ @@ -2168,7 +2402,8 @@ static int uarte_nrfx_pm_action(const struct device *dev, IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN)); \ } \ \ - PM_DEVICE_DT_DEFINE(UARTE(idx), uarte_nrfx_pm_action); \ + PM_DEVICE_DT_DEFINE(UARTE(idx), uarte_nrfx_pm_action, \ + COND_CODE_1(CONFIG_UART_##idx##_ASYNC, (PM_DEVICE_ISR_SAFE), (0))); \ \ DEVICE_DT_DEFINE(UARTE(idx), \ uarte_##idx##_init, \ @@ -2184,19 +2419,13 @@ static int uarte_nrfx_pm_action(const struct device *dev, (static uint8_t uarte##idx##_tx_buffer \ [MIN(CONFIG_UART_##idx##_NRF_TX_BUFFER_SIZE, \ BIT_MASK(UARTE##idx##_EASYDMA_MAXCNT_SIZE))] \ - UARTE_MEMORY_SECTION(idx); \ + DMM_MEMORY_SECTION(UARTE(idx)); \ static struct uarte_nrfx_int_driven \ uarte##idx##_int_driven = { \ .tx_buffer = uarte##idx##_tx_buffer, \ .tx_buff_size = sizeof(uarte##idx##_tx_buffer),\ };)) -#define UARTE_MEMORY_SECTION(idx) \ - COND_CODE_1(UARTE_HAS_PROP(idx, memory_regions), \ - (__attribute__((__section__(LINKER_DT_NODE_REGION_NAME( \ - DT_PHANDLE(UARTE(idx), memory_regions)))))), \ - ()) - #define COND_UART_NRF_UARTE_DEVICE(unused, prefix, i, _) \ IF_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i, (UART_NRF_UARTE_DEVICE(prefix##i);))
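Usage sketch (not part of the patch): a minimal illustration of how an application might consume the async API under the reworked event ordering, where every UART_RX_RDY arrives with a zero offset and is followed by UART_RX_BUF_RELEASED. It assumes CONFIG_UART_ASYNC_API=y and CONFIG_UART_NRFX_UARTE_ENHANCED_RX=y; the buffer size, the 10 ms timeout, and names like uart_cb/rx_setup are illustrative, not taken from the patch.

#include <zephyr/kernel.h>
#include <zephyr/drivers/uart.h>

#define RX_BUF_LEN    64
#define RX_TIMEOUT_US 10000 /* 10 ms of RX idle reports and switches the buffer */

static uint8_t rx_bufs[2][RX_BUF_LEN];
static uint8_t next_buf = 1;

static void uart_cb(const struct device *dev, struct uart_event *evt, void *user_data)
{
	ARG_UNUSED(user_data);

	switch (evt->type) {
	case UART_RX_RDY:
		/* With CONFIG_UART_NRFX_UARTE_ENHANCED_RX=y the timeout always
		 * switches buffers, so evt->data.rx.offset is 0 here and a
		 * UART_RX_BUF_RELEASED event for this buffer follows.
		 */
		break;
	case UART_RX_BUF_REQUEST:
		/* Hand over the other half of the double buffer. */
		(void)uart_rx_buf_rsp(dev, rx_bufs[next_buf], RX_BUF_LEN);
		next_buf ^= 1;
		break;
	case UART_RX_BUF_RELEASED:
		/* evt->data.rx_buf.buf may be reused from this point on. */
		break;
	default:
		break;
	}
}

static int rx_setup(const struct device *uart)
{
	int err = uart_callback_set(uart, uart_cb, NULL);

	if (err) {
		return err;
	}

	return uart_rx_enable(uart, rx_bufs[0], RX_BUF_LEN, RX_TIMEOUT_US);
}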
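Worked example (not part of the patch) for the nRF54X frame-timeout path: us_to_bauds() in the patch converts the user timeout from microseconds into bit times for the hardware counter. The numbers below are illustrative assumptions:

/* us_to_bauds(baudrate, timeout):
 *   divisor = 1000000 / timeout;   e.g. 1000000 / 10000 us = 100
 *   bauds   = baudrate / divisor;  e.g. 115200 / 100 = 1152 bit times
 *
 * The result is capped at UARTE_FRAMETIMEOUT_COUNTERTOP_Msk and written via
 * nrf_uarte_frame_timeout_set(); with NRF_UARTE_SHORT_FRAME_TIMEOUT_STOPRX
 * enabled, RX stops once the line has been idle for that many bit times.
 */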