diff --git a/doc/services/device_mgmt/ec_host_cmd.rst b/doc/services/device_mgmt/ec_host_cmd.rst index 1b3e9ff6f693e31..880b53d3e2f3d25 100644 --- a/doc/services/device_mgmt/ec_host_cmd.rst +++ b/doc/services/device_mgmt/ec_host_cmd.rst @@ -36,6 +36,36 @@ one backend layer. .. image:: ec_host_cmd_shi.png :align: center +Another case is SPI. Unfortunately, the current SPI API can't be used to handle the host commands +communication. The main issues are unknown command size sent by the host (the SPI transaction +sends/receives specific number of bytes) and the need to constantly send a status byte (the SPI module +is enabled and disabled per transaction). It forces implementing the SPI driver within a backend, +as it is done for SHI. That means a SPI backend has to be implemented per chip family. However, it +can be changed in the future once the SPI API is extended to host command needs. + +That approach requires configuring the SPI dts node in a special way. The main compatible string of +a SPI node has to be changed to use the Host Command version of a SPI driver. The rest of the properties +should be configured as usual. Example of the SPI node for STM32: + +.. code-block:: devicetree + + &spi1 { + /* Change the compatible string to use the Host Command version of the + * STM32 SPI driver + */ + compatible = "st,stm32-spi-host-cmd"; + status = "okay"; + + dmas = <&dma2 3 3 0x38440 0x03>, + <&dma2 0 3 0x38480 0x03>; + dma-names = "tx", "rx"; + /* This field is used to point at our CS pin */ + cs-gpios = <&gpioa 4 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; + }; + +The chip that runs Zephyr is a SPI slave and the ``cs-gpios`` property is used to point at our CS pin. +For the SPI, it is required to set the backend chosen node ``zephyr,host-cmd-spi-backend``. 
+ The supported backend and peripheral drivers: * Simulator @@ -43,6 +73,7 @@ The supported backend and peripheral drivers: * eSPI - any eSPI slave driver that support :kconfig:option:`CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD` and :kconfig:option:`CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE` * UART - any UART driver that supports the asynchronous API +* SPI - STM32 Initialization ************** @@ -54,6 +85,7 @@ initializes the host command subsystem by calling :c:func:`ec_host_cmd_init`: * ``zephyr,host-cmd-espi-backend`` * ``zephyr,host-cmd-shi-backend`` * ``zephyr,host-cmd-uart-backend`` +* ``zephyr,host-cmd-spi-backend`` If no backend chosen node is configured, the application must call the :c:func:`ec_host_cmd_init` function directly. This way of initialization is useful if a backend is chosen in runtime diff --git a/dts/bindings/spi/st,stm32-spi-host-cmd.yaml b/dts/bindings/spi/st,stm32-spi-host-cmd.yaml new file mode 100644 index 000000000000000..db5cb55d5f2d335 --- /dev/null +++ b/dts/bindings/spi/st,stm32-spi-host-cmd.yaml @@ -0,0 +1,10 @@ +# Copyright (c) 2023 Google LLC +# SPDX-License-Identifier: Apache-2.0 + +description: | + Host Command version of STM32 SPI controller. + All properties are the same, but a different driver is used. + +compatible: "st,stm32-spi-host-cmd" + +include: st,stm32-spi.yaml diff --git a/dts/bindings/spi/st,stm32h7-spi-host-cmd.yaml b/dts/bindings/spi/st,stm32h7-spi-host-cmd.yaml new file mode 100644 index 000000000000000..5fdc90e401a22c8 --- /dev/null +++ b/dts/bindings/spi/st,stm32h7-spi-host-cmd.yaml @@ -0,0 +1,10 @@ +# Copyright (c) 2023 Google LLC +# SPDX-License-Identifier: Apache-2.0 + +description: | + Host Command version of STM32H7 SPI controller. + All properties are the same, but a different driver is used. 
+ +compatible: "st,stm32h7-spi-host-cmd" + +include: st,stm32h7-spi.yaml diff --git a/subsys/mgmt/ec_host_cmd/Kconfig b/subsys/mgmt/ec_host_cmd/Kconfig index 979fa06f659826f..c754e749b7ab8ad 100644 --- a/subsys/mgmt/ec_host_cmd/Kconfig +++ b/subsys/mgmt/ec_host_cmd/Kconfig @@ -26,6 +26,7 @@ config EC_HOST_CMD_HANDLER_TX_BUFFER_SIZE default 0 if EC_HOST_CMD_BACKEND_ESPI default 0 if EC_HOST_CMD_BACKEND_SHI default 256 if EC_HOST_CMD_BACKEND_UART + default 552 if EC_HOST_CMD_BACKEND_SPI default 256 help Buffer size in bytes for TX buffer defined by the host command handler. @@ -38,6 +39,7 @@ config EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE default 256 if EC_HOST_CMD_BACKEND_ESPI default 0 if EC_HOST_CMD_BACKEND_SHI default 544 if EC_HOST_CMD_BACKEND_UART + default 544 if EC_HOST_CMD_BACKEND_SPI default 256 help Buffer size in bytes for TX buffer defined by the host command handler. @@ -56,7 +58,7 @@ config EC_HOST_CMD_HANDLER_PRIO config EC_HOST_CMD_INIT_PRIORITY int "Initialization priority" - default 60 + default 80 range 0 99 help Initialization priority for Host Command. 
It must be higher than the initialization diff --git a/subsys/mgmt/ec_host_cmd/backends/CMakeLists.txt b/subsys/mgmt/ec_host_cmd/backends/CMakeLists.txt index 61e3f17c01f0eb6..06d06ae22a4b7a2 100644 --- a/subsys/mgmt/ec_host_cmd/backends/CMakeLists.txt +++ b/subsys/mgmt/ec_host_cmd/backends/CMakeLists.txt @@ -21,3 +21,7 @@ zephyr_library_sources_ifdef( zephyr_library_sources_ifdef( CONFIG_EC_HOST_CMD_BACKEND_UART ec_host_cmd_backend_uart.c) + +zephyr_library_sources_ifdef( + CONFIG_EC_HOST_CMD_BACKEND_SPI_STM32 + ec_host_cmd_backend_spi_stm32.c) diff --git a/subsys/mgmt/ec_host_cmd/backends/Kconfig b/subsys/mgmt/ec_host_cmd/backends/Kconfig index 13b05dfb1b38407..a58d35519077bd6 100644 --- a/subsys/mgmt/ec_host_cmd/backends/Kconfig +++ b/subsys/mgmt/ec_host_cmd/backends/Kconfig @@ -6,6 +6,7 @@ DT_CHOSEN_ESPI_BACKEND := zephyr,host-cmd-espi-backend DT_CHOSEN_SHI_BACKEND := zephyr,host-cmd-shi-backend DT_CHOSEN_UART_BACKEND := zephyr,host-cmd-uart-backend +DT_CHOSEN_SPI_BACKEND := zephyr,host-cmd-spi-backend config EC_HOST_CMD_BACKEND_SIMULATOR bool "Embedded Controller Host Command Backend Simulator" @@ -37,6 +38,12 @@ config EC_HOST_CMD_BACKEND_UART Enable support for Embedded Controller host commands using the UART. +config EC_HOST_CMD_BACKEND_SPI + bool "Host commands support using SPI" + help + Enable support for Embedded Controller host commands using + the SPI. + if EC_HOST_CMD_BACKEND_SHI choice EC_HOST_CMD_BACKEND_SHI_DRIVER @@ -79,3 +86,19 @@ config EC_HOST_CMD_BACKEND_SHI_MAX_RESPONSE response. endif # EC_HOST_CMD_BACKEND_SHI + +if EC_HOST_CMD_BACKEND_SPI + +choice EC_HOST_CMD_BACKEND_SPI_DRIVER + prompt "SPI driver" + default EC_HOST_CMD_BACKEND_SPI_STM32 if SOC_FAMILY_STM32 + +config EC_HOST_CMD_BACKEND_SPI_STM32 + bool "SPI by STM32" + help + This option enables the driver for SPI backend in the + STM32 chip family. 
+ +endchoice + +endif # EC_HOST_CMD_BACKEND_SPI diff --git a/subsys/mgmt/ec_host_cmd/backends/ec_host_cmd_backend_spi_stm32.c b/subsys/mgmt/ec_host_cmd/backends/ec_host_cmd_backend_spi_stm32.c new file mode 100644 index 000000000000000..bc757cbbad6267b --- /dev/null +++ b/subsys/mgmt/ec_host_cmd/backends/ec_host_cmd_backend_spi_stm32.c @@ -0,0 +1,730 @@ +/* + * Copyright (c) 2023 Google LLC + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/* The SPI STM32 backend implements dedicated SPI driver for Host Commands. Unfortunately, the + * current SPI API can't be used to handle the host commands communication. The main issues are + * unknown command size sent by the host (the SPI transaction sends/receives specific number of + * bytes) and need to constant sending status byte (the SPI module is enabled and disabled per + * transaction). + */ + +#include +LOG_MODULE_REGISTER(host_cmd_spi, CONFIG_EC_HC_LOG_LEVEL); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* The default compatible string of a SPI devicetree node has to be replaced with the one + * dedicated for Host Commands. It disabled standard SPI driver. For STM32 SPI "st,stm32-spi" has + * to be changed to "st,stm32-spi-host-cmd". The remaining "additional" compatible strings should + * stay the same. + */ +#define ST_STM32_SPI_HOST_CMD_COMPAT st_stm32_spi_host_cmd +BUILD_ASSERT(DT_NODE_EXISTS(DT_CHOSEN(zephyr_host_cmd_spi_backend)), + "The chosen backend node is obligatory for SPI STM32 backend."); +BUILD_ASSERT(DT_NODE_HAS_COMPAT_STATUS(DT_CHOSEN(zephyr_host_cmd_spi_backend), + ST_STM32_SPI_HOST_CMD_COMPAT, okay), + "Invalid compatible of the chosen spi node."); + +#define RX_HEADER_SIZE (sizeof(struct ec_host_cmd_request_header)) + +/* Framing byte which precedes a response packet from the EC. After sending a + * request, the host will clock in bytes until it sees the framing byte, then + * clock in the response packet. 
+ */ +#define EC_SPI_FRAME_START 0xec + +/* Padding bytes which are clocked out after the end of a response packet.*/ +#define EC_SPI_PAST_END 0xed + +/* The number of the ending bytes. The number can be bigger than 1 for chip families + * than need to bypass the DMA threshold. + */ +#define EC_SPI_PAST_END_LENGTH 1 + +/* EC is ready to receive.*/ +#define EC_SPI_RX_READY 0x78 + +/* EC has started receiving the request from the host, but hasn't started + * processing it yet. + */ +#define EC_SPI_RECEIVING 0xf9 + +/* EC has received the entire request from the host and is processing it. */ +#define EC_SPI_PROCESSING 0xfa + +/* EC received bad data from the host, such as a packet header with an invalid + * length. EC will ignore all data until chip select deasserts. + */ +#define EC_SPI_RX_BAD_DATA 0xfb + +/* EC received data from the AP before it was ready. That is, the host asserted + * chip select and started clocking data before the EC was ready to receive it. + * EC will ignore all data until chip select deasserts. + */ +#define EC_SPI_NOT_READY 0xfc + +/* Supported version of host commands protocol. */ +#define EC_HOST_REQUEST_VERSION 3 + +/* Timeout to wait for SPI request packet + * + * This affects the slowest SPI clock we can support. A delay of 8192 us + * permits a 512-byte request at 500 KHz, assuming the master starts sending + * bytes as soon as it asserts chip select. That's as slow as we would + * practically want to run the SPI interface, since running it slower + * significantly impacts firmware update times. 
+ */ +#define EC_SPI_CMD_RX_TIMEOUT_US 8192 + +/* Enumeration to maintain different states of incoming request from + * host + */ +enum spi_host_command_state { + /* SPI not enabled (initial state, and when chipset is off) */ + SPI_HOST_CMD_STATE_DISABLED = 0, + + /* SPI module enabled, but not ready to receive */ + SPI_HOST_CMD_STATE_RX_NOT_READY, + + /* Ready to receive next request */ + SPI_HOST_CMD_STATE_READY_TO_RX, + + /* Receiving request */ + SPI_HOST_CMD_STATE_RECEIVING, + + /* Processing request */ + SPI_HOST_CMD_STATE_PROCESSING, + + /* Sending response */ + SPI_HOST_CMD_STATE_SENDING, + + /* Received bad data - transaction started before we were ready, or + * packet header from host didn't parse properly. Ignoring received + * data. + */ + SPI_HOST_CMD_STATE_RX_BAD, +}; + +struct dma_stream { + const struct device *dma_dev; + uint32_t channel; + struct dma_config dma_cfg; + struct dma_block_config dma_blk_cfg; + int fifo_threshold; +}; + +struct ec_host_cmd_spi_cfg { + SPI_TypeDef *spi; + const struct pinctrl_dev_config *pcfg; + size_t pclk_len; + const struct stm32_pclken *pclken; +}; + +struct ec_host_cmd_spi_ctx { + struct gpio_dt_spec cs; + struct gpio_callback cs_callback; + struct ec_host_cmd_spi_cfg *spi_config; + struct ec_host_cmd_rx_ctx *rx_ctx; + struct ec_host_cmd_tx_buf *tx; + uint8_t *tx_buf; + struct dma_stream *dma_rx; + struct dma_stream *dma_tx; + enum spi_host_command_state state; + int prepare_rx_later; +}; + +static const uint8_t out_preamble[4] = { + EC_SPI_PROCESSING, EC_SPI_PROCESSING, EC_SPI_PROCESSING, + EC_SPI_FRAME_START, /* This is the byte which matters */ +}; + +static void dma_callback(const struct device *dev, void *arg, uint32_t channel, int status); +static int prepare_rx(struct ec_host_cmd_spi_ctx *hc_spi); + +#define SPI_DMA_CHANNEL_INIT(id, dir, dir_cap, src_dev, dest_dev) \ + .dma_dev = DEVICE_DT_GET(DT_DMAS_CTLR_BY_NAME(id, dir)), \ + .channel = DT_DMAS_CELL_BY_NAME(id, dir, channel), \ + .dma_cfg = \ + { \ + 
.dma_slot = DT_DMAS_CELL_BY_NAME(id, dir, slot), \ + .channel_direction = STM32_DMA_CONFIG_DIRECTION( \ + DT_DMAS_CELL_BY_NAME(id, dir, channel_config)), \ + .source_data_size = STM32_DMA_CONFIG_##src_dev##_DATA_SIZE( \ + DT_DMAS_CELL_BY_NAME(id, dir, channel_config)), \ + .dest_data_size = STM32_DMA_CONFIG_##dest_dev##_DATA_SIZE( \ + DT_DMAS_CELL_BY_NAME(id, dir, channel_config)), \ + .source_burst_length = 1, /* SINGLE transfer */ \ + .dest_burst_length = 1, /* SINGLE transfer */ \ + .channel_priority = STM32_DMA_CONFIG_PRIORITY( \ + DT_DMAS_CELL_BY_NAME(id, dir, channel_config)), \ + .dma_callback = dma_callback, \ + .block_count = 2, \ + }, \ + .fifo_threshold = \ + STM32_DMA_FEATURES_FIFO_THRESHOLD(DT_DMAS_CELL_BY_NAME(id, dir, features)), + +#define STM32_SPI_INIT(id) \ + PINCTRL_DT_DEFINE(id); \ + static const struct stm32_pclken pclken[] = STM32_DT_CLOCKS(id); \ + \ + static struct ec_host_cmd_spi_cfg ec_host_cmd_spi_cfg = { \ + .spi = (SPI_TypeDef *)DT_REG_ADDR(id), \ + .pclken = pclken, \ + .pclk_len = DT_NUM_CLOCKS(id), \ + .pcfg = PINCTRL_DT_DEV_CONFIG_GET(id), \ + }; \ + \ + static struct dma_stream dma_rx = {SPI_DMA_CHANNEL_INIT(id, rx, RX, PERIPHERAL, MEMORY)}; \ + static struct dma_stream dma_tx = {SPI_DMA_CHANNEL_INIT(id, tx, TX, MEMORY, PERIPHERAL)} + +STM32_SPI_INIT(DT_CHOSEN(zephyr_host_cmd_spi_backend)); + +#define EC_HOST_CMD_SPI_DEFINE(_name) \ + static struct ec_host_cmd_spi_ctx _name##_hc_spi = { \ + .dma_rx = &dma_rx, \ + .dma_tx = &dma_tx, \ + .spi_config = &ec_host_cmd_spi_cfg, \ + }; \ + struct ec_host_cmd_backend _name = { \ + .api = &ec_host_cmd_api, \ + .ctx = (struct ec_host_cmd_spi_ctx *)&_name##_hc_spi, \ + } + +static inline uint32_t dma_source_addr(SPI_TypeDef *spi) +{ +#if DT_NODE_HAS_COMPAT(DT_CHOSEN(zephyr_host_cmd_spi_backend), st_stm32h7_spi) + return (uint32_t)(&spi->RXDR); +#else + return (uint32_t)LL_SPI_DMA_GetRegAddr(spi); +#endif /* st_stm32h7_spi */ +} + +static inline uint32_t dma_dest_addr(SPI_TypeDef *spi) +{ +#if 
DT_NODE_HAS_COMPAT(DT_CHOSEN(zephyr_host_cmd_spi_backend), st_stm32h7_spi) + return (uint32_t)(&spi->TXDR); +#else + return (uint32_t)LL_SPI_DMA_GetRegAddr(spi); +#endif /* st_stm32h7_spi */ +} + +/* Set TX register to send status, while SPI module is enabled */ +static inline void tx_status(SPI_TypeDef *spi, uint8_t status) +{ + /* The number of status bytes to send can be bigger than 1 for chip + * families that need to bypass the DMA threshold. + */ + LL_SPI_TransmitData8(spi, status); +} + +static int expected_size(const struct ec_host_cmd_request_header *header) +{ + /* Check host request version */ + if (header->prtcl_ver != EC_HOST_REQUEST_VERSION) { + return 0; + } + + /* Reserved byte should be 0 */ + if (header->reserved) { + return 0; + } + + return sizeof(*header) + header->data_len; +} + +static void dma_callback(const struct device *dev, void *arg, uint32_t channel, int status) +{ + struct ec_host_cmd_spi_ctx *hc_spi = arg; + + /* End of sending */ + if (channel == hc_spi->dma_tx->channel) { + if (hc_spi->prepare_rx_later) { + int ret; + + ret = prepare_rx(hc_spi); + + if (ret) { + LOG_ERR("Failed to prepare RX later"); + } + } else { + const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config; + SPI_TypeDef *spi = cfg->spi; + + /* Set the status not ready. 
Prepare RX after CS deassertion */ + tx_status(spi, EC_SPI_NOT_READY); + hc_spi->state = SPI_HOST_CMD_STATE_RX_NOT_READY; + } + } +} + +static int spi_init(const struct ec_host_cmd_spi_ctx *hc_spi) +{ + int err; + + if (!device_is_ready(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE))) { + LOG_ERR("Clock control device not ready"); + return -ENODEV; + } + + err = clock_control_on(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE), + (clock_control_subsys_t)&hc_spi->spi_config->pclken[0]); + if (err < 0) { + LOG_ERR("Could not enable SPI clock"); + return err; + } + + if (IS_ENABLED(STM32_SPI_DOMAIN_CLOCK_SUPPORT) && (hc_spi->spi_config->pclk_len > 1)) { + err = clock_control_configure( + DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE), + (clock_control_subsys_t)&hc_spi->spi_config->pclken[1], NULL); + if (err < 0) { + LOG_ERR("Could not select SPI domain clock"); + return err; + } + } + + /* Configure dt provided device signals when available */ + err = pinctrl_apply_state(hc_spi->spi_config->pcfg, PINCTRL_STATE_DEFAULT); + if (err < 0) { + LOG_ERR("SPI pinctrl setup failed (%d)", err); + return err; + } + + if ((hc_spi->dma_rx->dma_dev != NULL) && !device_is_ready(hc_spi->dma_rx->dma_dev)) { + LOG_ERR("%s device not ready", hc_spi->dma_rx->dma_dev->name); + return -ENODEV; + } + + if ((hc_spi->dma_tx->dma_dev != NULL) && !device_is_ready(hc_spi->dma_tx->dma_dev)) { + LOG_ERR("%s device not ready", hc_spi->dma_tx->dma_dev->name); + return -ENODEV; + } + + return 0; +} + +static int spi_configure(const struct ec_host_cmd_spi_ctx *hc_spi) +{ + const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config; + SPI_TypeDef *spi = cfg->spi; + +#if defined(LL_SPI_PROTOCOL_MOTOROLA) && defined(SPI_CR2_FRF) + LL_SPI_SetStandard(spi, LL_SPI_PROTOCOL_MOTOROLA); +#endif + + /* Disable before configuration */ + LL_SPI_Disable(spi); + /* Set clock signal configuration */ + LL_SPI_SetClockPolarity(spi, LL_SPI_POLARITY_LOW); + LL_SPI_SetClockPhase(spi, LL_SPI_PHASE_1EDGE); + /* Set protocol parameters */ + 
LL_SPI_SetTransferDirection(spi, LL_SPI_FULL_DUPLEX); + LL_SPI_SetTransferBitOrder(spi, LL_SPI_MSB_FIRST); + LL_SPI_DisableCRC(spi); + LL_SPI_SetDataWidth(spi, LL_SPI_DATAWIDTH_8BIT); + /* Set slave options */ + LL_SPI_SetNSSMode(spi, LL_SPI_NSS_HARD_INPUT); + LL_SPI_SetMode(spi, LL_SPI_MODE_SLAVE); + +#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_fifo) +#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) + LL_SPI_SetFIFOThreshold(spi, LL_SPI_FIFO_TH_01DATA); +#else + LL_SPI_SetRxFIFOThreshold(spi, LL_SPI_RX_FIFO_TH_QUARTER); +#endif /* st_stm32h7_spi */ +#endif /* st_stm32_spi_fifo */ + + return 0; +} + +static int reload_dma_tx(struct ec_host_cmd_spi_ctx *hc_spi, size_t len) +{ + const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config; + SPI_TypeDef *spi = cfg->spi; + int ret; + + /* Set DMA at the beginning of the TX buffer and set the number of bytes to send */ + ret = dma_reload(hc_spi->dma_tx->dma_dev, hc_spi->dma_tx->channel, (uint32_t)hc_spi->tx_buf, + dma_dest_addr(spi), len); + if (ret != 0) { + return ret; + } + + /* Start DMA transfer */ + ret = dma_start(hc_spi->dma_tx->dma_dev, hc_spi->dma_tx->channel); + if (ret != 0) { + return ret; + } + + return 0; +} + +static int spi_config_dma_tx(struct ec_host_cmd_spi_ctx *hc_spi) +{ + const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config; + SPI_TypeDef *spi = cfg->spi; + struct dma_block_config *blk_cfg; + struct dma_stream *stream = hc_spi->dma_tx; + int ret; + + blk_cfg = &stream->dma_blk_cfg; + + /* Set configs for TX. 
This shouldn't be changed during communication */ + memset(blk_cfg, 0, sizeof(struct dma_block_config)); + blk_cfg->block_size = 0; + + /* The destination address is the SPI register */ + blk_cfg->dest_address = dma_dest_addr(spi); + blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; + + blk_cfg->source_address = (uint32_t)hc_spi->tx_buf; + blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT; + + blk_cfg->fifo_mode_control = hc_spi->dma_tx->fifo_threshold; + + stream->dma_cfg.head_block = blk_cfg; + stream->dma_cfg.user_data = hc_spi; + + /* Configure the TX the channels */ + ret = dma_config(hc_spi->dma_tx->dma_dev, hc_spi->dma_tx->channel, &stream->dma_cfg); + + if (ret != 0) { + return ret; + } + + return 0; +} + +static int reload_dma_rx(struct ec_host_cmd_spi_ctx *hc_spi) +{ + const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config; + SPI_TypeDef *spi = cfg->spi; + int ret; + + /* Reload DMA to the beginning of the RX buffer */ + ret = dma_reload(hc_spi->dma_rx->dma_dev, hc_spi->dma_rx->channel, dma_source_addr(spi), + (uint32_t)hc_spi->rx_ctx->buf, CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE); + if (ret != 0) { + return ret; + } + + ret = dma_start(hc_spi->dma_rx->dma_dev, hc_spi->dma_rx->channel); + if (ret != 0) { + return ret; + } + + return 0; +} + +static int spi_config_dma_rx(struct ec_host_cmd_spi_ctx *hc_spi) +{ + const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config; + SPI_TypeDef *spi = cfg->spi; + struct dma_block_config *blk_cfg; + struct dma_stream *stream = hc_spi->dma_rx; + int ret; + + blk_cfg = &stream->dma_blk_cfg; + + /* Set configs for RX. 
This shouldn't be changed during communication */ + memset(blk_cfg, 0, sizeof(struct dma_block_config)); + blk_cfg->block_size = CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE; + + /* The destination address is our RX buffer */ + blk_cfg->dest_address = (uint32_t)hc_spi->rx_ctx->buf; + blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT; + + /* The source address is the SPI register */ + blk_cfg->source_address = dma_source_addr(spi); + blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; + + blk_cfg->fifo_mode_control = hc_spi->dma_rx->fifo_threshold; + + stream->dma_cfg.head_block = blk_cfg; + stream->dma_cfg.user_data = hc_spi; + + /* Configure the RX the channels */ + ret = dma_config(hc_spi->dma_rx->dma_dev, hc_spi->dma_rx->channel, &stream->dma_cfg); + + return ret; +} + +static int prepare_rx(struct ec_host_cmd_spi_ctx *hc_spi) +{ + const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config; + SPI_TypeDef *spi = cfg->spi; + int ret; + + hc_spi->prepare_rx_later = 0; + /* Flush RX buffer. It clears the RXNE(RX not empty) flag not to trigger + * the DMA transfer at the beginning of a new SPI transfer. The flag is + * set while sending response to host. The number of bytes to read can + * be bigger than 1 for chip families than need to bypass the DMA + * threshold. 
+ */ + LL_SPI_ReceiveData8(spi); + + ret = reload_dma_rx(hc_spi); + if (!ret) { + tx_status(spi, EC_SPI_RX_READY); + hc_spi->state = SPI_HOST_CMD_STATE_READY_TO_RX; + } + + return ret; +} + +static int spi_setup_dma(struct ec_host_cmd_spi_ctx *hc_spi) +{ + const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config; + SPI_TypeDef *spi = cfg->spi; + /* retrieve active RX DMA channel (used in callback) */ + int ret; + +#if DT_NODE_HAS_COMPAT(DT_CHOSEN(zephyr_host_cmd_spi_backend), st_stm32h7_spi) + /* Set request before enabling (else SPI CFG1 reg is write protected) */ + LL_SPI_EnableDMAReq_RX(spi); + LL_SPI_EnableDMAReq_TX(spi); + + LL_SPI_Enable(spi); +#else /* st_stm32h7_spi */ + LL_SPI_Enable(spi); +#endif /* !st_stm32h7_spi */ + + ret = spi_config_dma_tx(hc_spi); + if (ret != 0) { + return ret; + } + + ret = spi_config_dma_rx(hc_spi); + if (ret != 0) { + return ret; + } + + /* Start receiving from the SPI Master */ + ret = dma_start(hc_spi->dma_rx->dma_dev, hc_spi->dma_rx->channel); + if (ret != 0) { + return ret; + } + +#if !DT_NODE_HAS_COMPAT(DT_CHOSEN(zephyr_host_cmd_spi_backend), st_stm32h7_spi) + /* toggle the DMA request to restart the transfer */ + LL_SPI_EnableDMAReq_RX(spi); + LL_SPI_EnableDMAReq_TX(spi); +#endif /* !st_stm32h7_spi */ + + return 0; +} + +static int wait_for_rx_bytes(struct ec_host_cmd_spi_ctx *hc_spi, int needed) +{ + int64_t deadline = k_ticks_to_us_floor64(k_uptime_ticks()) + EC_SPI_CMD_RX_TIMEOUT_US; + int64_t current_time; + struct dma_status stat; + + while (1) { + current_time = k_ticks_to_us_floor64(k_uptime_ticks()); + + /* RX DMA is always programed to copy buffer size (max command size) */ + if (dma_get_status(hc_spi->dma_rx->dma_dev, hc_spi->dma_rx->channel, &stat) == 0) { + uint32_t rx_bytes = + CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE - stat.pending_length; + if (rx_bytes >= needed) { + return 0; + } + } else { + return -1; + } + + /* Make sure the SPI transfer is ongoing */ + if (gpio_pin_get(hc_spi->cs.port, 
hc_spi->cs.pin)) { + /* End of transfer - return instantly */ + return -1; + } + + if (current_time >= deadline) { + /* Timeout */ + return -1; + } + } +} + +void gpio_cb_nss(const struct device *port, struct gpio_callback *cb, gpio_port_pins_t pins) +{ + struct ec_host_cmd_spi_ctx *hc_spi = + CONTAINER_OF(cb, struct ec_host_cmd_spi_ctx, cs_callback); + const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config; + SPI_TypeDef *spi = cfg->spi; + int ret; + + /* CS deasserted. Set up for the next transaction */ + if (gpio_pin_get(hc_spi->cs.port, hc_spi->cs.pin)) { + /* CS asserted during processing a command. Prepare for receiving after + * sending response. + */ + if (hc_spi->state == SPI_HOST_CMD_STATE_PROCESSING) { + hc_spi->prepare_rx_later = 1; + return; + } + + ret = prepare_rx(hc_spi); + if (ret) { + LOG_ERR("Failed to prepare RX after CS deassertion"); + } + + return; + } + + /* CS asserted. Receive full packet and call general handler */ + if (hc_spi->state == SPI_HOST_CMD_STATE_READY_TO_RX) { + /* The SPI module and DMA are already configured and ready to receive data. + * Consider disabling the SPI module at the end of sending response and + * reenabling it here if there is a need to disable/reset the SPI module, + * because of unexpected states. 
+ */ + int exp_size; + + hc_spi->state = SPI_HOST_CMD_STATE_RECEIVING; + + /* Set TX register to send status */ + tx_status(spi, EC_SPI_RECEIVING); + + /* Get the header */ + if (wait_for_rx_bytes(hc_spi, RX_HEADER_SIZE)) { + goto spi_bad_rx; + } + + exp_size = expected_size((struct ec_host_cmd_request_header *)hc_spi->rx_ctx->buf); + /* Get data bytes */ + if (exp_size > RX_HEADER_SIZE) { + if (wait_for_rx_bytes(hc_spi, exp_size)) { + goto spi_bad_rx; + } + } + + hc_spi->rx_ctx->len = exp_size; + hc_spi->state = SPI_HOST_CMD_STATE_PROCESSING; + tx_status(spi, EC_SPI_PROCESSING); + k_sem_give(&hc_spi->rx_ctx->handler_owns); + + return; + } + +spi_bad_rx: + tx_status(spi, EC_SPI_NOT_READY); + hc_spi->state = SPI_HOST_CMD_STATE_RX_BAD; +} + +static int ec_host_cmd_spi_init(const struct ec_host_cmd_backend *backend, + struct ec_host_cmd_rx_ctx *rx_ctx, struct ec_host_cmd_tx_buf *tx) +{ + int ret; + struct ec_host_cmd_spi_ctx *hc_spi = (struct ec_host_cmd_spi_ctx *)backend->ctx; + const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config; + SPI_TypeDef *spi = cfg->spi; + + hc_spi->state = SPI_HOST_CMD_STATE_DISABLED; + + /* SPI backend needs rx and tx buffers provided by the handler */ + if (!rx_ctx->buf || !tx->buf) { + return -EIO; + } + + gpio_init_callback(&hc_spi->cs_callback, gpio_cb_nss, BIT(hc_spi->cs.pin)); + gpio_add_callback(hc_spi->cs.port, &hc_spi->cs_callback); + gpio_pin_interrupt_configure(hc_spi->cs.port, hc_spi->cs.pin, GPIO_INT_EDGE_BOTH); + + hc_spi->rx_ctx = rx_ctx; + hc_spi->rx_ctx->len = 0; + + /* Buffer to transmit */ + hc_spi->tx_buf = tx->buf; + hc_spi->tx = tx; + /* Buffer for response from HC handler. 
Make space for preamble */ + hc_spi->tx->buf = (uint8_t *)hc_spi->tx->buf + sizeof(out_preamble); + + ret = spi_init(hc_spi); + if (ret) { + return ret; + } + + ret = spi_configure(hc_spi); + if (ret) { + return ret; + } + + ret = spi_setup_dma(hc_spi); + if (ret) { + return ret; + } + + tx_status(spi, EC_SPI_RX_READY); + hc_spi->state = SPI_HOST_CMD_STATE_READY_TO_RX; + + return ret; +} + +static int ec_host_cmd_spi_send(const struct ec_host_cmd_backend *backend) +{ + struct ec_host_cmd_spi_ctx *hc_spi = (struct ec_host_cmd_spi_ctx *)backend->ctx; + int ret = 0; + int tx_size; + + dma_stop(hc_spi->dma_rx->dma_dev, hc_spi->dma_rx->channel); + + /* Add state bytes at the beginning and the end of the buffer to transmit */ + memcpy(hc_spi->tx_buf, out_preamble, sizeof(out_preamble)); + for (int i = 0; i < EC_SPI_PAST_END_LENGTH; i++) { + hc_spi->tx_buf[sizeof(out_preamble) + hc_spi->tx->len + i] = EC_SPI_PAST_END; + } + tx_size = hc_spi->tx->len + sizeof(out_preamble) + EC_SPI_PAST_END_LENGTH; + + hc_spi->state = SPI_HOST_CMD_STATE_SENDING; + + ret = reload_dma_tx(hc_spi, tx_size); + if (ret) { + LOG_ERR("Failed to send response"); + } + + return ret; +} + +static const struct ec_host_cmd_backend_api ec_host_cmd_api = { + .init = &ec_host_cmd_spi_init, + .send = &ec_host_cmd_spi_send, +}; + +EC_HOST_CMD_SPI_DEFINE(ec_host_cmd_spi); + +struct ec_host_cmd_backend *ec_host_cmd_backend_get_spi(struct gpio_dt_spec *cs) +{ + struct ec_host_cmd_spi_ctx *hc_spi = ec_host_cmd_spi.ctx; + + hc_spi->cs = *cs; + + return &ec_host_cmd_spi; +} + +#ifdef CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT +static int host_cmd_init(void) +{ + struct gpio_dt_spec cs = GPIO_DT_SPEC_GET(DT_CHOSEN(zephyr_host_cmd_spi_backend), cs_gpios); + + ec_host_cmd_init(ec_host_cmd_backend_get_spi(&cs)); + + return 0; +} +SYS_INIT(host_cmd_init, POST_KERNEL, CONFIG_EC_HOST_CMD_INIT_PRIORITY); + +#endif diff --git a/subsys/mgmt/ec_host_cmd/ec_host_cmd_handler.c b/subsys/mgmt/ec_host_cmd/ec_host_cmd_handler.c index 
13e12af55a5c6e0..eb8dc7e02363e7e 100644 --- a/subsys/mgmt/ec_host_cmd/ec_host_cmd_handler.c +++ b/subsys/mgmt/ec_host_cmd/ec_host_cmd_handler.c @@ -15,7 +15,8 @@ LOG_MODULE_REGISTER(host_cmd_handler, CONFIG_EC_HC_LOG_LEVEL); #define EC_HOST_CMD_CHOSEN_BACKEND_LIST \ - zephyr_host_cmd_espi_backend, zephyr_host_cmd_shi_backend, zephyr_host_cmd_uart_backend + zephyr_host_cmd_espi_backend, zephyr_host_cmd_shi_backend, zephyr_host_cmd_uart_backend, \ + zephyr_host_cmd_spi_backend #define EC_HOST_CMD_ADD_CHOSEN(chosen) COND_CODE_1(DT_NODE_EXISTS(DT_CHOSEN(chosen)), (1), (0))