From 8562468c451ce8596c7ae20ebdf5ed44f507854d Mon Sep 17 00:00:00 2001
From: Adam Kondraciuk
Date: Wed, 27 Nov 2024 15:47:30 +0100
Subject: [PATCH 1/6] [nrf fromlist] soc: nordic_nrf: add support for TDM

Add Kconfig options for TDM130 and TDM131.

Upstream PR #: 82144

Signed-off-by: Adam Kondraciuk
---
 soc/nordic/common/Kconfig.peripherals | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/soc/nordic/common/Kconfig.peripherals b/soc/nordic/common/Kconfig.peripherals
index 9ea6ebc45c5..5632857fe86 100644
--- a/soc/nordic/common/Kconfig.peripherals
+++ b/soc/nordic/common/Kconfig.peripherals
@@ -389,6 +389,12 @@ config HAS_HW_NRF_SWI4
 config HAS_HW_NRF_SWI5
 	def_bool $(dt_nodelabel_enabled_with_compat,swi5,$(DT_COMPAT_NORDIC_NRF_SWI))
 
+config HAS_HW_NRF_TDM130
+	def_bool $(dt_nodelabel_enabled_with_compat,tdm130,$(DT_COMPAT_NORDIC_NRF_TDM))
+
+config HAS_HW_NRF_TDM131
+	def_bool $(dt_nodelabel_enabled_with_compat,tdm131,$(DT_COMPAT_NORDIC_NRF_TDM))
+
 config HAS_HW_NRF_TEMP
 	def_bool $(dt_compat_enabled,$(DT_COMPAT_NORDIC_NRF_TEMP))

From da183886c00b9ff95b769c715c22eae658188e3b Mon Sep 17 00:00:00 2001
From: Adam Kondraciuk
Date: Wed, 27 Nov 2024 15:51:39 +0100
Subject: [PATCH 2/6] [nrf fromlist] modules: hal_nordic: add support for TDM

Add HAL glue Kconfig options for TDM130 and TDM131.
Upstream PR #: 82144

Signed-off-by: Adam Kondraciuk
---
 modules/hal_nordic/nrfx/Kconfig                    | 13 ++++
 .../nrfx/nrfx_config_nrf54h20_application.h        | 60 +++++++++++++++++++
 2 files changed, 73 insertions(+)

diff --git a/modules/hal_nordic/nrfx/Kconfig b/modules/hal_nordic/nrfx/Kconfig
index 14087daf8c5..6cfc2457c35 100644
--- a/modules/hal_nordic/nrfx/Kconfig
+++ b/modules/hal_nordic/nrfx/Kconfig
@@ -694,6 +694,19 @@ config NRFX_TBM
 	bool "TBM driver"
 	depends on $(dt_has_compat,$(DT_COMPAT_NORDIC_NRF_TBM))
 
+config NRFX_TDM
+	bool
+
+config NRFX_TDM130
+	bool "TDM130 driver instance"
+	depends on $(dt_nodelabel_has_compat,tdm130,$(DT_COMPAT_NORDIC_NRF_TDM))
+	select NRFX_TDM
+
+config NRFX_TDM131
+	bool "TDM131 driver instance"
+	depends on $(dt_nodelabel_has_compat,tdm131,$(DT_COMPAT_NORDIC_NRF_TDM))
+	select NRFX_TDM
+
 config NRFX_TEMP
 	bool "TEMP driver"
 	depends on $(dt_has_compat,$(DT_COMPAT_NORDIC_NRF_TEMP))

diff --git a/modules/hal_nordic/nrfx/nrfx_config_nrf54h20_application.h b/modules/hal_nordic/nrfx/nrfx_config_nrf54h20_application.h
index 3caed86c48b..e48fdafa2d5 100644
--- a/modules/hal_nordic/nrfx/nrfx_config_nrf54h20_application.h
+++ b/modules/hal_nordic/nrfx/nrfx_config_nrf54h20_application.h
@@ -1265,6 +1265,66 @@
 #define NRFX_SYSTICK_ENABLED 0
 #endif
 
+/**
+ * @brief NRFX_TDM_ENABLED
+ *
+ * Boolean. Accepted values: 0 and 1.
+ */
+#ifndef NRFX_TDM_ENABLED
+#define NRFX_TDM_ENABLED 0
+#endif
+
+/**
+ * @brief NRFX_TDM_DEFAULT_CONFIG_IRQ_PRIORITY
+ *
+ * Integer value. Minimum: 0. Maximum: 7.
+ */
+#ifndef NRFX_TDM_DEFAULT_CONFIG_IRQ_PRIORITY
+#define NRFX_TDM_DEFAULT_CONFIG_IRQ_PRIORITY NRFX_DEFAULT_IRQ_PRIORITY
+#endif
+
+/**
+ * @brief NRFX_TDM_CONFIG_LOG_ENABLED
+ *
+ * Boolean. Accepted values: 0 and 1.
+ */
+#ifndef NRFX_TDM_CONFIG_LOG_ENABLED
+#define NRFX_TDM_CONFIG_LOG_ENABLED 0
+#endif
+
+/**
+ * @brief NRFX_TDM_CONFIG_LOG_LEVEL
+ *
+ * Integer value.
+ * Supported values:
+ * - Off     = 0
+ * - Error   = 1
+ * - Warning = 2
+ * - Info    = 3
+ * - Debug   = 4
+ */
+#ifndef NRFX_TDM_CONFIG_LOG_LEVEL
+#define NRFX_TDM_CONFIG_LOG_LEVEL 3
+#endif
+
+/**
+ * @brief NRFX_TDM130_ENABLED
+ *
+ * Boolean. Accepted values: 0 and 1.
+ */
+#ifndef NRFX_TDM130_ENABLED
+#define NRFX_TDM130_ENABLED 0
+#endif
+
+/**
+ * @brief NRFX_TDM131_ENABLED
+ *
+ * Boolean. Accepted values: 0 and 1.
+ */
+#ifndef NRFX_TDM131_ENABLED
+#define NRFX_TDM131_ENABLED 0
+#endif
+
 /**
  * @brief NRFX_TEMP_ENABLED
  *

From 3a082447066fd999e68db13d5ff78a08fce7baf0 Mon Sep 17 00:00:00 2001
From: Adam Kondraciuk
Date: Wed, 27 Nov 2024 16:00:56 +0100
Subject: [PATCH 3/6] [nrf fromlist] drivers: pinctrl: nrf: Add support for
 TDM peripheral

Add support for configuring pins of the nRF TDM peripheral.

Upstream PR #: 82144

Signed-off-by: Adam Kondraciuk
---
 drivers/pinctrl/pinctrl_nrf.c                      | 45 +++++++++++++++++++
 .../zephyr/dt-bindings/pinctrl/nrf-pinctrl.h       | 14 ++++++
 2 files changed, 59 insertions(+)

diff --git a/drivers/pinctrl/pinctrl_nrf.c b/drivers/pinctrl/pinctrl_nrf.c
index 1e80de8fe67..b854812238a 100644
--- a/drivers/pinctrl/pinctrl_nrf.c
+++ b/drivers/pinctrl/pinctrl_nrf.c
@@ -94,6 +94,10 @@ static const nrf_gpio_pin_drive_t drive_modes[NRF_DRIVE_COUNT] = {
 #define NRF_PSEL_QSPI(reg, line) ((NRF_QSPI_Type *)reg)->PSEL.line
 #endif
 
+#if DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_tdm)
+#define NRF_PSEL_TDM(reg, line) ((NRF_TDM_Type *)reg)->PSEL.line
+#endif
+
 int pinctrl_configure_pins(const pinctrl_soc_pin_t *pins, uint8_t pin_cnt,
 			   uintptr_t reg)
 {
@@ -336,6 +340,47 @@ int pinctrl_configure_pins(const pinctrl_soc_pin_t *pins, uint8_t pin_cnt,
 			input = NRF_GPIO_PIN_INPUT_DISCONNECT;
 			break;
 #endif /* defined(NRF_PSEL_QSPI) */
+#if defined(NRF_PSEL_TDM)
+		case NRF_FUN_TDM_SCK_M:
+			NRF_PSEL_TDM(reg, SCK) = psel;
+			write = 0U;
+			dir = NRF_GPIO_PIN_DIR_OUTPUT;
+			input = NRF_GPIO_PIN_INPUT_DISCONNECT;
+			break;
+		case NRF_FUN_TDM_SCK_S:
+			NRF_PSEL_TDM(reg, SCK) = psel;
+			dir = NRF_GPIO_PIN_DIR_INPUT;
+			input = NRF_GPIO_PIN_INPUT_CONNECT;
+			break;
+		case NRF_FUN_TDM_FSYNC_M:
+			NRF_PSEL_TDM(reg, FSYNC) = psel;
+			write = 0U;
+			dir = NRF_GPIO_PIN_DIR_OUTPUT;
+			input = NRF_GPIO_PIN_INPUT_DISCONNECT;
+			break;
+		case NRF_FUN_TDM_FSYNC_S:
+			NRF_PSEL_TDM(reg, FSYNC) = psel;
+			dir = NRF_GPIO_PIN_DIR_INPUT;
+			input = NRF_GPIO_PIN_INPUT_CONNECT;
+			break;
+		case NRF_FUN_TDM_SDIN:
+			NRF_PSEL_TDM(reg, SDIN) = psel;
+			dir = NRF_GPIO_PIN_DIR_INPUT;
+			input = NRF_GPIO_PIN_INPUT_CONNECT;
+			break;
+		case NRF_FUN_TDM_SDOUT:
+			NRF_PSEL_TDM(reg, SDOUT) = psel;
+			write = 0U;
+			dir = NRF_GPIO_PIN_DIR_OUTPUT;
+			input = NRF_GPIO_PIN_INPUT_DISCONNECT;
+			break;
+		case NRF_FUN_TDM_MCK:
+			NRF_PSEL_TDM(reg, MCK) = psel;
+			write = 0U;
+			dir = NRF_GPIO_PIN_DIR_OUTPUT;
+			input = NRF_GPIO_PIN_INPUT_DISCONNECT;
+			break;
+#endif /* defined(NRF_PSEL_TDM) */
 #if DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_can)
 	/* Pin routing is controlled by secure domain, via UICR */
 		case NRF_FUN_CAN_TX:
diff --git a/include/zephyr/dt-bindings/pinctrl/nrf-pinctrl.h b/include/zephyr/dt-bindings/pinctrl/nrf-pinctrl.h
index 4611baef95c..34634f1faf2 100644
--- a/include/zephyr/dt-bindings/pinctrl/nrf-pinctrl.h
+++ b/include/zephyr/dt-bindings/pinctrl/nrf-pinctrl.h
@@ -162,6 +162,20 @@
 #define NRF_FUN_CAN_TX 46U
 /** CAN RX */
 #define NRF_FUN_CAN_RX 47U
+/** TDM SCK in master mode */
+#define NRF_FUN_TDM_SCK_M 48U
+/** TDM SCK in slave mode */
+#define NRF_FUN_TDM_SCK_S 49U
+/** TDM LRCK in master mode */
+#define NRF_FUN_TDM_FSYNC_M 50U
+/** TDM LRCK in slave mode */
+#define NRF_FUN_TDM_FSYNC_S 51U
+/** TDM SDIN */
+#define NRF_FUN_TDM_SDIN 52U
+/** TDM SDOUT */
+#define NRF_FUN_TDM_SDOUT 53U
+/** TDM MCK */
+#define NRF_FUN_TDM_MCK 54U
 
 /** @} */

From d8678508888365720b4668ff16fca244a1e13dd4 Mon Sep 17 00:00:00 2001
From: Adam Kondraciuk
Date: Wed, 27 Nov 2024 16:06:44 +0100
Subject: [PATCH 4/6] [nrf fromlist] dts: nordic: nrf54h20: add TDM support
 for nRF54H20

Add TDM130 and TDM131 nodes.
Upstream PR #: 82144 Signed-off-by: Adam Kondraciuk --- dts/bindings/i2s/nordic,nrf-tdm.yaml | 48 ++++++++++++++++++++++++++++ dts/common/nordic/nrf54h20.dtsi | 28 ++++++++++++++++ 2 files changed, 76 insertions(+) create mode 100644 dts/bindings/i2s/nordic,nrf-tdm.yaml diff --git a/dts/bindings/i2s/nordic,nrf-tdm.yaml b/dts/bindings/i2s/nordic,nrf-tdm.yaml new file mode 100644 index 00000000000..eb9d7f68644 --- /dev/null +++ b/dts/bindings/i2s/nordic,nrf-tdm.yaml @@ -0,0 +1,48 @@ +# Copyright (c) 2024 Nordic Semiconductor ASA +# SPDX-License-Identifier: Apache-2.0 + +description: Nordic TDM (Time division multiplexed audio interface) + +compatible: "nordic,nrf-tdm" + +include: [i2s-controller.yaml, pinctrl-device.yaml, nordic-clockpin.yaml, "memory-region.yaml"] + +properties: + reg: + required: true + + interrupts: + required: true + + mck-frequency: + type: int + description: | + Frequency of the MCK clock. Configured independently of SCK. + + pinctrl-0: + required: true + + pinctrl-names: + required: true + + easydma-maxcnt-bits: + type: int + required: true + description: | + Maximum number of bits available in the EasyDMA MAXCNT register. This + property must be set at SoC level DTS files. + + clock-source: + type: string + default: "FLL16M" + description: | + Clock source to be used by the TDM peripheral. 
The following options + are available: + - "FLL16M": 16 MHz peripheral clock + - "ACLK": Audio PLL clock with configurable frequency (frequency for + this clock must be set via the "hfclkaudio-frequency" property + in the "nordic,nrf-clock" node); this clock source is only available + in the nRF53 Series SoCs and it requires the use of HFXO + enum: + - "FLL16M" + - "ACLK" diff --git a/dts/common/nordic/nrf54h20.dtsi b/dts/common/nordic/nrf54h20.dtsi index a9523f15465..c8dac8dac91 100644 --- a/dts/common/nordic/nrf54h20.dtsi +++ b/dts/common/nordic/nrf54h20.dtsi @@ -1465,6 +1465,34 @@ endtx-stoptx-supported; frame-timeout-supported; }; + + tdm130: tdm@992000 { + compatible = "nordic,nrf-tdm"; + easydma-maxcnt-bits = <15>; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x992000 0x1000>; + interrupts = <402 NRF_DEFAULT_IRQ_PRIORITY>; + status = "disabled"; + clocks = <&fll16m>; + power-domains = <&gpd NRF_GPD_SLOW_ACTIVE>; + nordic,clockpin-enable = , + ; + }; + + tdm131: tdm@997000 { + compatible = "nordic,nrf-tdm"; + easydma-maxcnt-bits = <15>; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x997000 0x1000>; + interrupts = <407 NRF_DEFAULT_IRQ_PRIORITY>; + status = "disabled"; + clocks = <&fll16m>; + power-domains = <&gpd NRF_GPD_SLOW_ACTIVE>; + nordic,clockpin-enable = , + ; + }; }; }; From f762835840e44154d3625c77bd70d75bae136571 Mon Sep 17 00:00:00 2001 From: Adam Kondraciuk Date: Wed, 27 Nov 2024 16:19:17 +0100 Subject: [PATCH 5/6] [nrf fromlist] drivers: i2s: Add support for nRF TDM peripherals Add a shim that allows using the nRF TDM (Time division multiplexed audio interface) HAL by I2S Zephyr API. 
Upstream PR #: 82144 Signed-off-by: Adam Kondraciuk --- drivers/i2s/CMakeLists.txt | 1 + drivers/i2s/Kconfig.nrfx | 21 + drivers/i2s/i2s_nrfx_tdm.c | 1048 ++++++++++++++++++++++++++++++++++++ 3 files changed, 1070 insertions(+) create mode 100644 drivers/i2s/i2s_nrfx_tdm.c diff --git a/drivers/i2s/CMakeLists.txt b/drivers/i2s/CMakeLists.txt index 90b8d737a11..40ad7169b2e 100644 --- a/drivers/i2s/CMakeLists.txt +++ b/drivers/i2s/CMakeLists.txt @@ -11,5 +11,6 @@ zephyr_library_sources_ifdef(CONFIG_I2S_STM32 i2s_ll_stm32.c) zephyr_library_sources_ifdef(CONFIG_I2S_LITEX i2s_litex.c) zephyr_library_sources_ifdef(CONFIG_I2S_MCUX_FLEXCOMM i2s_mcux_flexcomm.c) zephyr_library_sources_ifdef(CONFIG_I2S_NRFX i2s_nrfx.c) +zephyr_library_sources_ifdef(CONFIG_TDM_NRFX i2s_nrfx_tdm.c) zephyr_library_sources_ifdef(CONFIG_I2S_MCUX_SAI i2s_mcux_sai.c) zephyr_library_sources_ifdef(CONFIG_I2S_ESP32 i2s_esp32.c) diff --git a/drivers/i2s/Kconfig.nrfx b/drivers/i2s/Kconfig.nrfx index b36f3eb9c64..4b7a9f6177a 100644 --- a/drivers/i2s/Kconfig.nrfx +++ b/drivers/i2s/Kconfig.nrfx @@ -22,3 +22,24 @@ config I2S_NRFX_TX_BLOCK_COUNT default 4 endif # I2S_NRFX + +menuconfig TDM_NRFX + bool "nRF TDM nrfx driver" + default y + depends on DT_HAS_NORDIC_NRF_TDM_ENABLED + select NRFX_TDM130 if HAS_HW_NRF_TDM130 + select PINCTRL + help + Enable support for nrfx TDM driver for nRF MCU series. 
+ +if TDM_NRFX + +config TDM_NRFX_RX_BLOCK_COUNT + int "RX queue length" + default 4 + +config TDM_NRFX_TX_BLOCK_COUNT + int "TX queue length" + default 4 + +endif # TDM_NRFX diff --git a/drivers/i2s/i2s_nrfx_tdm.c b/drivers/i2s/i2s_nrfx_tdm.c new file mode 100644 index 00000000000..e12cb580037 --- /dev/null +++ b/drivers/i2s/i2s_nrfx_tdm.c @@ -0,0 +1,1048 @@ +/* + * Copyright (c) 2024 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +LOG_MODULE_REGISTER(tdm_nrfx, CONFIG_I2S_LOG_LEVEL); + +/* The application must provide buffers that are to be used in the next + * part of the transfer. + */ +#define NRFX_TDM_STATUS_NEXT_BUFFERS_NEEDED (1UL << 0) + +/* The TDM peripheral has been stopped and all buffers that were passed + * to the driver have been released. + */ +#define NRFX_TDM_STATUS_TRANSFER_STOPPED (1UL << 1) + +typedef struct { + uint32_t *p_rx_buffer; + uint32_t const *p_tx_buffer; + uint16_t buffer_size; +} tdm_buffers_t; + +typedef void (*tdm_data_handler_t)(tdm_buffers_t const *p_released, uint32_t status); + +typedef struct { + tdm_data_handler_t handler; + bool use_rx: 1; + bool use_tx: 1; + bool rx_ready: 1; + bool tx_ready: 1; + bool buffers_needed: 1; + bool buffers_reused: 1; + tdm_buffers_t next_buffers; + tdm_buffers_t current_buffers; +} tdm_ctrl_t; + +struct stream_cfg { + struct i2s_config cfg; + nrf_tdm_config_t nrfx_cfg; +}; + +struct tdm_buf { + void *mem_block; + size_t size; +}; + +struct tdm_drv_data { + struct onoff_manager *clk_mgr; + struct onoff_client clk_cli; + struct stream_cfg tx; + struct k_msgq tx_queue; + struct stream_cfg rx; + struct k_msgq rx_queue; + NRF_TDM_Type *p_reg; + const uint32_t *last_tx_buffer; + enum i2s_state state; + enum i2s_dir active_dir; + bool stop; /* stop after the current (TX or RX) block */ + bool discard_rx; /* discard further RX blocks */ + volatile bool next_tx_buffer_needed; 
+ tdm_ctrl_t *control_data; + bool tx_configured: 1; + bool rx_configured: 1; + bool request_clock: 1; +}; + +struct tdm_drv_cfg { + tdm_data_handler_t data_handler; + const struct pinctrl_dev_config *pcfg; + uint32_t mck_frequency; + enum clock_source { + FLL16M, + ACLK + } clk_src; +}; + +void tdm_irq_handler(const struct device *dev) +{ + struct tdm_drv_data *drv_data = dev->data; + NRF_TDM_Type *p_reg = drv_data->p_reg; + tdm_ctrl_t *ctrl_data = drv_data->control_data; + uint32_t event_mask = 0; + + if (nrf_tdm_event_check(p_reg, NRF_TDM_EVENT_MAXCNT)) { + nrf_tdm_event_clear(p_reg, NRF_TDM_EVENT_MAXCNT); + } + if (nrf_tdm_event_check(p_reg, NRF_TDM_EVENT_TXPTRUPD)) { + nrf_tdm_event_clear(p_reg, NRF_TDM_EVENT_TXPTRUPD); + event_mask |= NRFY_EVENT_TO_INT_BITMASK(NRF_TDM_EVENT_TXPTRUPD); + ctrl_data->tx_ready = true; + if (ctrl_data->use_tx && ctrl_data->buffers_needed) { + ctrl_data->buffers_reused = true; + } + } + if (nrf_tdm_event_check(p_reg, NRF_TDM_EVENT_RXPTRUPD)) { + nrf_tdm_event_clear(p_reg, NRF_TDM_EVENT_RXPTRUPD); + event_mask |= NRFY_EVENT_TO_INT_BITMASK(NRF_TDM_EVENT_RXPTRUPD); + ctrl_data->rx_ready = true; + if (ctrl_data->use_rx && ctrl_data->buffers_needed) { + ctrl_data->buffers_reused = true; + } + } + if (nrf_tdm_event_check(p_reg, NRF_TDM_EVENT_STOPPED)) { + nrf_tdm_event_clear(p_reg, NRF_TDM_EVENT_STOPPED); + event_mask |= NRFY_EVENT_TO_INT_BITMASK(NRF_TDM_EVENT_STOPPED); + nrf_tdm_int_disable(p_reg, NRF_TDM_INT_STOPPED_MASK_MASK); + nrf_tdm_disable(p_reg); + /* When stopped, release all buffers, including these scheduled for + * the next part of the transfer, and signal that the transfer has + * finished. 
+ */ + ctrl_data->handler(&ctrl_data->current_buffers, 0); + ctrl_data->handler(&ctrl_data->next_buffers, NRFX_TDM_STATUS_TRANSFER_STOPPED); + } else { + /* Check if the requested transfer has been completed: + * - full-duplex mode + */ + if ((ctrl_data->use_tx && ctrl_data->use_rx && ctrl_data->tx_ready && + ctrl_data->rx_ready) || + /* - TX only mode */ + (!ctrl_data->use_rx && ctrl_data->tx_ready) || + /* - RX only mode */ + (!ctrl_data->use_tx && ctrl_data->rx_ready)) { + ctrl_data->tx_ready = false; + ctrl_data->rx_ready = false; + + /* If the application did not supply the buffers for the next + * part of the transfer until this moment, the current buffers + * cannot be released, since the I2S peripheral already started + * using them. Signal this situation to the application by + * passing NULL instead of the structure with released buffers. + */ + if (ctrl_data->buffers_reused) { + ctrl_data->buffers_reused = false; + /* This will most likely be set at this point. However, there is + * a small time window between TXPTRUPD and RXPTRUPD events, + * and it is theoretically possible that next buffers will be + * set in this window, so to be sure this flag is set to true, + * set it explicitly. + */ + ctrl_data->buffers_needed = true; + ctrl_data->handler(NULL, NRFX_TDM_STATUS_NEXT_BUFFERS_NEEDED); + } else { + /* Buffers that have been used by the I2S peripheral (current) + * are now released and will be returned to the application, + * and the ones scheduled to be used as next become the current + * ones. 
+ */ + tdm_buffers_t released_buffers = ctrl_data->current_buffers; + + ctrl_data->current_buffers = ctrl_data->next_buffers; + ctrl_data->next_buffers.p_rx_buffer = NULL; + ctrl_data->next_buffers.p_tx_buffer = NULL; + ctrl_data->buffers_needed = true; + ctrl_data->handler(&released_buffers, + NRFX_TDM_STATUS_NEXT_BUFFERS_NEEDED); + } + } + } +} + +static uint32_t div_calculate(uint32_t src_freq, uint32_t requested_clk_freq) +{ + enum { + MCKCONST = 1048576 + }; + /* As specified in the PS: + * + * DIV = 4096 * floor(f_MCK * 1048576 / + * (f_source + f_MCK / 2)) + * f_actual = f_source / + * floor(1048576 * 4096 / DIV) + */ + + uint32_t ck_div = (uint32_t)(((uint64_t)requested_clk_freq * MCKCONST) / + (src_freq + requested_clk_freq / 2)); + return (ck_div * 4096); +} + +static bool get_next_tx_buffer(struct tdm_drv_data *drv_data, tdm_buffers_t *buffers) +{ + struct tdm_buf buf; + int ret = k_msgq_get(&drv_data->tx_queue, &buf, K_NO_WAIT); + + if (ret == 0) { + buffers->p_tx_buffer = buf.mem_block; + buffers->buffer_size = buf.size / sizeof(uint32_t); + } + return (ret == 0); +} + +static bool get_next_rx_buffer(struct tdm_drv_data *drv_data, tdm_buffers_t *buffers) +{ + int ret = k_mem_slab_alloc(drv_data->rx.cfg.mem_slab, (void **)&buffers->p_rx_buffer, + K_NO_WAIT); + if (ret < 0) { + LOG_ERR("Failed to allocate next RX buffer: %d", ret); + return false; + } + + return true; +} + +static void free_tx_buffer(struct tdm_drv_data *drv_data, const void *buffer) +{ + k_mem_slab_free(drv_data->tx.cfg.mem_slab, (void *)buffer); + LOG_DBG("Freed TX %p", buffer); +} + +static void free_rx_buffer(struct tdm_drv_data *drv_data, void *buffer) +{ + k_mem_slab_free(drv_data->rx.cfg.mem_slab, buffer); + LOG_DBG("Freed RX %p", buffer); +} + +static void tdm_start(struct tdm_drv_data *drv_data, tdm_buffers_t const *p_initial_buffers) +{ + NRF_TDM_Type *p_reg = drv_data->p_reg; + tdm_ctrl_t *ctrl_data = drv_data->control_data; + + __ASSERT_NO_MSG(p_initial_buffers->p_rx_buffer 
!= NULL || + p_initial_buffers->p_tx_buffer != NULL); + ctrl_data->use_rx = (p_initial_buffers->p_rx_buffer != NULL); + ctrl_data->use_tx = (p_initial_buffers->p_tx_buffer != NULL); + ctrl_data->rx_ready = false; + ctrl_data->tx_ready = false; + ctrl_data->buffers_needed = false; + + ctrl_data->next_buffers = *p_initial_buffers; + ctrl_data->current_buffers.p_rx_buffer = NULL; + ctrl_data->current_buffers.p_tx_buffer = NULL; + nrf_tdm_enable(p_reg); + + nrf_tdm_event_clear(p_reg, NRF_TDM_EVENT_RXPTRUPD); + nrf_tdm_event_clear(p_reg, NRF_TDM_EVENT_TXPTRUPD); + + nrf_tdm_int_enable( + p_reg, + (p_initial_buffers->p_rx_buffer ? NRF_TDM_INT_RXPTRUPD_MASK_MASK : 0UL) | + (p_initial_buffers->p_tx_buffer ? NRF_TDM_INT_TXPTRUPD_MASK_MASK : 0UL) | + NRF_TDM_INT_STOPPED_MASK_MASK); + + nrf_tdm_tx_count_set(p_reg, p_initial_buffers->buffer_size); + nrf_tdm_rx_count_set(p_reg, p_initial_buffers->buffer_size); + nrf_tdm_rx_buffer_set(p_reg, p_initial_buffers->p_rx_buffer); + nrf_tdm_tx_buffer_set(p_reg, p_initial_buffers->p_tx_buffer); + nrf_tdm_task_trigger(p_reg, NRF_TDM_TASK_START); +} + +static void tdm_stop(NRF_TDM_Type *p_reg) +{ + nrf_tdm_int_disable(p_reg, NRF_TDM_INT_RXPTRUPD_MASK_MASK | NRF_TDM_INT_TXPTRUPD_MASK_MASK); + + nrf_tdm_task_trigger(p_reg, NRF_TDM_TASK_STOP); +} + +static bool next_buffers_set(struct tdm_drv_data *drv_data, tdm_buffers_t const *p_buffers) +{ + NRF_TDM_Type *p_reg = drv_data->p_reg; + tdm_ctrl_t *ctrl_data = drv_data->control_data; + nrf_tdm_rxtxen_t dir = NRF_TDM_RXTXEN_DUPLEX; + + __ASSERT_NO_MSG(p_buffers->p_rx_buffer != NULL || p_buffers->p_tx_buffer != NULL); + + if (!ctrl_data->buffers_needed) { + return false; + } + + nrf_tdm_tx_count_set(p_reg, p_buffers->buffer_size); + nrf_tdm_rx_count_set(p_reg, p_buffers->buffer_size); + nrf_tdm_rx_buffer_set(p_reg, p_buffers->p_rx_buffer); + nrf_tdm_tx_buffer_set(p_reg, p_buffers->p_tx_buffer); + + if (p_buffers->p_rx_buffer == NULL) { + dir = NRF_TDM_RXTXEN_TX; + } else if 
(p_buffers->p_tx_buffer == NULL) { + dir = NRF_TDM_RXTXEN_RX; + } + nrf_tdm_transfer_direction_set(p_reg, dir); + + ctrl_data->next_buffers = *p_buffers; + ctrl_data->buffers_needed = false; + + return true; +} + +static bool supply_next_buffers(struct tdm_drv_data *drv_data, tdm_buffers_t *next) +{ + if (drv_data->active_dir != I2S_DIR_TX) { /* -> RX active */ + if (!get_next_rx_buffer(drv_data, next)) { + drv_data->state = I2S_STATE_ERROR; + tdm_stop(drv_data->p_reg); + return false; + } + /* Set buffer size if there is no TX buffer (which effectively + * controls how many bytes will be received). + */ + if (drv_data->active_dir == I2S_DIR_RX) { + next->buffer_size = drv_data->rx.cfg.block_size / sizeof(uint32_t); + } + } + + drv_data->last_tx_buffer = next->p_tx_buffer; + + LOG_DBG("Next buffers: %p/%p", next->p_tx_buffer, next->p_rx_buffer); + return next_buffers_set(drv_data, next); +} + +static void purge_queue(const struct device *dev, enum i2s_dir dir) +{ + struct tdm_drv_data *drv_data = dev->data; + struct tdm_buf buf; + + if (dir == I2S_DIR_TX || dir == I2S_DIR_BOTH) { + while (k_msgq_get(&drv_data->tx_queue, &buf, K_NO_WAIT) == 0) { + free_tx_buffer(drv_data, buf.mem_block); + } + } + + if (dir == I2S_DIR_RX || dir == I2S_DIR_BOTH) { + while (k_msgq_get(&drv_data->rx_queue, &buf, K_NO_WAIT) == 0) { + free_rx_buffer(drv_data, buf.mem_block); + } + } +} + +static void tdm_uninit(struct tdm_drv_data *drv_data) +{ + NRF_TDM_Type *p_reg = drv_data->p_reg; + + tdm_stop(p_reg); + NRFX_IRQ_DISABLE(nrfx_get_irq_number(p_reg)); +} + +static int tdm_nrfx_configure(const struct device *dev, enum i2s_dir dir, + const struct i2s_config *tdm_cfg) +{ + struct tdm_drv_data *drv_data = dev->data; + const struct tdm_drv_cfg *drv_cfg = dev->config; + nrf_tdm_config_t nrfx_cfg; + uint32_t chan_mask = 0; + + if (drv_data->state != I2S_STATE_READY) { + LOG_ERR("Cannot configure in state: %d", drv_data->state); + return -EINVAL; + } + + if (tdm_cfg->frame_clk_freq == 0) { /* 
-> reset state */ + purge_queue(dev, dir); + if (dir == I2S_DIR_TX || dir == I2S_DIR_BOTH) { + drv_data->tx_configured = false; + memset(&drv_data->tx, 0, sizeof(drv_data->tx)); + } + if (dir == I2S_DIR_RX || dir == I2S_DIR_BOTH) { + drv_data->rx_configured = false; + memset(&drv_data->rx, 0, sizeof(drv_data->rx)); + } + return 0; + } + + __ASSERT_NO_MSG(tdm_cfg->mem_slab != NULL && tdm_cfg->block_size != 0); + + if ((tdm_cfg->block_size % sizeof(uint32_t)) != 0) { + LOG_ERR("This device can transfer only full 32-bit words"); + return -EINVAL; + } + + switch (tdm_cfg->word_size) { + case 8: + nrfx_cfg.sample_width = NRF_TDM_SWIDTH_8BIT; + break; + case 16: + nrfx_cfg.sample_width = NRF_TDM_SWIDTH_16BIT; + break; + case 24: + nrfx_cfg.sample_width = NRF_TDM_SWIDTH_24BIT; + break; + case 32: + nrfx_cfg.sample_width = NRF_TDM_SWIDTH_32BIT; + break; + default: + LOG_ERR("Unsupported word size: %u", tdm_cfg->word_size); + return -EINVAL; + } + + switch (tdm_cfg->format & I2S_FMT_DATA_FORMAT_MASK) { + case I2S_FMT_DATA_FORMAT_I2S: + nrfx_cfg.alignment = NRF_TDM_ALIGN_LEFT; + nrfx_cfg.fsync_polarity = NRF_TDM_POLARITY_NEGEDGE; + nrfx_cfg.sck_polarity = NRF_TDM_POLARITY_POSEDGE; + nrfx_cfg.fsync_duration = NRF_TDM_FSYNC_DURATION_CHANNEL; + nrfx_cfg.channel_delay = NRF_TDM_CHANNEL_DELAY_1CK; + break; + case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED: + nrfx_cfg.alignment = NRF_TDM_ALIGN_LEFT; + nrfx_cfg.fsync_polarity = NRF_TDM_POLARITY_POSEDGE; + nrfx_cfg.sck_polarity = NRF_TDM_POLARITY_POSEDGE; + nrfx_cfg.fsync_duration = NRF_TDM_FSYNC_DURATION_CHANNEL; + nrfx_cfg.channel_delay = NRF_TDM_CHANNEL_DELAY_NONE; + break; + case I2S_FMT_DATA_FORMAT_RIGHT_JUSTIFIED: + nrfx_cfg.alignment = NRF_TDM_ALIGN_RIGHT; + nrfx_cfg.fsync_polarity = NRF_TDM_POLARITY_POSEDGE; + nrfx_cfg.sck_polarity = NRF_TDM_POLARITY_POSEDGE; + nrfx_cfg.fsync_duration = NRF_TDM_FSYNC_DURATION_CHANNEL; + nrfx_cfg.channel_delay = NRF_TDM_CHANNEL_DELAY_NONE; + break; + default: + LOG_ERR("Unsupported data format: 
0x%02x", tdm_cfg->format); + return -EINVAL; + } + + if ((tdm_cfg->format & I2S_FMT_DATA_ORDER_LSB) || (tdm_cfg->format & I2S_FMT_BIT_CLK_INV) || + (tdm_cfg->format & I2S_FMT_FRAME_CLK_INV)) { + LOG_ERR("Unsupported stream format: 0x%02x", tdm_cfg->format); + return -EINVAL; + } + + if (tdm_cfg->channels == 2) { + nrfx_cfg.num_of_channels = NRF_TDM_CHANNELS_COUNT_2; + } else if (tdm_cfg->channels == 1) { + nrfx_cfg.num_of_channels = NRF_TDM_CHANNELS_COUNT_1; + } else { + LOG_ERR("Unsupported number of channels: %u", tdm_cfg->channels); + return -EINVAL; + } + chan_mask = BIT_MASK(tdm_cfg->channels); + + if ((tdm_cfg->options & I2S_OPT_BIT_CLK_SLAVE) && + (tdm_cfg->options & I2S_OPT_FRAME_CLK_SLAVE)) { + nrfx_cfg.mode = NRF_TDM_MODE_SLAVE; + } else if (!(tdm_cfg->options & I2S_OPT_BIT_CLK_SLAVE) && + !(tdm_cfg->options & I2S_OPT_FRAME_CLK_SLAVE)) { + nrfx_cfg.mode = NRF_TDM_MODE_MASTER; + } else { + LOG_ERR("Unsupported operation mode: 0x%02x", tdm_cfg->options); + return -EINVAL; + } + + nrfx_cfg.mck_setup = 0; + if (nrfx_cfg.mode == NRF_TDM_MODE_MASTER) { + uint32_t sck = tdm_cfg->word_size * tdm_cfg->frame_clk_freq * tdm_cfg->channels; +#warning check description below + const uint32_t src_freq = + (drv_cfg->clk_src == ACLK) + /* The I2S_NRFX_DEVICE() macro contains build assertions that + * make sure that the ACLK clock source is only used when it is + * available and only with the "hfclkaudio-frequency" property + * defined, but the default value of 0 here needs to be used to + * prevent compilation errors when the property is not defined + * (this expression will be eventually optimized away then). + */ + ? DT_PROP_OR(DT_NODELABEL(clock), hfclkaudio_frequency, 0) + : DT_PROP(DT_NODELABEL(fll16m), clock_frequency); + + /* Unless the FLL16M source is used, + * it is required to request the proper clock to be running + * before starting the transfer itself. 
+ */ + drv_data->request_clock = (drv_cfg->clk_src != FLL16M); + nrfx_cfg.sck_setup = div_calculate(src_freq, sck); + + if (((nrf_tdm_mck_pin_get(drv_data->p_reg) & TDM_PSEL_MCK_CONNECT_Msk) == + TDM_PSEL_MCK_CONNECT_Connected << TDM_PSEL_MCK_CONNECT_Pos) && + drv_cfg->mck_frequency != 0) { + nrfx_cfg.mck_setup = div_calculate(src_freq, drv_cfg->mck_frequency); + } + } else { + drv_data->request_clock = false; + } + + if ((tdm_cfg->options & I2S_OPT_LOOPBACK) || (tdm_cfg->options & I2S_OPT_PINGPONG)) { + LOG_ERR("Unsupported options: 0x%02x", tdm_cfg->options); + return -EINVAL; + } + + if (dir == I2S_DIR_TX || dir == I2S_DIR_BOTH) { + nrfx_cfg.channels = (chan_mask << TDM_CONFIG_CHANNEL_MASK_Tx0Enable_Pos); + drv_data->tx.cfg = *tdm_cfg; + drv_data->tx.nrfx_cfg = nrfx_cfg; + drv_data->tx_configured = true; + } + + if (dir == I2S_DIR_RX || dir == I2S_DIR_BOTH) { + nrfx_cfg.channels = (chan_mask << TDM_CONFIG_CHANNEL_MASK_Rx0Enable_Pos); + drv_data->rx.cfg = *tdm_cfg; + drv_data->rx.nrfx_cfg = nrfx_cfg; + drv_data->rx_configured = true; + } + return 0; +} + +static const struct i2s_config *tdm_nrfx_config_get(const struct device *dev, enum i2s_dir dir) +{ + struct tdm_drv_data *drv_data = dev->data; + + if (dir == I2S_DIR_TX && drv_data->tx_configured) { + return &drv_data->tx.cfg; + } + if (dir == I2S_DIR_RX && drv_data->rx_configured) { + return &drv_data->rx.cfg; + } + + return NULL; +} + +static int tdm_nrfx_read(const struct device *dev, void **mem_block, size_t *size) +{ + struct tdm_drv_data *drv_data = dev->data; + struct tdm_buf buf; + int ret; + + if (!drv_data->rx_configured) { + LOG_ERR("Device is not configured"); + return -EIO; + } + + ret = k_msgq_get(&drv_data->rx_queue, &buf, + (drv_data->state == I2S_STATE_ERROR) + ? 
K_NO_WAIT + : SYS_TIMEOUT_MS(drv_data->rx.cfg.timeout)); + if (ret == -ENOMSG) { + return -EIO; + } + + LOG_DBG("Released RX %p", buf.mem_block); + + if (ret == 0) { + *mem_block = buf.mem_block; + *size = buf.size; + } + return ret; +} + +static int tdm_nrfx_write(const struct device *dev, void *mem_block, size_t size) +{ + struct tdm_drv_data *drv_data = dev->data; + struct tdm_buf buf = {.mem_block = mem_block, .size = size}; + int ret; + + if (!drv_data->tx_configured) { + LOG_ERR("Device is not configured"); + return -EIO; + } + + if (drv_data->state != I2S_STATE_RUNNING && drv_data->state != I2S_STATE_READY) { + LOG_ERR("Cannot write in state: %d", drv_data->state); + return -EIO; + } + + if (size > drv_data->tx.cfg.block_size || size < sizeof(uint32_t)) { + LOG_ERR("This device can only write blocks up to %u bytes", + drv_data->tx.cfg.block_size); + return -EIO; + } + + ret = k_msgq_put(&drv_data->tx_queue, &buf, SYS_TIMEOUT_MS(drv_data->tx.cfg.timeout)); + if (ret < 0) { + return ret; + } + + /* Check if interrupt wanted to get next TX buffer before current buffer + * was queued. Do not move this check before queuing because doing so + * opens the possibility for a race condition between this function and + * data_handler() that is called in interrupt context. + */ + if (drv_data->state == I2S_STATE_RUNNING && drv_data->next_tx_buffer_needed) { + tdm_buffers_t next = {0}; + + if (!get_next_tx_buffer(drv_data, &next)) { + /* Log error because this is definitely unexpected. + * Do not return error because the caller is no longer + * responsible for releasing the buffer. 
+ */ + LOG_ERR("Cannot reacquire queued buffer"); + return 0; + } + + drv_data->next_tx_buffer_needed = false; + + LOG_DBG("Next TX %p", next.p_tx_buffer); + + if (!supply_next_buffers(drv_data, &next)) { + LOG_ERR("Cannot supply buffer"); + return -EIO; + } + } + return 0; +} + +static int start_transfer(struct tdm_drv_data *drv_data) +{ + tdm_buffers_t initial_buffers = {0}; + int ret = 0; + + if (drv_data->active_dir != I2S_DIR_RX && /* -> TX to be started */ + !get_next_tx_buffer(drv_data, &initial_buffers)) { + LOG_ERR("No TX buffer available"); + ret = -ENOMEM; + } else if (drv_data->active_dir != I2S_DIR_TX && /* -> RX to be started */ + !get_next_rx_buffer(drv_data, &initial_buffers)) { + /* Failed to allocate next RX buffer */ + ret = -ENOMEM; + } else { + /* It is necessary to set buffer size here only for I2S_DIR_RX, + * because only then the get_next_tx_buffer() call in the if + * condition above gets short-circuited. + */ + if (drv_data->active_dir == I2S_DIR_RX) { + initial_buffers.buffer_size = + drv_data->rx.cfg.block_size / sizeof(uint32_t); + } + + drv_data->last_tx_buffer = initial_buffers.p_tx_buffer; + + tdm_start(drv_data, &initial_buffers); + } + if (ret < 0) { + tdm_uninit(drv_data); + if (drv_data->request_clock) { + (void)onoff_release(drv_data->clk_mgr); + } + + if (initial_buffers.p_tx_buffer) { + free_tx_buffer(drv_data, initial_buffers.p_tx_buffer); + } + if (initial_buffers.p_rx_buffer) { + free_rx_buffer(drv_data, initial_buffers.p_rx_buffer); + } + + drv_data->state = I2S_STATE_ERROR; + } + return ret; +} + +static void tdm_init(struct tdm_drv_data *drv_data, nrf_tdm_config_t const *p_config, + tdm_data_handler_t handler) +{ + tdm_ctrl_t *ctrl_data = drv_data->control_data; + NRF_TDM_Type *p_reg = drv_data->p_reg; + + nrf_tdm_configure(p_reg, p_config); + nrf_tdm_mck_set(drv_data->p_reg, p_config->mck_setup != 0); + + ctrl_data->handler = handler; + + nrf_tdm_event_clear(p_reg, NRF_TDM_EVENT_RXPTRUPD); + nrf_tdm_event_clear(p_reg, 
NRF_TDM_EVENT_TXPTRUPD); + nrf_tdm_event_clear(p_reg, NRF_TDM_EVENT_STOPPED); + NRFX_IRQ_ENABLE(nrfx_get_irq_number(p_reg)); +} + +static void clock_started_callback(struct onoff_manager *mgr, struct onoff_client *cli, + uint32_t state, int res) +{ + struct tdm_drv_data *drv_data = CONTAINER_OF(cli, struct tdm_drv_data, clk_cli); + + /* The driver state can be set back to READY at this point if the DROP + * command was triggered before the clock has started. Do not start + * the actual transfer in such case. + */ + if (drv_data->state == I2S_STATE_READY) { + tdm_uninit(drv_data); + (void)onoff_release(drv_data->clk_mgr); + } else { + (void)start_transfer(drv_data); + } +} + +static int trigger_start(const struct device *dev) +{ + struct tdm_drv_data *drv_data = dev->data; + const struct tdm_drv_cfg *drv_cfg = dev->config; + int ret; + const nrf_tdm_config_t *nrfx_cfg = (drv_data->active_dir == I2S_DIR_TX) + ? &drv_data->tx.nrfx_cfg + : &drv_data->rx.nrfx_cfg; + + tdm_init(drv_data, nrfx_cfg, drv_cfg->data_handler); + + drv_data->state = I2S_STATE_RUNNING; + + nrf_tdm_sck_configure(drv_data->p_reg, + drv_cfg->clk_src == ACLK ? NRF_TDM_SRC_ACLK : NRF_TDM_SRC_PCLK32M, + false); + + nrf_tdm_mck_configure(drv_data->p_reg, + drv_cfg->clk_src == ACLK ? NRF_TDM_SRC_ACLK : NRF_TDM_SRC_PCLK32M, + false); + /* If it is required to use certain HF clock, request it to be running + * first. If not, start the transfer directly. 
+ */ + if (drv_data->request_clock) { + sys_notify_init_callback(&drv_data->clk_cli.notify, clock_started_callback); + ret = onoff_request(drv_data->clk_mgr, &drv_data->clk_cli); + if (ret < 0) { + tdm_uninit(drv_data); + drv_data->state = I2S_STATE_READY; + + LOG_ERR("Failed to request clock: %d", ret); + return -EIO; + } + } else { + ret = start_transfer(drv_data); + if (ret < 0) { + return ret; + } + } + + return 0; +} + +static int tdm_nrfx_trigger(const struct device *dev, enum i2s_dir dir, enum i2s_trigger_cmd cmd) +{ + struct tdm_drv_data *drv_data = dev->data; + bool configured = false; + bool cmd_allowed; + + /* This driver does not use the I2S_STATE_NOT_READY value. + * Instead, if a given stream is not configured, the respective + * flag (tx_configured or rx_configured) is cleared. + */ + drv_data->tx.nrfx_cfg.channels |= drv_data->rx.nrfx_cfg.channels; + drv_data->rx.nrfx_cfg.channels |= drv_data->tx.nrfx_cfg.channels; + if (dir == I2S_DIR_BOTH) { + + configured = drv_data->tx_configured && drv_data->rx_configured; + } else if (dir == I2S_DIR_TX) { + configured = drv_data->tx_configured; + } else if (dir == I2S_DIR_RX) { + configured = drv_data->rx_configured; + } + + if (!configured) { + LOG_ERR("Device is not configured"); + return -EIO; + } + + if (dir == I2S_DIR_BOTH && (memcmp(&drv_data->tx.nrfx_cfg, &drv_data->rx.nrfx_cfg, + sizeof(drv_data->rx.nrfx_cfg)) != 0 || + (drv_data->tx.cfg.block_size != drv_data->rx.cfg.block_size))) { + LOG_ERR("TX and RX configurations are different"); + return -EIO; + } + + switch (cmd) { + case I2S_TRIGGER_START: + cmd_allowed = (drv_data->state == I2S_STATE_READY); + break; + case I2S_TRIGGER_STOP: + case I2S_TRIGGER_DRAIN: + cmd_allowed = (drv_data->state == I2S_STATE_RUNNING); + break; + case I2S_TRIGGER_DROP: + cmd_allowed = configured; + break; + case I2S_TRIGGER_PREPARE: + cmd_allowed = (drv_data->state == I2S_STATE_ERROR); + break; + default: + LOG_ERR("Invalid trigger: %d", cmd); + return -EINVAL; + } + + if 
(!cmd_allowed) { + LOG_ERR("Not allowed"); + return -EIO; + } + + /* For triggers applicable to the RUNNING state (i.e. STOP, DRAIN, + * and DROP), ensure that the command is applied to the streams + * that are currently active (this device cannot e.g. stop only TX + * without stopping RX). + */ + if (drv_data->state == I2S_STATE_RUNNING && drv_data->active_dir != dir) { + LOG_ERR("Inappropriate trigger (%d/%d), active stream(s): %d", cmd, dir, + drv_data->active_dir); + return -EINVAL; + } + + switch (cmd) { + case I2S_TRIGGER_START: + drv_data->stop = false; + drv_data->discard_rx = false; + drv_data->active_dir = dir; + drv_data->next_tx_buffer_needed = false; + return trigger_start(dev); + + case I2S_TRIGGER_STOP: + drv_data->state = I2S_STATE_STOPPING; + drv_data->stop = true; + return 0; + + case I2S_TRIGGER_DRAIN: + drv_data->state = I2S_STATE_STOPPING; + /* If only RX is active, DRAIN is equivalent to STOP. */ + drv_data->stop = (drv_data->active_dir == I2S_DIR_RX); + return 0; + + case I2S_TRIGGER_DROP: + if (drv_data->state != I2S_STATE_READY) { + drv_data->discard_rx = true; + tdm_stop(drv_data->p_reg); + } + purge_queue(dev, dir); + drv_data->state = I2S_STATE_READY; + return 0; + + case I2S_TRIGGER_PREPARE: + purge_queue(dev, dir); + drv_data->state = I2S_STATE_READY; + return 0; + + default: + LOG_ERR("Invalid trigger: %d", cmd); + return -EINVAL; + } +} + +#if CONFIG_CLOCK_CONTROL_NRF +static void init_clock_manager(const struct device *dev) +{ + struct tdm_drv_data *drv_data = dev->data; + clock_control_subsys_t subsys; + +#if NRF_CLOCK_HAS_HFCLKAUDIO + const struct tdm_drv_cfg *drv_cfg = dev->config; + + if (drv_cfg->clk_src == ACLK) { + subsys = CLOCK_CONTROL_NRF_SUBSYS_HFAUDIO; + } else +#endif + { + subsys = CLOCK_CONTROL_NRF_SUBSYS_HF; + } + + drv_data->clk_mgr = z_nrf_clock_control_get_onoff(subsys); + __ASSERT_NO_MSG(drv_data->clk_mgr != NULL); +} +#endif /* CONFIG_CLOCK_CONTROL_NRF */ + +static void data_handler(const struct device *dev, 
const tdm_buffers_t *released, uint32_t status) +{ + struct tdm_drv_data *drv_data = dev->data; + bool stop_transfer = false; + + if (status & NRFX_TDM_STATUS_TRANSFER_STOPPED) { + if (drv_data->state == I2S_STATE_STOPPING) { + drv_data->state = I2S_STATE_READY; + } + if (drv_data->last_tx_buffer) { + /* Usually, these pointers are equal, i.e. the last TX + * buffer that were to be transferred is released by the + * driver after it stops. The last TX buffer pointer is + * then set to NULL here so that the buffer can be freed + * below, just as any other TX buffer released by the + * driver. However, it may happen that the buffer is not + * released this way, for example, when the transfer + * ends with an error because an RX buffer allocation + * fails. In such case, the last TX buffer needs to be + * freed here. + */ + if (drv_data->last_tx_buffer != released->p_tx_buffer) { + free_tx_buffer(drv_data, drv_data->last_tx_buffer); + } + drv_data->last_tx_buffer = NULL; + } + tdm_uninit(drv_data); + if (drv_data->request_clock) { + (void)onoff_release(drv_data->clk_mgr); + } + } + + if (released == NULL) { + /* This means that buffers for the next part of the transfer + * were not supplied and the previous ones cannot be released + * yet, as pointers to them were latched in the I2S registers. + * It is not an error when the transfer is to be stopped (those + * buffers will be released after the transfer actually stops). 
+ */ + if (drv_data->state != I2S_STATE_STOPPING) { + drv_data->state = I2S_STATE_ERROR; + } + tdm_stop(drv_data->p_reg); + return; + } + + if (released->p_rx_buffer) { + if (drv_data->discard_rx) { + free_rx_buffer(drv_data, released->p_rx_buffer); + } else { + struct tdm_buf buf = {.mem_block = released->p_rx_buffer, + .size = released->buffer_size * sizeof(uint32_t)}; + int ret = k_msgq_put(&drv_data->rx_queue, &buf, K_NO_WAIT); + + if (ret < 0) { + LOG_ERR("No room in RX queue"); + drv_data->state = I2S_STATE_ERROR; + stop_transfer = true; + + free_rx_buffer(drv_data, released->p_rx_buffer); + } else { + + /* If the TX direction is not active and + * the transfer should be stopped after + * the current block, stop the reception. + */ + if (drv_data->active_dir == I2S_DIR_RX && drv_data->stop) { + drv_data->discard_rx = true; + stop_transfer = true; + } + } + } + } + + if (released->p_tx_buffer) { + /* If the last buffer that was to be transferred has just been + * released, it is time to stop the transfer. + */ + if (released->p_tx_buffer == drv_data->last_tx_buffer) { + drv_data->discard_rx = true; + stop_transfer = true; + } else { + free_tx_buffer(drv_data, released->p_tx_buffer); + } + } + + if (stop_transfer) { + tdm_stop(drv_data->p_reg); + } else if (status & NRFX_TDM_STATUS_NEXT_BUFFERS_NEEDED) { + tdm_buffers_t next = {0}; + + if (drv_data->active_dir != I2S_DIR_RX) { /* -> TX active */ + if (drv_data->stop) { + /* If the stream is to be stopped, don't get + * the next TX buffer from the queue, instead + * supply the one used last time (it won't be + * transferred, the stream will stop right + * before this buffer would be started again). + */ + next.p_tx_buffer = drv_data->last_tx_buffer; + next.buffer_size = 1; + } else if (get_next_tx_buffer(drv_data, &next)) { + /* Next TX buffer successfully retrieved from + * the queue, nothing more to do here. 
+ */ + } else if (drv_data->state == I2S_STATE_STOPPING) { + /* If there are no more TX blocks queued and + * the current state is STOPPING (so the DRAIN + * command was triggered) it is time to finish + * the transfer. + */ + drv_data->stop = true; + /* Supply the same buffer as last time; it will + * not be transferred anyway, as the transfer + * will be stopped earlier. + */ + next.p_tx_buffer = drv_data->last_tx_buffer; + next.buffer_size = 1; + } else { + /* Next TX buffer cannot be supplied now. + * Defer it to when the user writes more data. + */ + drv_data->next_tx_buffer_needed = true; + return; + } + } + (void)supply_next_buffers(drv_data, &next); + } +} + +static const struct i2s_driver_api tdm_nrf_drv_api = { + .configure = tdm_nrfx_configure, + .config_get = tdm_nrfx_config_get, + .read = tdm_nrfx_read, + .write = tdm_nrfx_write, + .trigger = tdm_nrfx_trigger, +}; + +#define TDM(idx) DT_NODELABEL(tdm##idx) +#define TDM_CLK_SRC(idx) DT_STRING_TOKEN(TDM(idx), clock_source) + +#define TDM_NRFX_DEVICE(idx) \ + static tdm_ctrl_t tdm##idx##data; \ + static struct tdm_buf tx_msgs##idx[CONFIG_TDM_NRFX_TX_BLOCK_COUNT]; \ + static struct tdm_buf rx_msgs##idx[CONFIG_TDM_NRFX_RX_BLOCK_COUNT]; \ + static void tdm_##idx##_irq_handler(const struct device *dev) \ + { \ + tdm_irq_handler(dev); \ + } \ + static void tdm_##idx##data_handler(tdm_buffers_t const *p_released, uint32_t status) \ + { \ + data_handler(DEVICE_DT_GET(TDM(idx)), p_released, status); \ + } \ + PINCTRL_DT_DEFINE(TDM(idx)); \ + static const struct tdm_drv_cfg tdm_nrfx_cfg##idx = { \ + .data_handler = tdm_##idx##data_handler, \ + .pcfg = PINCTRL_DT_DEV_CONFIG_GET(TDM(idx)), \ + .clk_src = TDM_CLK_SRC(idx), \ + .mck_frequency = DT_PROP_OR(TDM(idx), mck_frequency, 0), \ + }; \ + static struct tdm_drv_data tdm_nrfx_data##idx = { \ + .state = I2S_STATE_READY, \ + .p_reg = NRF_TDM##idx, \ + .control_data = &tdm##idx##data, \ + }; \ + static int tdm_nrfx_init##idx(const struct device *dev) \ + { \ + 
IRQ_CONNECT(DT_IRQN(TDM(idx)), DT_IRQ(TDM(idx), priority), \ + tdm_##idx##_irq_handler, DEVICE_DT_GET(TDM(idx)), 0); \ + const struct tdm_drv_cfg *drv_cfg = dev->config; \ + int err = pinctrl_apply_state(drv_cfg->pcfg, PINCTRL_STATE_DEFAULT); \ + if (err < 0) { \ + return err; \ + } \ + k_msgq_init(&tdm_nrfx_data##idx.tx_queue, (char *)tx_msgs##idx, \ + sizeof(struct tdm_buf), ARRAY_SIZE(tx_msgs##idx)); \ + k_msgq_init(&tdm_nrfx_data##idx.rx_queue, (char *)rx_msgs##idx, \ + sizeof(struct tdm_buf), ARRAY_SIZE(rx_msgs##idx)); \ + IF_ENABLED(CONFIG_CLOCK_CONTROL_NRF, \ + (init_clock_manager(dev);)) \ + return 0; \ + } \ + BUILD_ASSERT(TDM_CLK_SRC(idx) != ACLK, "Clock source ACLK is currently not supported. "); \ + DEVICE_DT_DEFINE(TDM(idx), tdm_nrfx_init##idx, NULL, &tdm_nrfx_data##idx, \ + &tdm_nrfx_cfg##idx, POST_KERNEL, CONFIG_I2S_INIT_PRIORITY, \ + &tdm_nrf_drv_api); + +/* Execute macro f(x) for all instances. */ +#define TDM_FOR_EACH_INSTANCE(f, sep, off_code, ...) \ + NRFX_FOREACH_PRESENT(TDM, f, sep, off_code, __VA_ARGS__) + +#define COND_TDM_NRFX_DEVICE(unused, prefix, i, _) \ + IF_ENABLED(CONFIG_HAS_HW_NRF_TDM##prefix##i, (TDM_NRFX_DEVICE(prefix##i);)) + +TDM_FOR_EACH_INSTANCE(COND_TDM_NRFX_DEVICE, (), ()) From c3b08682eff18cb75fcbd3a2884a383846cf3c49 Mon Sep 17 00:00:00 2001 From: Adam Kondraciuk Date: Thu, 28 Nov 2024 15:40:57 +0100 Subject: [PATCH 6/6] tests: i2s: align to nRF54H20 TDM Signed-off-by: Adam Kondraciuk --- tests/drivers/i2s/i2s_api/Kconfig | 6 +-- .../boards/nrf54h20dk_nrf54h20_cpuapp.overlay | 38 +++++++++++++++++++ tests/drivers/i2s/i2s_api/src/common.c | 27 ++++++++++++- tests/drivers/i2s/i2s_speed/Kconfig | 6 +-- .../boards/nrf54h20dk_nrf54h20_cpuapp.overlay | 38 +++++++++++++++++++ .../i2s/i2s_speed/src/test_i2s_speed.c | 20 ++++++++-- 6 files changed, 124 insertions(+), 11 deletions(-) create mode 100644 tests/drivers/i2s/i2s_api/boards/nrf54h20dk_nrf54h20_cpuapp.overlay create mode 100644 
tests/drivers/i2s/i2s_speed/boards/nrf54h20dk_nrf54h20_cpuapp.overlay diff --git a/tests/drivers/i2s/i2s_api/Kconfig b/tests/drivers/i2s/i2s_api/Kconfig index 538157f8a79..ff78064206e 100644 --- a/tests/drivers/i2s/i2s_api/Kconfig +++ b/tests/drivers/i2s/i2s_api/Kconfig @@ -15,7 +15,7 @@ config I2S_TEST_SEPARATE_DEVICES config I2S_TEST_USE_I2S_DIR_BOTH bool "Use I2S_DIR_BOTH value to perform RX/TX transfers" - default y if DT_HAS_NORDIC_NRF_I2S_ENABLED + default y if DT_HAS_NORDIC_NRF_I2S_ENABLED || DT_HAS_NORDIC_NRF_TDM_ENABLED help Use the I2S_DIR_BOTH enumeration value to trigger commands in test cases involving both reception and transmission. Use of this option @@ -24,7 +24,7 @@ config I2S_TEST_USE_I2S_DIR_BOTH config I2S_TEST_USE_GPIO_LOOPBACK bool "Use GPIO loopback" - default y if DT_HAS_NORDIC_NRF_I2S_ENABLED + default y if DT_HAS_NORDIC_NRF_I2S_ENABLED || DT_HAS_NORDIC_NRF_TDM_ENABLED help Use wiring between the data-out and data-in pins for looping back data. This option is intended to be used for devices that do not @@ -32,7 +32,7 @@ config I2S_TEST_USE_GPIO_LOOPBACK config I2S_TEST_ALLOWED_DATA_OFFSET int "Allowed offset in received data" - default 2 if DT_HAS_NORDIC_NRF_I2S_ENABLED + default 10 if DT_HAS_NORDIC_NRF_I2S_ENABLED || DT_HAS_NORDIC_NRF_TDM_ENABLED default 0 help Maximum allowed offset between sent and received samples. 
Non-zero
diff --git a/tests/drivers/i2s/i2s_api/boards/nrf54h20dk_nrf54h20_cpuapp.overlay b/tests/drivers/i2s/i2s_api/boards/nrf54h20dk_nrf54h20_cpuapp.overlay
new file mode 100644
index 00000000000..dd76defb3c4
--- /dev/null
+++ b/tests/drivers/i2s/i2s_api/boards/nrf54h20dk_nrf54h20_cpuapp.overlay
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2024 Nordic Semiconductor ASA
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/* i2s-node0 is the transmitter/receiver */
+
+/ {
+	aliases {
+		i2s-node0 = &tdm130;
+	};
+};
+
+&pinctrl {
+	tdm130_default_alt: tdm130_default_alt {
+		group1 {
+			psels = ,
+				,
+				,
+				,
+				;
+		};
+	};
+};
+
+&tdm130 {
+	status = "okay";
+	pinctrl-0 = <&tdm130_default_alt>;
+	pinctrl-names = "default";
+	memory-regions = <&cpuapp_dma_region>;
+	mck-frequency = <1000000>;
+};
+
+&cpuapp_dma_region {
+	status = "okay";
+};
+
diff --git a/tests/drivers/i2s/i2s_api/src/common.c b/tests/drivers/i2s/i2s_api/src/common.c
index b6beb072b31..830cebbbad3 100644
--- a/tests/drivers/i2s/i2s_api/src/common.c
+++ b/tests/drivers/i2s/i2s_api/src/common.c
@@ -8,9 +8,32 @@
 #include
 #include
 #include "i2s_api_test.h"
+#include <zephyr/linker/devicetree_regions.h>
 
-K_MEM_SLAB_DEFINE(rx_mem_slab, BLOCK_SIZE, NUM_RX_BLOCKS, 32);
-K_MEM_SLAB_DEFINE(tx_mem_slab, BLOCK_SIZE, NUM_TX_BLOCKS, 32);
+#define TDM(idx) DT_NODELABEL(tdm##idx)
+#define TDM_PROP(idx, prop) DT_PROP(TDM(idx), prop)
+#define TDM_HAS_PROP(idx, prop) DT_NODE_HAS_PROP(TDM(idx), prop)
+
+
+#define TDM_MEMORY_SECTION(idx) \
+	COND_CODE_1(TDM_HAS_PROP(idx, memory_regions), \
+		    (__attribute__((__section__(LINKER_DT_NODE_REGION_NAME( \
+			    DT_PHANDLE(TDM(idx), memory_regions)))))), \
+		    ())
+
+#define BUFFER_MEM_REGION __attribute__((__section__("cpuapp_dma_region")))
+
+char __aligned(WB_UP(32))
+	_k_mem_slab_buf_rx_mem_slab[(NUM_RX_BLOCKS + 2) * WB_UP(BLOCK_SIZE)] TDM_MEMORY_SECTION(130);
+STRUCT_SECTION_ITERABLE(k_mem_slab, rx_mem_slab) =
+	Z_MEM_SLAB_INITIALIZER(rx_mem_slab, _k_mem_slab_buf_rx_mem_slab,
+			       WB_UP(BLOCK_SIZE), NUM_RX_BLOCKS + 2);
+
+char __aligned(WB_UP(32)) + _k_mem_slab_buf_tx_mem_slab[(NUM_TX_BLOCKS) * WB_UP(BLOCK_SIZE)] TDM_MEMORY_SECTION(130); +STRUCT_SECTION_ITERABLE(k_mem_slab, tx_mem_slab) = + Z_MEM_SLAB_INITIALIZER(tx_mem_slab, _k_mem_slab_buf_tx_mem_slab, + WB_UP(BLOCK_SIZE), NUM_TX_BLOCKS); /* The data_l represent a sine wave */ ZTEST_DMEM int16_t data_l[SAMPLE_NO] = { diff --git a/tests/drivers/i2s/i2s_speed/Kconfig b/tests/drivers/i2s/i2s_speed/Kconfig index 7f9e54efacc..005ab795277 100644 --- a/tests/drivers/i2s/i2s_speed/Kconfig +++ b/tests/drivers/i2s/i2s_speed/Kconfig @@ -15,7 +15,7 @@ config I2S_TEST_SEPARATE_DEVICES config I2S_TEST_USE_I2S_DIR_BOTH bool "Use I2S_DIR_BOTH value to perform RX/TX transfers" - default y if DT_HAS_NORDIC_NRF_I2S_ENABLED + default y if DT_HAS_NORDIC_NRF_I2S_ENABLED || DT_HAS_NORDIC_NRF_TDM_ENABLED help Use the I2S_DIR_BOTH enumeration value to trigger commands in test cases involving both reception and transmission. Use of this option @@ -24,7 +24,7 @@ config I2S_TEST_USE_I2S_DIR_BOTH config I2S_TEST_USE_GPIO_LOOPBACK bool "Use GPIO loopback" - default y if DT_HAS_NORDIC_NRF_I2S_ENABLED + default y if DT_HAS_NORDIC_NRF_I2S_ENABLED || DT_HAS_NORDIC_NRF_TDM_ENABLED help Use wiring between the data-out and data-in pins for looping back data. This option is intended to be used for devices that do not @@ -32,7 +32,7 @@ config I2S_TEST_USE_GPIO_LOOPBACK config I2S_TEST_ALLOWED_DATA_OFFSET int "Allowed offset in received data" - default 2 if DT_HAS_NORDIC_NRF_I2S_ENABLED + default 2 if DT_HAS_NORDIC_NRF_I2S_ENABLED || DT_HAS_NORDIC_NRF_TDM_ENABLED default 0 help Maximum allowed offset between sent and received samples. 
Non-zero
diff --git a/tests/drivers/i2s/i2s_speed/boards/nrf54h20dk_nrf54h20_cpuapp.overlay b/tests/drivers/i2s/i2s_speed/boards/nrf54h20dk_nrf54h20_cpuapp.overlay
new file mode 100644
index 00000000000..dd76defb3c4
--- /dev/null
+++ b/tests/drivers/i2s/i2s_speed/boards/nrf54h20dk_nrf54h20_cpuapp.overlay
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2024 Nordic Semiconductor ASA
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/* i2s-node0 is the transmitter/receiver */
+
+/ {
+	aliases {
+		i2s-node0 = &tdm130;
+	};
+};
+
+&pinctrl {
+	tdm130_default_alt: tdm130_default_alt {
+		group1 {
+			psels = ,
+				,
+				,
+				,
+				;
+		};
+	};
+};
+
+&tdm130 {
+	status = "okay";
+	pinctrl-0 = <&tdm130_default_alt>;
+	pinctrl-names = "default";
+	memory-regions = <&cpuapp_dma_region>;
+	mck-frequency = <1000000>;
+};
+
+&cpuapp_dma_region {
+	status = "okay";
+};
+
diff --git a/tests/drivers/i2s/i2s_speed/src/test_i2s_speed.c b/tests/drivers/i2s/i2s_speed/src/test_i2s_speed.c
index 9851f480b23..2b7d6fff4fb 100644
--- a/tests/drivers/i2s/i2s_speed/src/test_i2s_speed.c
+++ b/tests/drivers/i2s/i2s_speed/src/test_i2s_speed.c
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include <zephyr/linker/devicetree_regions.h>
 
 #define I2S_DEV_NODE_RX DT_ALIAS(i2s_node0)
 #ifdef CONFIG_I2S_TEST_SEPARATE_DEVICES
@@ -17,7 +18,7 @@
 #define I2S_DEV_NODE_TX DT_ALIAS(i2s_node0)
 #endif
 
-#define NUM_BLOCKS 20
+#define NUM_BLOCKS 5
 #define SAMPLE_NO 64
 
 /* The data_l represent a sine wave */
@@ -58,14 +59,27 @@ static int16_t data_r[SAMPLE_NO] = {
  * RX blocks to satisfy this requirement
  */
+#define TDM(idx) DT_NODELABEL(tdm##idx)
+#define TDM_PROP(idx, prop) DT_PROP(TDM(idx), prop)
+#define TDM_HAS_PROP(idx, prop) DT_NODE_HAS_PROP(TDM(idx), prop)
+
+
+#define TDM_MEMORY_SECTION(idx) \
+	COND_CODE_1(TDM_HAS_PROP(idx, memory_regions), \
+		    (__attribute__((__section__(LINKER_DT_NODE_REGION_NAME( \
+			    DT_PHANDLE(TDM(idx), memory_regions)))))), \
+		    ())
+
+#define BUFFER_MEM_REGION __attribute__((__section__("cpuapp_dma_region")))
+
 char MEM_SLAB_CACHE_ATTR
__aligned(WB_UP(32)) - _k_mem_slab_buf_rx_0_mem_slab[(NUM_BLOCKS + 2) * WB_UP(BLOCK_SIZE)]; + _k_mem_slab_buf_rx_0_mem_slab[(NUM_BLOCKS + 2) * WB_UP(BLOCK_SIZE)] TDM_MEMORY_SECTION(130); STRUCT_SECTION_ITERABLE(k_mem_slab, rx_0_mem_slab) = Z_MEM_SLAB_INITIALIZER(rx_0_mem_slab, _k_mem_slab_buf_rx_0_mem_slab, WB_UP(BLOCK_SIZE), NUM_BLOCKS + 2); char MEM_SLAB_CACHE_ATTR __aligned(WB_UP(32)) - _k_mem_slab_buf_tx_0_mem_slab[(NUM_BLOCKS) * WB_UP(BLOCK_SIZE)]; + _k_mem_slab_buf_tx_0_mem_slab[(NUM_BLOCKS) * WB_UP(BLOCK_SIZE)] TDM_MEMORY_SECTION(130); STRUCT_SECTION_ITERABLE(k_mem_slab, tx_0_mem_slab) = Z_MEM_SLAB_INITIALIZER(tx_0_mem_slab, _k_mem_slab_buf_tx_0_mem_slab, WB_UP(BLOCK_SIZE), NUM_BLOCKS);