text
stringlengths
9
39.2M
dir
stringlengths
25
226
lang
stringclasses
163 values
created_date
timestamp[s]
updated_date
timestamp[s]
repo_name
stringclasses
751 values
repo_full_name
stringclasses
752 values
star
int64
1.01k
183k
len_tokens
int64
1
18.5M
```unknown # config TI_DM_TIMER bool "TI Dual-Mode Timer" default y depends on DT_HAS_TI_AM654_TIMER_ENABLED select TICKLESS_CAPABLE help This module implements a kernel device driver for TI Dual-Mode timer. This driver provides system tick interface. ```
/content/code_sandbox/drivers/timer/Kconfig.ti_dm_timer
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
61
```unknown config LEON_GPTIMER bool "LEON timer" default y depends on DT_HAS_GAISLER_GPTIMER_ENABLED select DYNAMIC_INTERRUPTS help This module implements a kernel device driver for the GRLIB GPTIMER which is common in LEON systems. ```
/content/code_sandbox/drivers/timer/Kconfig.leon_gptimer
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
66
```unknown config RISCV_MACHINE_TIMER bool "RISCV Machine Timer" default y depends on DT_HAS_ANDESTECH_MACHINE_TIMER_ENABLED || \ DT_HAS_NEORV32_MACHINE_TIMER_ENABLED || \ DT_HAS_NUCLEI_SYSTIMER_ENABLED || \ DT_HAS_SIFIVE_CLINT0_ENABLED || \ DT_HAS_TELINK_MACHINE_TIMER_ENABLED || \ DT_HAS_LOWRISC_MACHINE_TIMER_ENABLED || \ DT_HAS_NIOSV_MACHINE_TIMER_ENABLED select TICKLESS_CAPABLE select TIMER_HAS_64BIT_CYCLE_COUNTER help This module implements a kernel device driver for the generic RISCV machine timer driver. It provides the standard "system clock driver" interfaces. if RISCV_MACHINE_TIMER config RISCV_MACHINE_TIMER_SYSTEM_CLOCK_DIVIDER int default 0 help Specifies the division ratio of the system clock supplied to the Machine Timer. A clock obtained by dividing the system clock by a value of [2^N] is supplied to the timer. Where N is this parameter's value. When N=2, it is divided by 4, and when N=5, it is divided by 32. Default case is N=0, this means use system clock as machine timer clock. It is normal configuration for RISC-V machine clock. This parameter usually depends on the hardware configuration. The division ratio should define in devicetree, and it is desirable usage that references it with using a function such as dt_node_int_prop_int from Kconfig. (Tune in the conf file is not preferable.) endif ```
/content/code_sandbox/drivers/timer/Kconfig.riscv_machine
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
343
```unknown config ESP32_SYS_TIMER bool "ESP32 sys-timer support (ESP32Cx series)" depends on SOC_SERIES_ESP32C2 || SOC_SERIES_ESP32C3 || SOC_SERIES_ESP32C6 default y select TICKLESS_CAPABLE select TIMER_HAS_64BIT_CYCLE_COUNTER select SYSTEM_TIMER_HAS_DISABLE_SUPPORT help This option enables the system timer driver for the Espressif ESP32Cx and provides the standard "system clock driver" interface. ```
/content/code_sandbox/drivers/timer/Kconfig.esp32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
108
```unknown # Timer driver configuration options menuconfig NRF_GRTC_TIMER bool "nRF GRTC Timer" default y depends on DT_HAS_NORDIC_NRF_GRTC_ENABLED select TICKLESS_CAPABLE select TIMER_HAS_64BIT_CYCLE_COUNTER select NRFX_GRTC help This module implements a kernel device driver for the nRF Global Real Time Counter NRF_GRTC and provides the standard "system clock driver" interfaces. if NRF_GRTC_TIMER config NRF_GRTC_SLEEP_ALLOWED def_bool y depends on POWEROFF help This feature allows GRTC SYSCOUNTER to go to sleep state. config NRF_GRTC_TIMER_APP_DEFINED_INIT bool "Application defines GRTC initialization" help Application defines the initialization procedure and time of the GRTC drivers, rather than leaving it up to SYS_INIT. config NRF_GRTC_START_SYSCOUNTER bool "Start SYSCOUNTER on driver init" select NRF_GRTC_TIMER_CLOCK_MANAGEMENT help Start the SYSCOUNTER when initializing the GRTC. This should only be handled by one processor in the system. config NRF_GRTC_TIMER_CLOCK_MANAGEMENT bool help Compile additional driver code for enabling management functionality of the GRTC. Usually this is only needed by the processor that is starting the SYSCOUNTER, but can be shared by multiple processors in the system. config NRF_GRTC_SYSCOUNTER_SLEEP_MINIMUM_LATENCY int default 1000 depends on NRF_GRTC_SLEEP_ALLOWED help The value (in us) ensures that the wakeup event will not fire too early. In other words, applying SYSCOUNTER sleep state for less than NRF_GRTC_SYSCOUNTER_SLEEP_MINIMUM_LATENCY period makes no sense. config NRF_GRTC_TIMER_AUTO_KEEP_ALIVE bool default y if NRF_GRTC_START_SYSCOUNTER help This feature prevents the SYSCOUNTER from sleeping when any core is in active state. endif # NRF_GRTC_TIMER ```
/content/code_sandbox/drivers/timer/Kconfig.nrf_grtc
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
428
```c /* * */ #define DT_DRV_COMPAT nuvoton_npcx_itim_timer /** * @file * @brief Nuvoton NPCX kernel device driver for "system clock driver" interface * * This file contains a kernel device driver implemented by the internal * 64/32-bit timers in Nuvoton NPCX series. Via these two kinds of timers, the * driver provides an standard "system clock driver" interface. * * It includes: * - A system timer based on an ITIM64 (Internal 64-bit timer) instance, clocked * by APB2 which freq is the same as CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC. * - Provide a 64-bit cycles reading and ticks computation based on it. * - Its prescaler is set to 1 and provide the kernel cycles reading without * handling overflow mechanism. * - After ec entered "sleep/deep sleep" power state which is used for better * power consumption, then its clock will stop. * * - A event timer based on an ITIM32 (Internal 32-bit timer) instance, clocked * by LFCLK which frequency is 32KHz and still activated when ec entered * "sleep/deep sleep" power state. * - Provide a system clock timeout notification. In its ISR, the driver informs * the kernel that the specified number of ticks have elapsed. * - Its prescaler is set to 1 and the formula between event timer's cycles and * ticks is 'cycles = (ticks * 32768) / CONFIG_SYS_CLOCK_TICKS_PER_SEC' * - Compensate reading of ITIM64 which clock is gating after ec entered * "sleep/deep sleep" power state if CONFIG_PM is enabled. 
*/ #include <zephyr/init.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/kernel.h> #include <zephyr/sys_clock.h> #include <zephyr/spinlock.h> #include <soc.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(itim, LOG_LEVEL_ERR); #define NPCX_ITIM32_MAX_CNT 0xffffffff #define NPCX_ITIM64_MAX_HALF_CNT 0xffffffff #define EVT_CYCLES_PER_SEC LFCLK /* 32768 Hz */ #define SYS_CYCLES_PER_TICK (sys_clock_hw_cycles_per_sec() \ / CONFIG_SYS_CLOCK_TICKS_PER_SEC) #define SYS_CYCLES_PER_USEC (sys_clock_hw_cycles_per_sec() / 1000000) #define EVT_CYCLES_FROM_TICKS(ticks) \ DIV_ROUND_UP(ticks * EVT_CYCLES_PER_SEC, \ CONFIG_SYS_CLOCK_TICKS_PER_SEC) #define NPCX_ITIM_CLK_SEL_DELAY 92 /* Delay for clock selection (Unit:us) */ /* Timeout for enabling ITIM module: 100us (Unit:cycles) */ #define NPCX_ITIM_EN_TIMEOUT_CYCLES (100 * SYS_CYCLES_PER_USEC) #define SYS_CYC_PER_EVT_CYC (sys_clock_hw_cycles_per_sec() / EVT_CYCLES_PER_SEC) /* Instance of system and event timers */ static struct itim64_reg *const sys_tmr = (struct itim64_reg *) DT_INST_REG_ADDR_BY_NAME(0, sys_itim); static struct itim32_reg *const evt_tmr = (struct itim32_reg *) DT_INST_REG_ADDR_BY_NAME(0, evt_itim); static const struct npcx_clk_cfg itim_clk_cfg[] = NPCX_DT_CLK_CFG_ITEMS_LIST(0); static struct k_spinlock lock; /* Announced cycles in system timer before executing sys_clock_announce() */ static uint64_t cyc_sys_announced; static uint64_t last_ticks; static uint32_t last_elapsed; /* Current target cycles of time-out signal in event timer */ static uint32_t cyc_evt_timeout; /* Total cycles of system timer stopped in "sleep/deep sleep" mode */ __unused static uint64_t cyc_sys_compensated; /* Current cycles in event timer when ec entered "sleep/deep sleep" mode */ __unused static uint32_t cyc_evt_enter_deep_idle; /* ITIM local inline functions */ static inline uint64_t npcx_itim_get_sys_cyc64(void) { uint32_t cnt64h, cnt64h_check, 
cnt64l; /* Read 64-bit counter value from two 32-bit registers */ do { cnt64h_check = sys_tmr->ITCNT64H; cnt64l = sys_tmr->ITCNT64L; cnt64h = sys_tmr->ITCNT64H; } while (cnt64h != cnt64h_check); cnt64h = NPCX_ITIM64_MAX_HALF_CNT - cnt64h; cnt64l = NPCX_ITIM64_MAX_HALF_CNT - cnt64l + 1; /* Return current value of 64-bit counter value of system timer */ if (IS_ENABLED(CONFIG_PM)) { return ((((uint64_t)cnt64h) << 32) | cnt64l) + cyc_sys_compensated; } else { return (((uint64_t)cnt64h) << 32) | cnt64l; } } static inline int npcx_itim_evt_enable(void) { uint64_t cyc_start; /* Enable event timer and wait for it to take effect */ evt_tmr->ITCTS32 |= BIT(NPCX_ITCTSXX_ITEN); /* * Usually, it need one clock (30.5 us) to take effect since * asynchronization between core and itim32's source clock (LFCLK). */ cyc_start = npcx_itim_get_sys_cyc64(); while (!IS_BIT_SET(evt_tmr->ITCTS32, NPCX_ITCTSXX_ITEN)) { if (npcx_itim_get_sys_cyc64() - cyc_start > NPCX_ITIM_EN_TIMEOUT_CYCLES) { /* ITEN bit is still unset? */ if (!IS_BIT_SET(evt_tmr->ITCTS32, NPCX_ITCTSXX_ITEN)) { LOG_ERR("Timeout: enabling EVT timer!"); return -ETIMEDOUT; } } } return 0; } static inline void npcx_itim_evt_disable(void) { /* Disable event timer and no need to wait for it to take effect */ evt_tmr->ITCTS32 &= ~BIT(NPCX_ITCTSXX_ITEN); } /* ITIM local functions */ static int npcx_itim_start_evt_tmr_by_tick(int32_t ticks) { k_spinlock_key_t key = k_spin_lock(&lock); /* * Get desired cycles of event timer from the requested ticks which * round up to next tick boundary. 
*/ if (ticks == K_TICKS_FOREVER) { cyc_evt_timeout = NPCX_ITIM32_MAX_CNT; } else { uint64_t next_cycs; uint64_t curr = npcx_itim_get_sys_cyc64(); uint32_t dcycles; if (ticks <= 0) { ticks = 1; } next_cycs = (last_ticks + last_elapsed + ticks) * SYS_CYCLES_PER_TICK; if (unlikely(next_cycs <= curr)) { cyc_evt_timeout = 1; } else { dcycles = next_cycs - curr; cyc_evt_timeout = CLAMP((dcycles / SYS_CYC_PER_EVT_CYC), 1, NPCX_ITIM32_MAX_CNT); } } LOG_DBG("ticks %x, cyc_evt_timeout %x", ticks, cyc_evt_timeout); /* Disable event timer if needed before configuring counter */ if (IS_BIT_SET(evt_tmr->ITCTS32, NPCX_ITCTSXX_ITEN)) { npcx_itim_evt_disable(); } /* Upload counter of event timer */ evt_tmr->ITCNT32 = MAX(cyc_evt_timeout - 1, 1); k_spin_unlock(&lock, key); /* Enable event timer and start ticking */ return npcx_itim_evt_enable(); } static void npcx_itim_evt_isr(const struct device *dev) { ARG_UNUSED(dev); /* Disable ITIM event module first */ npcx_itim_evt_disable(); /* Clear timeout status for event */ evt_tmr->ITCTS32 |= BIT(NPCX_ITCTSXX_TO_STS); if (IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { k_spinlock_key_t key = k_spin_lock(&lock); uint64_t curr = npcx_itim_get_sys_cyc64(); uint32_t delta_ticks = (uint32_t)((curr - cyc_sys_announced) / SYS_CYCLES_PER_TICK); cyc_sys_announced += delta_ticks * SYS_CYCLES_PER_TICK; last_ticks += delta_ticks; last_elapsed = 0; k_spin_unlock(&lock, key); /* Informs kernel that specified number of ticks have elapsed */ sys_clock_announce(delta_ticks); } else { /* Enable event timer for ticking and wait to it take effect */ npcx_itim_evt_enable(); /* Informs kernel that one tick has elapsed */ sys_clock_announce(1); } } #if defined(CONFIG_PM) static inline uint32_t npcx_itim_get_evt_cyc32(void) { uint32_t cnt1, cnt2; cnt1 = evt_tmr->ITCNT32; /* * Wait for two consecutive equal values are read since the source clock * of event timer is 32KHz. 
*/ while ((cnt2 = evt_tmr->ITCNT32) != cnt1) cnt1 = cnt2; /* Return current value of 32-bit counter of event timer */ return cnt2; } static uint32_t npcx_itim_evt_elapsed_cyc32(void) { uint32_t cnt1 = npcx_itim_get_evt_cyc32(); uint8_t sys_cts = evt_tmr->ITCTS32; uint16_t cnt2 = npcx_itim_get_evt_cyc32(); /* Event has been triggered but timer ISR doesn't handle it */ if (IS_BIT_SET(sys_cts, NPCX_ITCTSXX_TO_STS) || (cnt2 > cnt1)) { cnt2 = cyc_evt_timeout; } else { cnt2 = cyc_evt_timeout - cnt2; } /* Return elapsed cycles of 32-bit counter of event timer */ return cnt2; } #endif /* CONFIG_PM */ /* System timer api functions */ void sys_clock_set_timeout(int32_t ticks, bool idle) { ARG_UNUSED(idle); if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { /* Only for tickless kernel system */ return; } LOG_DBG("timeout is %d", ticks); /* Start a event timer in ticks */ npcx_itim_start_evt_tmr_by_tick(ticks); } uint32_t sys_clock_elapsed(void) { if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { /* Always return 0 for tickful kernel system */ return 0; } k_spinlock_key_t key = k_spin_lock(&lock); uint64_t delta_cycle = npcx_itim_get_sys_cyc64() - cyc_sys_announced; uint32_t delta_ticks = (uint32_t)delta_cycle / SYS_CYCLES_PER_TICK; last_elapsed = delta_ticks; k_spin_unlock(&lock, key); /* Return how many ticks elapsed since last sys_clock_announce() call */ return delta_ticks; } uint32_t sys_clock_cycle_get_32(void) { k_spinlock_key_t key = k_spin_lock(&lock); uint64_t current = npcx_itim_get_sys_cyc64(); k_spin_unlock(&lock, key); /* Return how many cycles since system kernel timer start counting */ return (uint32_t)(current); } uint64_t sys_clock_cycle_get_64(void) { k_spinlock_key_t key = k_spin_lock(&lock); uint64_t current = npcx_itim_get_sys_cyc64(); k_spin_unlock(&lock, key); /* Return how many cycles since system kernel timer start counting */ return current; } /* Platform specific system timer functions */ #if defined(CONFIG_PM) void npcx_clock_capture_low_freq_timer(void) { 
cyc_evt_enter_deep_idle = npcx_itim_evt_elapsed_cyc32(); } void npcx_clock_compensate_system_timer(void) { uint32_t cyc_evt_elapsed_in_deep = npcx_itim_evt_elapsed_cyc32() - cyc_evt_enter_deep_idle; cyc_sys_compensated += ((uint64_t)cyc_evt_elapsed_in_deep * sys_clock_hw_cycles_per_sec()) / EVT_CYCLES_PER_SEC; } uint64_t npcx_clock_get_sleep_ticks(void) { return cyc_sys_compensated / SYS_CYCLES_PER_TICK; } #endif /* CONFIG_PM */ static int sys_clock_driver_init(void) { int ret; uint32_t sys_tmr_rate; const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE); if (!device_is_ready(clk_dev)) { LOG_ERR("clock control device not ready"); return -ENODEV; } /* Turn on all itim module clocks used for counting */ for (int i = 0; i < ARRAY_SIZE(itim_clk_cfg); i++) { ret = clock_control_on(clk_dev, (clock_control_subsys_t) &itim_clk_cfg[i]); if (ret < 0) { LOG_ERR("Turn on timer %d clock failed.", i); return ret; } } /* * In npcx series, we use ITIM64 as system kernel timer. Its source * clock frequency must equal to CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC. */ ret = clock_control_get_rate(clk_dev, (clock_control_subsys_t) &itim_clk_cfg[1], &sys_tmr_rate); if (ret < 0) { LOG_ERR("Get ITIM64 clock rate failed %d", ret); return ret; } if (sys_tmr_rate != CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) { LOG_ERR("CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC doesn't match " "ITIM64 clock frequency %d", sys_tmr_rate); return -EINVAL; } /* * Step 1. Use a ITIM64 timer as system kernel timer for counting. * Configure 64-bit timer counter and its prescaler to 1 first. */ sys_tmr->ITPRE64 = 0; sys_tmr->ITCNT64L = NPCX_ITIM64_MAX_HALF_CNT; sys_tmr->ITCNT64H = NPCX_ITIM64_MAX_HALF_CNT; /* * Select APB2 clock which freq is CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC, * and clear timeout status bit before enabling the whole module. */ sys_tmr->ITCTS64 = BIT(NPCX_ITCTSXX_TO_STS); /* Enable 64-bit timer and start ticking */ sys_tmr->ITCTS64 |= BIT(NPCX_ITCTSXX_ITEN); /* * Step 2. 
Use a ITIM32 timer for event handling (ex. timeout event). * Configure 32-bit timer's prescaler to 1 first. */ evt_tmr->ITPRE32 = 0; /* * Select low frequency clock source (The freq is 32kHz), enable its * interrupt/wake-up sources, and clear timeout status bit before * enabling it. */ evt_tmr->ITCTS32 = BIT(NPCX_ITCTSXX_CKSEL) | BIT(NPCX_ITCTSXX_TO_WUE) | BIT(NPCX_ITCTSXX_TO_IE) | BIT(NPCX_ITCTSXX_TO_STS); /* A delay for ITIM source clock selection */ k_busy_wait(NPCX_ITIM_CLK_SEL_DELAY); /* Configure event timer's ISR */ IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), npcx_itim_evt_isr, NULL, 0); /* Enable event timer interrupt */ irq_enable(DT_INST_IRQN(0)); if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { /* Start a event timer in one tick */ ret = npcx_itim_start_evt_tmr_by_tick(1); if (ret < 0) { return ret; } } return 0; } SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/npcx_itim_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,567
```unknown config ALTERA_AVALON_TIMER bool "Altera Avalon Interval Timer" default y depends on NIOS2 help This module implements a kernel device driver for the Altera Avalon Interval Timer as described in the Embedded IP documentation, for use with Nios II and possibly other Altera soft CPUs. It provides the standard "system clock driver" interfaces. ```
/content/code_sandbox/drivers/timer/Kconfig.altera_avalon
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
86
```c /* * */ #include <zephyr/init.h> #include <soc.h> #include <stm32_ll_lptim.h> #include <stm32_ll_bus.h> #include <stm32_ll_rcc.h> #include <stm32_ll_pwr.h> #include <stm32_ll_system.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/sys_clock.h> #include <zephyr/irq.h> #include <zephyr/drivers/counter.h> #include <zephyr/pm/policy.h> #include <zephyr/spinlock.h> #define DT_DRV_COMPAT st_stm32_lptim #if DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) > 1 #error Only one LPTIM instance should be enabled #endif #define LPTIM (LPTIM_TypeDef *) DT_INST_REG_ADDR(0) #if DT_INST_NUM_CLOCKS(0) == 1 #warning Kconfig for LPTIM source clock (LSI/LSE) is deprecated, use device tree. static const struct stm32_pclken lptim_clk[] = { STM32_CLOCK_INFO(0, DT_DRV_INST(0)), /* Use Kconfig to configure source clocks fields */ /* Fortunately, values are consistent across enabled series */ #ifdef CONFIG_STM32_LPTIM_CLOCK_LSI {.bus = STM32_SRC_LSI, .enr = LPTIM1_SEL(1)} #else {.bus = STM32_SRC_LSE, .enr = LPTIM1_SEL(3)} #endif }; #else static const struct stm32_pclken lptim_clk[] = STM32_DT_INST_CLOCKS(0); #endif static const struct device *const clk_ctrl = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); /* * Assumptions and limitations: * * - system clock based on an LPTIM instance, clocked by LSI or LSE * - prescaler is set to a 2^value from 1 (division of the LPTIM source clock by 1) * to 128 (division of the LPTIM source clock by 128) * - using LPTIM AutoReload capability to trig the IRQ (timeout irq) * - when timeout irq occurs the counter is already reset * - the maximum timeout duration is reached with the lptim_time_base value * - with prescaler of 1, the max timeout (LPTIM_TIMEBASE) is 2 seconds: * 0xFFFF / (LSE freq (32768Hz) / 1) * - with prescaler of 128, the max timeout (LPTIM_TIMEBASE) is 256 seconds: * 0xFFFF / (LSE freq (32768Hz) / 128) */ static int32_t 
lptim_time_base; static uint32_t lptim_clock_freq = CONFIG_STM32_LPTIM_CLOCK; /* The prescaler given by the DTS and to apply to the lptim_clock_freq */ static uint32_t lptim_clock_presc = DT_PROP(DT_DRV_INST(0), st_prescaler); /* Minimum nb of clock cycles to have to set autoreload register correctly */ #define LPTIM_GUARD_VALUE 2 /* A 32bit value cannot exceed 0xFFFFFFFF/LPTIM_TIMEBASE counting cycles. * This is for example about of 65000 x 2000ms when clocked by LSI */ static uint32_t accumulated_lptim_cnt; /* Next autoreload value to set */ static uint32_t autoreload_next; /* Indicate if the autoreload register is ready for a write */ static bool autoreload_ready = true; static struct k_spinlock lock; #ifdef CONFIG_STM32_LPTIM_STDBY_TIMER #define CURRENT_CPU \ (COND_CODE_1(CONFIG_SMP, (arch_curr_cpu()->id), (_current_cpu->id))) #define cycle_t uint32_t /* This local variable indicates that the timeout was set right before * entering standby state. * * It is used for chips that has to use a separate standby timer in such * case because the LPTIM is not clocked in some low power mode state. */ static bool timeout_stdby; /* Cycle counter before entering the standby state. */ static cycle_t lptim_cnt_pre_stdby; /* Standby timer value before entering the standby state. 
*/ static uint32_t stdby_timer_pre_stdby; /* Standby timer used for timer while entering the standby state */ static const struct device *stdby_timer = DEVICE_DT_GET(DT_CHOSEN(st_lptim_stdby_timer)); #endif /* CONFIG_STM32_LPTIM_STDBY_TIMER */ static inline bool arrm_state_get(void) { return (LL_LPTIM_IsActiveFlag_ARRM(LPTIM) && LL_LPTIM_IsEnabledIT_ARRM(LPTIM)); } static void lptim_irq_handler(const struct device *unused) { ARG_UNUSED(unused); uint32_t autoreload = LL_LPTIM_GetAutoReload(LPTIM); if ((LL_LPTIM_IsActiveFlag_ARROK(LPTIM) != 0) && LL_LPTIM_IsEnabledIT_ARROK(LPTIM) != 0) { LL_LPTIM_ClearFlag_ARROK(LPTIM); if ((autoreload_next > 0) && (autoreload_next != autoreload)) { /* the new autoreload value change, we set it */ autoreload_ready = false; LL_LPTIM_SetAutoReload(LPTIM, autoreload_next); } else { autoreload_ready = true; } } if (arrm_state_get()) { k_spinlock_key_t key = k_spin_lock(&lock); /* do not change ARR yet, sys_clock_announce will do */ LL_LPTIM_ClearFLAG_ARRM(LPTIM); /* increase the total nb of autoreload count * used in the sys_clock_cycle_get_32() function. */ autoreload++; accumulated_lptim_cnt += autoreload; k_spin_unlock(&lock, key); /* announce the elapsed time in ms (count register is 16bit) */ uint32_t dticks = (autoreload * CONFIG_SYS_CLOCK_TICKS_PER_SEC) / lptim_clock_freq; sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? 
dticks : (dticks > 0)); } } static void lptim_set_autoreload(uint32_t arr) { /* Update autoreload register */ autoreload_next = arr; if (!autoreload_ready) { return; } /* The ARR register ready, we could set it directly */ if ((arr > 0) && (arr != LL_LPTIM_GetAutoReload(LPTIM))) { /* The new autoreload value change, we set it */ autoreload_ready = false; LL_LPTIM_ClearFlag_ARROK(LPTIM); LL_LPTIM_SetAutoReload(LPTIM, arr); } } static inline uint32_t z_clock_lptim_getcounter(void) { uint32_t lp_time; uint32_t lp_time_prev_read; /* It should be noted that to read reliably the content * of the LPTIM_CNT register, two successive read accesses * must be performed and compared */ lp_time = LL_LPTIM_GetCounter(LPTIM); do { lp_time_prev_read = lp_time; lp_time = LL_LPTIM_GetCounter(LPTIM); } while (lp_time != lp_time_prev_read); return lp_time; } void sys_clock_set_timeout(int32_t ticks, bool idle) { /* new LPTIM AutoReload value to set (aligned on Kernel ticks) */ uint32_t next_arr = 0; int err; ARG_UNUSED(idle); #ifdef CONFIG_STM32_LPTIM_STDBY_TIMER const struct pm_state_info *next; next = pm_policy_next_state(CURRENT_CPU, ticks); /* Check if STANBY or STOP3 is requested */ timeout_stdby = false; if ((next != NULL) && idle) { #ifdef CONFIG_PM_S2RAM if (next->state == PM_STATE_SUSPEND_TO_RAM) { timeout_stdby = true; } #endif #ifdef CONFIG_STM32_STOP3_LP_MODE if ((next->state == PM_STATE_SUSPEND_TO_IDLE) && (next->substate_id == 4)) { timeout_stdby = true; } #endif } if (timeout_stdby) { uint64_t timeout_us = ((uint64_t)ticks * USEC_PER_SEC) / CONFIG_SYS_CLOCK_TICKS_PER_SEC; struct counter_alarm_cfg cfg = { .callback = NULL, .ticks = counter_us_to_ticks(stdby_timer, timeout_us), .user_data = NULL, .flags = 0, }; /* Set the alarm using timer that runs the standby. * Needed rump-up/setting time, lower accurency etc. should be * included in the exit-latency in the power state definition. 
*/ counter_cancel_channel_alarm(stdby_timer, 0); counter_set_channel_alarm(stdby_timer, 0, &cfg); /* Store current values to calculate a difference in * measurements after exiting the standby state. */ counter_get_value(stdby_timer, &stdby_timer_pre_stdby); lptim_cnt_pre_stdby = z_clock_lptim_getcounter(); /* Stop clocks for LPTIM, since RTC is used instead */ clock_control_off(clk_ctrl, (clock_control_subsys_t) &lptim_clk[0]); return; } #endif /* CONFIG_STM32_LPTIM_STDBY_TIMER */ if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return; } /* * When CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE = y, ticks equals to -1 * is treated as a lptim off ; never waking up ; lptim not clocked anymore */ if (ticks == K_TICKS_FOREVER) { clock_control_off(clk_ctrl, (clock_control_subsys_t) &lptim_clk[0]); return; } /* * When CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE = n, ticks equals to INT_MAX * is treated as a maximum possible value LPTIM_MAX_TIMEBASE (16bit counter) */ /* if LPTIM clock was previously stopped, it must now be restored */ err = clock_control_on(clk_ctrl, (clock_control_subsys_t) &lptim_clk[0]); if (err < 0) { return; } /* passing ticks==1 means "announce the next tick", * ticks value of zero (or even negative) is legal and * treated identically: it simply indicates the kernel would like the * next tick announcement as soon as possible. */ ticks = CLAMP(ticks - 1, 1, lptim_time_base); k_spinlock_key_t key = k_spin_lock(&lock); /* read current counter value (cannot exceed 16bit) */ uint32_t lp_time = z_clock_lptim_getcounter(); uint32_t autoreload = LL_LPTIM_GetAutoReload(LPTIM); if (LL_LPTIM_IsActiveFlag_ARRM(LPTIM) || ((autoreload - lp_time) < LPTIM_GUARD_VALUE)) { /* interrupt happens or happens soon. * It's impossible to set autoreload value. 
*/ k_spin_unlock(&lock, key); return; } /* calculate the next arr value (cannot exceed 16bit) * adjust the next ARR match value to align on Ticks * from the current counter value to first next Tick */ next_arr = (((lp_time * CONFIG_SYS_CLOCK_TICKS_PER_SEC) / lptim_clock_freq) + 1) * lptim_clock_freq / (CONFIG_SYS_CLOCK_TICKS_PER_SEC); next_arr = next_arr + ((uint32_t)(ticks) * lptim_clock_freq) / CONFIG_SYS_CLOCK_TICKS_PER_SEC; /* if the lptim_clock_freq < one ticks/sec, then next_arr must be > 0 */ /* maximise to TIMEBASE */ if (next_arr > lptim_time_base) { next_arr = lptim_time_base; } /* The new autoreload value must be LPTIM_GUARD_VALUE clock cycles * after current lptim to make sure we don't miss * an autoreload interrupt */ else if (next_arr < (lp_time + LPTIM_GUARD_VALUE)) { next_arr = lp_time + LPTIM_GUARD_VALUE; } /* with slow lptim_clock_freq, LPTIM_GUARD_VALUE of 1 is enough */ next_arr = next_arr - 1; /* Update autoreload register */ lptim_set_autoreload(next_arr); k_spin_unlock(&lock, key); } static uint32_t sys_clock_lp_time_get(void) { uint32_t lp_time; do { /* In case of counter roll-over, add the autoreload value, * because the irq has not yet been handled */ if (arrm_state_get()) { lp_time = LL_LPTIM_GetAutoReload(LPTIM) + 1; lp_time += z_clock_lptim_getcounter(); break; } lp_time = z_clock_lptim_getcounter(); /* Check if the flag ARRM wasn't be set during the process */ } while (arrm_state_get()); return lp_time; } uint32_t sys_clock_elapsed(void) { if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return 0; } k_spinlock_key_t key = k_spin_lock(&lock); uint32_t lp_time = sys_clock_lp_time_get(); k_spin_unlock(&lock, key); /* gives the value of LPTIM counter (ms) * since the previous 'announce' */ uint64_t ret = ((uint64_t)lp_time * CONFIG_SYS_CLOCK_TICKS_PER_SEC) / lptim_clock_freq; return (uint32_t)(ret); } uint32_t sys_clock_cycle_get_32(void) { /* just gives the accumulated count in a number of hw cycles */ k_spinlock_key_t key = k_spin_lock(&lock); 
uint32_t lp_time = sys_clock_lp_time_get(); lp_time += accumulated_lptim_cnt; /* convert lptim count in a nb of hw cycles with precision */ uint64_t ret = ((uint64_t)lp_time * sys_clock_hw_cycles_per_sec()) / lptim_clock_freq; k_spin_unlock(&lock, key); /* convert in hw cycles (keeping 32bit value) */ return (uint32_t)(ret); } /* Wait for the IER register of the stm32U5 ready, after any bit write operation */ void stm32_lptim_wait_ready(void) { #ifdef CONFIG_SOC_SERIES_STM32U5X while (LL_LPTIM_IsActiveFlag_DIEROK(LPTIM) == 0) { } LL_LPTIM_ClearFlag_DIEROK(LPTIM); #else /* Empty : not relevant */ #endif } static int sys_clock_driver_init(void) { uint32_t count_per_tick; int err; if (!device_is_ready(clk_ctrl)) { return -ENODEV; } /* Enable LPTIM bus clock */ err = clock_control_on(clk_ctrl, (clock_control_subsys_t) &lptim_clk[0]); if (err < 0) { return -EIO; } #if defined(LL_SRDAMR_GRP1_PERIPH_LPTIM1AMEN) LL_SRDAMR_GRP1_EnableAutonomousClock(LL_SRDAMR_GRP1_PERIPH_LPTIM1AMEN); #endif /* Enable LPTIM clock source */ err = clock_control_configure(clk_ctrl, (clock_control_subsys_t) &lptim_clk[1], NULL); if (err < 0) { return -EIO; } /* Get LPTIM clock freq */ err = clock_control_get_rate(clk_ctrl, (clock_control_subsys_t) &lptim_clk[1], &lptim_clock_freq); if (err < 0) { return -EIO; } #if defined(CONFIG_SOC_SERIES_STM32L0X) /* Driver only supports freqs up to 32768Hz. On L0, LSI freq is 37KHz, * which will overflow the LPTIM counter. * Previous LPTIM configuration using device tree was doing forcing this * with a Kconfig default. Impact is that time is 1.13 faster than reality. * Following lines reproduce this behavior in order not to change behavior. * This issue will be fixed by implementation LPTIM prescaler support. 
*/ if (lptim_clk[1].bus == STM32_SRC_LSI) { lptim_clock_freq = KHZ(32); } #endif /* Set LPTIM time base based on clock source freq */ if (lptim_clock_freq == KHZ(32)) { lptim_time_base = 0xF9FF; } else if (lptim_clock_freq == 32768) { lptim_time_base = 0xFFFF; } else { return -EIO; } #if !defined(CONFIG_STM32_LPTIM_TICK_FREQ_RATIO_OVERRIDE) /* * Check coherency between CONFIG_SYS_CLOCK_TICKS_PER_SEC * and the lptim_clock_freq which is the CONFIG_STM32_LPTIM_CLOCK reduced * by the lptim_clock_presc */ if (lptim_clock_presc <= 8) { __ASSERT(CONFIG_STM32_LPTIM_CLOCK / 8 >= CONFIG_SYS_CLOCK_TICKS_PER_SEC, "It is recommended to set SYS_CLOCK_TICKS_PER_SEC to CONFIG_STM32_LPTIM_CLOCK/8"); } else { __ASSERT(CONFIG_STM32_LPTIM_CLOCK / lptim_clock_presc >= CONFIG_SYS_CLOCK_TICKS_PER_SEC, "Set SYS_CLOCK_TICKS_PER_SEC to CONFIG_STM32_LPTIM_CLOCK/lptim_clock_presc"); } #endif /* !CONFIG_STM32_LPTIM_TICK_FREQ_RATIO_OVERRIDE */ /* Actual lptim clock freq when the clock source is reduced by the prescaler */ lptim_clock_freq = lptim_clock_freq / lptim_clock_presc; /* Clear the event flag and possible pending interrupt */ IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), lptim_irq_handler, 0, 0); irq_enable(DT_INST_IRQN(0)); #ifdef CONFIG_SOC_SERIES_STM32WLX /* Enable the LPTIM wakeup EXTI line */ LL_EXTI_EnableIT_0_31(LL_EXTI_LINE_29); #endif /* configure the LPTIM counter */ LL_LPTIM_SetClockSource(LPTIM, LL_LPTIM_CLK_SOURCE_INTERNAL); /* the LPTIM clock freq is affected by the prescaler */ LL_LPTIM_SetPrescaler(LPTIM, (__CLZ(__RBIT(lptim_clock_presc)) << LPTIM_CFGR_PRESC_Pos)); #if defined(CONFIG_SOC_SERIES_STM32U5X) || \ defined(CONFIG_SOC_SERIES_STM32H5X) || \ defined(CONFIG_SOC_SERIES_STM32WBAX) LL_LPTIM_OC_SetPolarity(LPTIM, LL_LPTIM_CHANNEL_CH1, LL_LPTIM_OUTPUT_POLARITY_REGULAR); #else LL_LPTIM_SetPolarity(LPTIM, LL_LPTIM_OUTPUT_POLARITY_REGULAR); #endif LL_LPTIM_SetUpdateMode(LPTIM, LL_LPTIM_UPDATE_MODE_IMMEDIATE); LL_LPTIM_SetCounterMode(LPTIM, 
LL_LPTIM_COUNTER_MODE_INTERNAL); LL_LPTIM_DisableTimeout(LPTIM); /* counting start is initiated by software */ LL_LPTIM_TrigSw(LPTIM); #if defined(CONFIG_SOC_SERIES_STM32U5X) || \ defined(CONFIG_SOC_SERIES_STM32H5X) || \ defined(CONFIG_SOC_SERIES_STM32WBAX) /* Enable the LPTIM before proceeding with configuration */ LL_LPTIM_Enable(LPTIM); LL_LPTIM_DisableIT_CC1(LPTIM); stm32_lptim_wait_ready(); LL_LPTIM_ClearFLAG_CC1(LPTIM); #else /* LPTIM interrupt set-up before enabling */ /* no Compare match Interrupt */ LL_LPTIM_DisableIT_CMPM(LPTIM); LL_LPTIM_ClearFLAG_CMPM(LPTIM); #endif /* Autoreload match Interrupt */ LL_LPTIM_EnableIT_ARRM(LPTIM); stm32_lptim_wait_ready(); LL_LPTIM_ClearFLAG_ARRM(LPTIM); /* ARROK bit validates the write operation to ARR register */ LL_LPTIM_EnableIT_ARROK(LPTIM); stm32_lptim_wait_ready(); LL_LPTIM_ClearFlag_ARROK(LPTIM); #if !defined(CONFIG_SOC_SERIES_STM32U5X) && \ !defined(CONFIG_SOC_SERIES_STM32H5X) && \ !defined(CONFIG_SOC_SERIES_STM32WBAX) /* Enable the LPTIM counter */ LL_LPTIM_Enable(LPTIM); #endif /* Set the Autoreload value once the timer is enabled */ if (IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { /* LPTIM is triggered on a LPTIM_TIMEBASE period */ lptim_set_autoreload(lptim_time_base); } else { /* nb of LPTIM counter unit per kernel tick (depends on lptim clock prescaler) */ count_per_tick = (lptim_clock_freq / CONFIG_SYS_CLOCK_TICKS_PER_SEC); /* LPTIM is triggered on a Tick period */ lptim_set_autoreload(count_per_tick - 1); } /* Start the LPTIM counter in continuous mode */ LL_LPTIM_StartCounter(LPTIM, LL_LPTIM_OPERATING_MODE_CONTINUOUS); #ifdef CONFIG_DEBUG /* stop LPTIM during DEBUG */ #if defined(LL_DBGMCU_APB1_GRP1_LPTIM1_STOP) LL_DBGMCU_APB1_GRP1_FreezePeriph(LL_DBGMCU_APB1_GRP1_LPTIM1_STOP); #elif defined(LL_DBGMCU_APB3_GRP1_LPTIM1_STOP) LL_DBGMCU_APB3_GRP1_FreezePeriph(LL_DBGMCU_APB3_GRP1_LPTIM1_STOP); #endif #endif return 0; } void stm32_clock_control_standby_exit(void) { #ifdef CONFIG_STM32_LPTIM_STDBY_TIMER if 
(clock_control_get_status(clk_ctrl, (clock_control_subsys_t) &lptim_clk[0]) != CLOCK_CONTROL_STATUS_ON) { sys_clock_driver_init(); } #endif /* CONFIG_STM32_LPTIM_STDBY_TIMER */ } void sys_clock_idle_exit(void) { #ifdef CONFIG_STM32_LPTIM_STDBY_TIMER if (timeout_stdby) { cycle_t missed_lptim_cnt; uint32_t stdby_timer_diff, stdby_timer_post, dticks; uint64_t stdby_timer_us; /* Get current value for standby timer and reset LPTIM counter value * to start anew. */ LL_LPTIM_ResetCounter(LPTIM); counter_get_value(stdby_timer, &stdby_timer_post); /* Calculate how much time has passed since last measurement for standby timer */ /* Check IDLE timer overflow */ if (stdby_timer_pre_stdby > stdby_timer_post) { stdby_timer_diff = (counter_get_top_value(stdby_timer) - stdby_timer_pre_stdby) + stdby_timer_post + 1; } else { stdby_timer_diff = stdby_timer_post - stdby_timer_pre_stdby; } stdby_timer_us = counter_ticks_to_us(stdby_timer, stdby_timer_diff); /* Convert standby time in LPTIM cnt */ missed_lptim_cnt = (sys_clock_hw_cycles_per_sec() * stdby_timer_us) / USEC_PER_SEC; /* Add the LPTIM cnt pre standby */ missed_lptim_cnt += lptim_cnt_pre_stdby; /* Update the cycle counter to include the cycles missed in standby */ accumulated_lptim_cnt += missed_lptim_cnt; /* Announce the passed ticks to the kernel */ dticks = (missed_lptim_cnt * CONFIG_SYS_CLOCK_TICKS_PER_SEC) / lptim_clock_freq; sys_clock_announce(dticks); /* We've already performed all needed operations */ timeout_stdby = false; } #endif /* CONFIG_STM32_LPTIM_STDBY_TIMER */ } SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/stm32_lptim_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,494
```c /* * */ #include <zephyr/arch/cpu.h> #include <zephyr/init.h> #include <soc.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/renesas_cpg_mssr.h> #include <zephyr/irq.h> #define DT_DRV_COMPAT renesas_rcar_cmt #define TIMER_IRQ DT_INST_IRQN(0) #define TIMER_BASE_ADDR DT_INST_REG_ADDR(0) #define TIMER_CLOCK_FREQUENCY DT_INST_PROP(0, clock_frequency) #define CLOCK_SUBSYS DT_INST_CLOCKS_CELL(0, module) #define CYCLES_PER_SEC TIMER_CLOCK_FREQUENCY #define CYCLES_PER_TICK (CYCLES_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC) #if defined(CONFIG_TEST) const int32_t z_sys_timer_irq_for_test = DT_IRQN(DT_INST(0, renesas_rcar_cmt)); #endif static struct rcar_cpg_clk mod_clk = { .module = DT_INST_CLOCKS_CELL(0, module), .domain = DT_INST_CLOCKS_CELL(0, domain), }; BUILD_ASSERT(CYCLES_PER_TICK > 1, "CYCLES_PER_TICK must be greater than 1"); #define CMCOR0_OFFSET 0x018 /* constant register 0 */ #define CMCNT0_OFFSET 0x014 /* counter 0 */ #define CMCSR0_OFFSET 0x010 /* control/status register 0 */ #define CMCOR1_OFFSET 0x118 /* constant register 1 */ #define CMCNT1_OFFSET 0x114 /* counter 1 */ #define CMCSR1_OFFSET 0x110 /* control/status register 1 */ #define CMCLKE 0xB00 /* CLK enable register */ #define CLKEN0 BIT(5) /* Enable Clock for channel 0 */ #define CLKEN1 BIT(6) /* Enable Clock for channel 1 */ #define CMSTR0_OFFSET 0x000 /* Timer start register 0 */ #define CMSTR1_OFFSET 0x100 /* Timer start register 1 */ #define START_BIT BIT(0) #define CSR_CLK_DIV_1 0x00000007 #define CSR_ENABLE_COUNTER_IN_DEBUG BIT(3) #define CSR_ENABLE_INTERRUPT BIT(5) #define CSR_FREE_RUN BIT(8) #define CSR_WRITE_FLAG BIT(13) #define CSR_OVERFLOW_FLAG BIT(14) #define CSR_MATCH_FLAG BIT(15) static void cmt_isr(void *arg) { ARG_UNUSED(arg); uint32_t reg_val; /* clear the interrupt */ reg_val = sys_read32(TIMER_BASE_ADDR + CMCSR0_OFFSET); reg_val &= ~CSR_MATCH_FLAG; sys_write32(reg_val, TIMER_BASE_ADDR + 
CMCSR0_OFFSET); /* Announce to the kernel */ sys_clock_announce(1); } uint32_t sys_clock_elapsed(void) { /* Always return 0 for tickful operation */ return 0; } uint32_t sys_clock_cycle_get_32(void) { return sys_read32(TIMER_BASE_ADDR + CMCNT1_OFFSET); } /* * Initialize both channels at same frequency, * Set the first one to generates interrupt at CYCLES_PER_TICK. * The second one is used for cycles count, the match value is set * at max uint32_t. */ static int sys_clock_driver_init(void) { const struct device *clk; uint32_t reg_val; int i, ret; clk = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(0)); if (!device_is_ready(clk)) { return -ENODEV; } ret = clock_control_on(clk, (clock_control_subsys_t)&mod_clk); if (ret < 0) { return ret; } /* Supply clock for both channels */ sys_write32(CLKEN0 | CLKEN1, TIMER_BASE_ADDR + CMCLKE); /* Stop both channels */ reg_val = sys_read32(TIMER_BASE_ADDR + CMSTR0_OFFSET); reg_val &= ~START_BIT; sys_write32(reg_val, TIMER_BASE_ADDR + CMSTR0_OFFSET); reg_val = sys_read32(TIMER_BASE_ADDR + CMSTR1_OFFSET); reg_val &= ~START_BIT; sys_write32(reg_val, TIMER_BASE_ADDR + CMSTR1_OFFSET); /* Set the timers as 32-bit, with RCLK/1 clock */ sys_write32(CSR_FREE_RUN | CSR_CLK_DIV_1 | CSR_ENABLE_INTERRUPT, TIMER_BASE_ADDR + CMCSR0_OFFSET); /* Do not enable interrupts for the second channel */ sys_write32(CSR_FREE_RUN | CSR_CLK_DIV_1, TIMER_BASE_ADDR + CMCSR1_OFFSET); /* Set the first channel match to CYCLES Per tick*/ sys_write32(CYCLES_PER_TICK, TIMER_BASE_ADDR + CMCOR0_OFFSET); /* Set the second channel match to max uint32 */ sys_write32(0xffffffff, TIMER_BASE_ADDR + CMCOR1_OFFSET); /* Reset the counter for first channel, check WRFLG first */ while (sys_read32(TIMER_BASE_ADDR + CMCSR0_OFFSET) & CSR_WRITE_FLAG) { ; } sys_write32(0, TIMER_BASE_ADDR + CMCNT0_OFFSET); for (i = 0; i < 1000; i++) { if (!sys_read32(TIMER_BASE_ADDR + CMCNT0_OFFSET)) { break; } } __ASSERT(sys_read32(TIMER_BASE_ADDR + CMCNT0_OFFSET) == 0, "Fail to clear CMCNT0 register"); /* 
Connect timer interrupt for channel 0*/ IRQ_CONNECT(TIMER_IRQ, 0, cmt_isr, 0, 0); irq_enable(TIMER_IRQ); /* Start the timers */ sys_write32(START_BIT, TIMER_BASE_ADDR + CMSTR0_OFFSET); sys_write32(START_BIT, TIMER_BASE_ADDR + CMSTR1_OFFSET); return 0; } SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/rcar_cmt_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,300
```unknown DT_CHOSEN_IDLE_TIMER := zephyr,cortex-m-idle-timer config CORTEX_M_SYSTICK bool "Cortex-M SYSTICK timer" depends on CPU_CORTEX_M_HAS_SYSTICK default y depends on DT_HAS_ARM_ARMV6M_SYSTICK_ENABLED || \ DT_HAS_ARM_ARMV7M_SYSTICK_ENABLED || \ DT_HAS_ARM_ARMV8M_SYSTICK_ENABLED || \ DT_HAS_ARM_ARMV8_1M_SYSTICK_ENABLED select TICKLESS_CAPABLE select SYSTEM_TIMER_HAS_DISABLE_SUPPORT select CORTEX_M_SYSTICK_INSTALL_ISR help This module implements a kernel device driver for the Cortex-M processor SYSTICK timer and provides the standard "system clock driver" interfaces. config CORTEX_M_SYSTICK_INSTALL_ISR bool depends on CPU_CORTEX_M_HAS_SYSTICK help This option should be selected by SysTick-based drivers so that the sys_clock_isr() function is installed. config CORTEX_M_SYSTICK_64BIT_CYCLE_COUNTER bool "Cortex-M SYSTICK timer with sys_clock_cycle_get_64() support" depends on CORTEX_M_SYSTICK default y if (SYS_CLOCK_HW_CYCLES_PER_SEC > 60000000) select TIMER_HAS_64BIT_CYCLE_COUNTER help This driver, due to its limited 24-bits hardware counter, is already tracking a separate cycle count in software. This option make that count a 64-bits value to support sys_clock_cycle_get_64(). This is cheap to do as expensive math operations (i.e. divisions) are performed only on counter interval values that always fit in 32 bits. This is set to y by default when the hardware clock is fast enough to wrap sys_clock_cycle_get_32() in about a minute or less. config CORTEX_M_SYSTICK_IDLE_TIMER bool "Use an additional timer while entering IDLE" default $(dt_chosen_enabled,$(DT_CHOSEN_IDLE_TIMER)) depends on COUNTER depends on TICKLESS_KERNEL depends on PM help There are chips e.g. STMFX family that use SysTick as a system timer, but SysTick is not clocked in low power mode. These chips usually have another timer that is not stopped, but it has lower frequency e.g. RTC, thus it can't be used as a main system timer. 
Use the IDLE timer for timeout (wakeup) when the system is entering IDLE state. The chosen IDLE timer node has to support setting alarm from the counter API. ```
/content/code_sandbox/drivers/timer/Kconfig.cortex_m_systick
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
568
```unknown # Keep this option as an alias to INTEL_ADSP_TIMER because # SoF is still referencing this symbol. The reason is the # symbiotic relationship between these two projects, that from # Zephyr's perspective is the framework and also the application. # Once it gets merged, we have to update SoF to use INTEL_ADSP_TIMER # option and remove it. config CAVS_TIMER bool help Temporary alias to INTEL_ADSP_TIMER config INTEL_ADSP_TIMER bool "Intel Audio DSP timer" default y depends on DT_HAS_INTEL_ADSP_TIMER_ENABLED select CAVS_TIMER select TICKLESS_CAPABLE select TIMER_HAS_64BIT_CYCLE_COUNTER select SYSTEM_CLOCK_LOCK_FREE_COUNT help The DSP wall clock timer is a timer driven directly by external oscillator and is external to the CPU core(s). It is not as fast as the internal core clock, but provides a common and synchronized counter for all CPU cores (which is useful for SMP). ```
/content/code_sandbox/drivers/timer/Kconfig.cavs
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
221
```c /* */ #define DT_DRV_COMPAT microchip_xec_rtos_timer #include <zephyr/init.h> #include <zephyr/devicetree.h> #include <soc.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/sys_clock.h> #include <zephyr/spinlock.h> #include <cmsis_core.h> #include <zephyr/irq.h> BUILD_ASSERT(!IS_ENABLED(CONFIG_SMP), "XEC RTOS timer doesn't support SMP"); BUILD_ASSERT(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC == 32768, "XEC RTOS timer HW frequency is fixed at 32768"); #define DEBUG_RTOS_TIMER 0 #if DEBUG_RTOS_TIMER != 0 /* Enable feature to halt timer on JTAG/SWD CPU halt */ #define TIMER_START_VAL (MCHP_RTMR_CTRL_BLK_EN | MCHP_RTMR_CTRL_START \ | MCHP_RTMR_CTRL_HW_HALT_EN) #else #define TIMER_START_VAL (MCHP_RTMR_CTRL_BLK_EN | MCHP_RTMR_CTRL_START) #endif /* * Overview: * * This driver enables the Microchip XEC 32KHz based RTOS timer as the Zephyr * system timer. It supports both legacy ("tickful") mode as well as * TICKLESS_KERNEL. The XEC RTOS timer is a down counter with a fixed * frequency of 32768 Hz. The driver is based upon the Intel local APIC * timer driver. * Configuration: * * CONFIG_MCHP_XEC_RTOS_TIMER=y * * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC=<hz> must be set to 32768. * * To reduce truncation errors from accumulating due to conversion * to/from time, ticks, and HW cycles set ticks per second equal to * the frequency. With tickless kernel mode enabled the kernel will not * program a periodic timer at this fast rate. * CONFIG_SYS_CLOCK_TICKS_PER_SEC=32768 */ #define CYCLES_PER_TICK \ (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC) #define TIMER_REGS \ ((struct rtmr_regs *)DT_INST_REG_ADDR(0)) #define ECIA_XEC_REGS \ ((struct ecia_regs *)DT_REG_ADDR(DT_NODELABEL(ecia))) #ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT #define PCR_XEC_REGS \ ((struct pcr_regs *)DT_REG_ADDR(DT_NODELABEL(pcr))) /* * pcrs property at index 0 is register index into array of 32-bit PCR SLP_EN, * CLK_REQ, or RST_EN registers. Property at index 1 is the bit position. 
*/ /*DT_PROP_BY_IDX(DT_NODELABEL(kbc0), girqs, 0)*/ #define BTMR32_0_PCR_REG_IDX (DT_PROP_BY_IDX(DT_NODELABEL(timer4), pcrs, 0)) #define BTMR32_0_PCR_BITPOS (DT_PROP_BY_IDX(DT_NODELABEL(timer4), pcrs, 1)) #define BTMR32_0_REGS \ ((struct btmr_regs *)(DT_REG_ADDR(DT_NODELABEL(timer4)))) #endif /* Mask off bits[31:28] of 32-bit count */ #define TIMER_MAX 0x0fffffffu #define TIMER_COUNT_MASK 0x0fffffffu #define TIMER_STOPPED 0xf0000000u /* Adjust cycle count programmed into timer for HW restart latency */ #define TIMER_ADJUST_LIMIT 2 #define TIMER_ADJUST_CYCLES 1 /* max number of ticks we can load into the timer in one shot */ #define MAX_TICKS (TIMER_MAX / CYCLES_PER_TICK) #define TIMER_GIRQ DT_INST_PROP_BY_IDX(0, girqs, 0) #define TIMER_GIRQ_POS DT_INST_PROP_BY_IDX(0, girqs, 1) #define TIMER_NVIC_NO DT_INST_IRQN(0) #define TIMER_NVIC_PRIO DT_INST_IRQ(0, priority) /* * The spinlock protects all access to the RTMR registers, as well as * 'total_cycles', 'last_announcement', and 'cached_icr'. * * One important invariant that must be observed: `total_cycles` + `cached_icr` * is always an integral multiple of CYCLE_PER_TICK; this is, timer interrupts * are only ever scheduled to occur at tick boundaries. */ static struct k_spinlock lock; static uint32_t total_cycles; static uint32_t cached_icr = CYCLES_PER_TICK; /* * NOTE: using inline for speed instead of call to external SoC function. * MEC GIRQ numbers are documented as 8 to 26, check and convert to zero * based index. 
*/ static inline void girq_src_clr(int girq, int bitpos) { if ((girq < 8) || (girq > 26)) { return; } ECIA_XEC_REGS->GIRQ[girq - 8].SRC = BIT(bitpos); } static inline void girq_src_en(int girq, int bitpos) { if ((girq < 8) || (girq > 26)) { return; } ECIA_XEC_REGS->GIRQ[girq - 8].EN_SET = BIT(bitpos); } static inline void girq_src_dis(int girq, int bitpos) { if ((girq < 8) || (girq > 26)) { return; } ECIA_XEC_REGS->GIRQ[girq - 8].EN_CLR = BIT(bitpos); } static void timer_restart(uint32_t countdown) { TIMER_REGS->CTRL = 0U; TIMER_REGS->CTRL = MCHP_RTMR_CTRL_BLK_EN; TIMER_REGS->PRLD = countdown; TIMER_REGS->CTRL = TIMER_START_VAL; } /* * Read the RTOS timer counter handling the case where the timer * has been reloaded within 1 32KHz clock of reading its count register. * The RTOS timer hardware must synchronize the write to its control register * on the AHB clock domain with the 32KHz clock domain of its internal logic. * This synchronization can take from nearly 0 time up to 1 32KHz clock as it * depends upon which 48MHz AHB clock with a 32KHz period the register write * was on. We detect the timer is in the load state by checking the read-only * count register and the START bit in the control register. If count register * is 0 and the START bit is set then the timer has been started and is in the * process of moving the preload register value into the count register. */ static inline uint32_t timer_count(void) { uint32_t ccr = TIMER_REGS->CNT; if ((ccr == 0) && (TIMER_REGS->CTRL & MCHP_RTMR_CTRL_START)) { ccr = cached_icr; } return ccr; } #ifdef CONFIG_TICKLESS_KERNEL static uint32_t last_announcement; /* last time we called sys_clock_announce() */ /* * Request a timeout n Zephyr ticks in the future from now. * Requested number of ticks in the future of n <= 1 means the kernel wants * the tick announced as soon as possible, ideally no more than one tick * in the future. * * Per comment below we don't clear RTMR pending interrupt. 
* RTMR counter register is read-only and is loaded from the preload * register by a 0->1 transition of the control register start bit. * Writing a new value to preload only takes effect once the count * register reaches 0. */ void sys_clock_set_timeout(int32_t n, bool idle) { ARG_UNUSED(idle); uint32_t ccr, temp; int full_ticks; /* number of complete ticks we'll wait */ uint32_t full_cycles; /* full_ticks represented as cycles */ uint32_t partial_cycles; /* number of cycles to first tick boundary */ if (idle && (n == K_TICKS_FOREVER)) { /* * We are not in a locked section. Are writes to two * global objects safe from pre-emption? */ TIMER_REGS->CTRL = 0U; /* stop timer */ cached_icr = TIMER_STOPPED; return; } if (n < 1) { full_ticks = 0; } else if ((n == K_TICKS_FOREVER) || (n > MAX_TICKS)) { full_ticks = MAX_TICKS - 1; } else { full_ticks = n - 1; } full_cycles = full_ticks * CYCLES_PER_TICK; k_spinlock_key_t key = k_spin_lock(&lock); ccr = timer_count(); /* turn off to clear any pending interrupt status */ TIMER_REGS->CTRL = 0u; girq_src_clr(TIMER_GIRQ, TIMER_GIRQ_POS); NVIC_ClearPendingIRQ(TIMER_NVIC_NO); temp = total_cycles; temp += (cached_icr - ccr); temp &= TIMER_COUNT_MASK; total_cycles = temp; partial_cycles = CYCLES_PER_TICK - (total_cycles % CYCLES_PER_TICK); cached_icr = full_cycles + partial_cycles; /* adjust for up to one 32KHz cycle startup time */ temp = cached_icr; if (temp > TIMER_ADJUST_LIMIT) { temp -= TIMER_ADJUST_CYCLES; } timer_restart(temp); k_spin_unlock(&lock, key); } /* * Return the number of Zephyr ticks elapsed from last call to * sys_clock_announce in the ISR. The caller casts uint32_t to int32_t. * We must make sure bit[31] is 0 in the return value. 
*/ uint32_t sys_clock_elapsed(void) { uint32_t ccr; uint32_t ticks; int32_t elapsed; k_spinlock_key_t key = k_spin_lock(&lock); ccr = timer_count(); /* It may not look efficient but the compiler does a good job */ elapsed = (int32_t)total_cycles - (int32_t)last_announcement; if (elapsed < 0) { elapsed = -1 * elapsed; } ticks = (uint32_t)elapsed; ticks += cached_icr - ccr; ticks /= CYCLES_PER_TICK; ticks &= TIMER_COUNT_MASK; k_spin_unlock(&lock, key); return ticks; } static void xec_rtos_timer_isr(const void *arg) { ARG_UNUSED(arg); uint32_t cycles; int32_t ticks; k_spinlock_key_t key = k_spin_lock(&lock); girq_src_clr(TIMER_GIRQ, TIMER_GIRQ_POS); /* Restart the timer as early as possible to minimize drift... */ timer_restart(MAX_TICKS * CYCLES_PER_TICK); cycles = cached_icr; cached_icr = MAX_TICKS * CYCLES_PER_TICK; total_cycles += cycles; total_cycles &= TIMER_COUNT_MASK; /* handle wrap by using (power of 2) - 1 mask */ ticks = total_cycles - last_announcement; ticks &= TIMER_COUNT_MASK; ticks /= CYCLES_PER_TICK; last_announcement = total_cycles; k_spin_unlock(&lock, key); sys_clock_announce(ticks); } #else /* Non-tickless kernel build. */ static void xec_rtos_timer_isr(const void *arg) { ARG_UNUSED(arg); k_spinlock_key_t key = k_spin_lock(&lock); girq_src_clr(TIMER_GIRQ, TIMER_GIRQ_POS); /* Restart the timer as early as possible to minimize drift... */ timer_restart(cached_icr); uint32_t temp = total_cycles + CYCLES_PER_TICK; total_cycles = temp & TIMER_COUNT_MASK; k_spin_unlock(&lock, key); sys_clock_announce(1); } uint32_t sys_clock_elapsed(void) { return 0U; } #endif /* CONFIG_TICKLESS_KERNEL */ /* * Warning RTOS timer resolution is 30.5 us. * This is called by two code paths: * 1. Kernel call to k_cycle_get_32() -> arch_k_cycle_get_32() -> here. * The kernel is casting return to (int) and using it uncasted in math * expressions with int types. Expression result is stored in an int. * 2. 
If CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT is not defined then * z_impl_k_busy_wait calls here. This code path uses the value as uint32_t. * */ uint32_t sys_clock_cycle_get_32(void) { uint32_t ret; uint32_t ccr; k_spinlock_key_t key = k_spin_lock(&lock); ccr = timer_count(); ret = (total_cycles + (cached_icr - ccr)) & TIMER_COUNT_MASK; k_spin_unlock(&lock, key); return ret; } void sys_clock_idle_exit(void) { if (cached_icr == TIMER_STOPPED) { cached_icr = CYCLES_PER_TICK; timer_restart(cached_icr); } } void sys_clock_disable(void) { TIMER_REGS->CTRL = 0U; } #ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT /* * We implement custom busy wait using a MEC1501 basic timer running on * the 48MHz clock domain. This code is here for future power management * save/restore of the timer context. */ /* * 32-bit basic timer 0 configured for 1MHz count up, auto-reload, * and no interrupt generation. */ void arch_busy_wait(uint32_t usec_to_wait) { if (usec_to_wait == 0) { return; } uint32_t start = BTMR32_0_REGS->CNT; for (;;) { uint32_t curr = BTMR32_0_REGS->CNT; if ((curr - start) >= usec_to_wait) { break; } } } #endif static int sys_clock_driver_init(void) { #ifdef CONFIG_TICKLESS_KERNEL cached_icr = MAX_TICKS; #endif TIMER_REGS->CTRL = 0u; girq_src_clr(TIMER_GIRQ, TIMER_GIRQ_POS); girq_src_dis(TIMER_GIRQ, TIMER_GIRQ_POS); NVIC_ClearPendingIRQ(TIMER_NVIC_NO); IRQ_CONNECT(TIMER_NVIC_NO, TIMER_NVIC_PRIO, xec_rtos_timer_isr, 0, 0); irq_enable(TIMER_NVIC_NO); girq_src_en(TIMER_GIRQ, TIMER_GIRQ_POS); #ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT uint32_t btmr_ctrl = (MCHP_BTMR_CTRL_ENABLE | MCHP_BTMR_CTRL_AUTO_RESTART | MCHP_BTMR_CTRL_COUNT_UP | (47UL << MCHP_BTMR_CTRL_PRESCALE_POS)); #if CONFIG_SOC_SERIES_MEC15XX mchp_pcr_periph_slp_ctrl(PCR_B32TMR0, 0); #else PCR_XEC_REGS->SLP_EN[BTMR32_0_PCR_REG_IDX] &= ~BIT(BTMR32_0_PCR_BITPOS); #endif BTMR32_0_REGS->CTRL = MCHP_BTMR_CTRL_SOFT_RESET; BTMR32_0_REGS->CTRL = btmr_ctrl; BTMR32_0_REGS->PRLD = UINT32_MAX; btmr_ctrl |= MCHP_BTMR_CTRL_START; 
timer_restart(cached_icr); /* wait for RTOS timer to load count register from preload */ while (TIMER_REGS->CNT == 0) { ; } BTMR32_0_REGS->CTRL = btmr_ctrl; #else timer_restart(cached_icr); #endif return 0; } SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/mchp_xec_rtos_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,397
```c /* * */ #include <zephyr/devicetree.h> #if DT_HAS_COMPAT_STATUS_OKAY(nxp_kinetis_lptmr) #define DT_DRV_COMPAT nxp_kinetis_lptmr #else #define DT_DRV_COMPAT nxp_lptmr #endif #include <zephyr/init.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/kernel.h> #include <zephyr/sys/time_units.h> #include <fsl_lptmr.h> #include <zephyr/irq.h> BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) == 1, "No LPTMR instance enabled in devicetree"); /* Prescaler mapping */ #define LPTMR_PRESCALER_2 kLPTMR_Prescale_Glitch_0 #define LPTMR_PRESCALER_4 kLPTMR_Prescale_Glitch_1 #define LPTMR_PRESCALER_8 kLPTMR_Prescale_Glitch_2 #define LPTMR_PRESCALER_16 kLPTMR_Prescale_Glitch_3 #define LPTMR_PRESCALER_32 kLPTMR_Prescale_Glitch_4 #define LPTMR_PRESCALER_64 kLPTMR_Prescale_Glitch_5 #define LPTMR_PRESCALER_128 kLPTMR_Prescale_Glitch_6 #define LPTMR_PRESCALER_256 kLPTMR_Prescale_Glitch_7 #define LPTMR_PRESCALER_512 kLPTMR_Prescale_Glitch_8 #define LPTMR_PRESCALER_1024 kLPTMR_Prescale_Glitch_9 #define LPTMR_PRESCALER_2048 kLPTMR_Prescale_Glitch_10 #define LPTMR_PRESCALER_4096 kLPTMR_Prescale_Glitch_11 #define LPTMR_PRESCALER_8192 kLPTMR_Prescale_Glitch_12 #define LPTMR_PRESCALER_16384 kLPTMR_Prescale_Glitch_13 #define LPTMR_PRESCALER_32768 kLPTMR_Prescale_Glitch_14 #define LPTMR_PRESCALER_65536 kLPTMR_Prescale_Glitch_15 #define TO_LPTMR_PRESCALER(val) _DO_CONCAT(LPTMR_PRESCALER_, val) /* Prescaler clock mapping */ #define TO_LPTMR_CLK_SEL(val) _DO_CONCAT(kLPTMR_PrescalerClock_, val) /* Devicetree properties */ #define LPTMR_BASE ((LPTMR_Type *)(DT_INST_REG_ADDR(0))) #define LPTMR_CLK_SOURCE TO_LPTMR_CLK_SEL(DT_INST_PROP(0, clk_source)); #define LPTMR_PRESCALER TO_LPTMR_PRESCALER(DT_INST_PROP(0, prescaler)); #define LPTMR_BYPASS_PRESCALER DT_INST_PROP(0, prescaler) == 1 #define LPTMR_IRQN DT_INST_IRQN(0) #define LPTMR_IRQ_PRIORITY DT_INST_IRQ(0, priority) /* Timer cycles per tick */ #define CYCLES_PER_TICK ((uint32_t)((uint64_t)sys_clock_hw_cycles_per_sec() \ / 
(uint64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC)) /* 32 bit cycle counter */ static volatile uint32_t cycles; void sys_clock_set_timeout(int32_t ticks, bool idle) { ARG_UNUSED(idle); if (idle && (ticks == K_TICKS_FOREVER)) { LPTMR_DisableInterrupts(LPTMR_BASE, kLPTMR_TimerInterruptEnable); } } void sys_clock_idle_exit(void) { if (LPTMR_GetEnabledInterrupts(LPTMR_BASE) != kLPTMR_TimerInterruptEnable) { LPTMR_EnableInterrupts(LPTMR_BASE, kLPTMR_TimerInterruptEnable); } } void sys_clock_disable(void) { LPTMR_DisableInterrupts(LPTMR_BASE, kLPTMR_TimerInterruptEnable); LPTMR_StopTimer(LPTMR_BASE); } uint32_t sys_clock_elapsed(void) { return 0; } uint32_t sys_clock_cycle_get_32(void) { return LPTMR_GetCurrentTimerCount(LPTMR_BASE) + cycles; } static void mcux_lptmr_timer_isr(const void *arg) { ARG_UNUSED(arg); cycles += CYCLES_PER_TICK; sys_clock_announce(1); LPTMR_ClearStatusFlags(LPTMR_BASE, kLPTMR_TimerCompareFlag); } static int sys_clock_driver_init(void) { lptmr_config_t config; LPTMR_GetDefaultConfig(&config); config.timerMode = kLPTMR_TimerModeTimeCounter; config.enableFreeRunning = false; config.prescalerClockSource = LPTMR_CLK_SOURCE; #if LPTMR_BYPASS_PRESCALER config.bypassPrescaler = true; #else /* LPTMR_BYPASS_PRESCALER */ config.bypassPrescaler = false; config.value = LPTMR_PRESCALER; #endif /* !LPTMR_BYPASS_PRESCALER */ LPTMR_Init(LPTMR_BASE, &config); IRQ_CONNECT(LPTMR_IRQN, LPTMR_IRQ_PRIORITY, mcux_lptmr_timer_isr, NULL, 0); irq_enable(LPTMR_IRQN); LPTMR_EnableInterrupts(LPTMR_BASE, kLPTMR_TimerInterruptEnable); LPTMR_SetTimerPeriod(LPTMR_BASE, CYCLES_PER_TICK); LPTMR_StartTimer(LPTMR_BASE); return 0; } SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/mcux_lptmr_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,244
```unknown config MCUX_OS_TIMER bool "MCUX OS Event timer" default y depends on DT_HAS_NXP_OS_TIMER_ENABLED select TICKLESS_CAPABLE help This module implements a kernel device driver for the NXP OS event timer and provides the standard "system clock driver" interfaces. if MCUX_OS_TIMER config MCUX_OS_TIMER_PM_POWERED_OFF bool "Reinitialize the OS Timer" help OS Timer is turned off in certain low power modes. When this option is picked, OS Timer will take steps to store state and reinitialize on wakeups. endif # MCUX_OS_TIMER ```
/content/code_sandbox/drivers/timer/Kconfig.mcux_os
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
134
```c /* * */ /** * @file * @brief Initialize system clock driver * * Initializing the timer driver is done in this module to reduce code * duplication. */ #include <zephyr/kernel.h> #include <zephyr/init.h> #include <zephyr/drivers/timer/system_timer.h> /* Weak-linked noop defaults for optional driver interfaces*/ void __weak sys_clock_set_timeout(int32_t ticks, bool idle) { } void __weak sys_clock_idle_exit(void) { } ```
/content/code_sandbox/drivers/timer/sys_clock_init.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
102
```unknown config ITE_IT8XXX2_TIMER bool "ITE it8xxx2 timer driver" default y depends on DT_HAS_ITE_IT8XXX2_TIMER_ENABLED select TICKLESS_CAPABLE help This module implements a kernel device driver for the ITE it8xxx2 HW timer model ```
/content/code_sandbox/drivers/timer/Kconfig.ite_it8xxx2
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
66
```c /* * */ #define DT_DRV_COMPAT nxp_os_timer #include <limits.h> #include <zephyr/init.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/irq.h> #include <zephyr/sys_clock.h> #include <zephyr/spinlock.h> #include <zephyr/drivers/counter.h> #include <zephyr/pm/pm.h> #include "fsl_ostimer.h" #ifndef CONFIG_SOC_MCXN236 #include "fsl_power.h" #endif #define CYC_PER_TICK ((uint32_t)((uint64_t)sys_clock_hw_cycles_per_sec() \ / (uint64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC)) #define MAX_CYC INT_MAX #define MAX_TICKS ((MAX_CYC - CYC_PER_TICK) / CYC_PER_TICK) #define MIN_DELAY 1000 #define TICKLESS IS_ENABLED(CONFIG_TICKLESS_KERNEL) static struct k_spinlock lock; static uint64_t last_count; static OSTIMER_Type *base; /* Total cycles of the timer compensated to include the time lost in "sleep/deep sleep" modes. * This maintains the timer count to account for the case if the OS Timer is reset in * certain deep sleep modes and the time elapsed when it is powered off. */ static uint64_t cyc_sys_compensated; #if DT_NODE_HAS_STATUS(DT_NODELABEL(standby), okay) && CONFIG_PM static const struct device *counter_dev; #endif static uint64_t mcux_lpc_ostick_get_compensated_timer_value(void) { return (OSTIMER_GetCurrentTimerValue(base) + cyc_sys_compensated); } void mcux_lpc_ostick_isr(const void *arg) { ARG_UNUSED(arg); k_spinlock_key_t key = k_spin_lock(&lock); uint64_t now = mcux_lpc_ostick_get_compensated_timer_value(); uint32_t dticks = (uint32_t)((now - last_count) / CYC_PER_TICK); /* Clear interrupt flag by writing 1. */ base->OSEVENT_CTRL &= ~OSTIMER_OSEVENT_CTRL_OSTIMER_INTENA_MASK; last_count += dticks * CYC_PER_TICK; if (!TICKLESS) { uint64_t next = last_count + CYC_PER_TICK; if ((int64_t)(next - now) < MIN_DELAY) { next += CYC_PER_TICK; } OSTIMER_SetMatchValue(base, next, NULL); } k_spin_unlock(&lock, key); sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? 
dticks : 1); } #if DT_NODE_HAS_STATUS(DT_NODELABEL(standby), okay) && CONFIG_PM /* The OS Timer is disabled in certain low power modes and cannot wakeup the system * on timeout. This function will be called by the low power code to allow the * OS Timer to save off the count if needed and also start a wakeup counter * that would wakeup the system from deep power down modes. */ static uint32_t mcux_lpc_ostick_set_counter_timeout(int32_t curr_timeout) { uint32_t ret = 0; if (counter_dev) { uint32_t timeout; int32_t ticks; struct counter_top_cfg top_cfg = { 0 }; timeout = k_ticks_to_us_ceil32(curr_timeout); ticks = counter_us_to_ticks(counter_dev, timeout); ticks = CLAMP(ticks, 1, counter_get_max_top_value(counter_dev)); top_cfg.ticks = ticks; top_cfg.callback = NULL; top_cfg.user_data = NULL; top_cfg.flags = 0; if (counter_set_top_value(counter_dev, &top_cfg) != 0) { /* Setting top value failed, try setting an alarm */ struct counter_alarm_cfg alarm_cfg; alarm_cfg.ticks = ticks; alarm_cfg.callback = NULL; alarm_cfg.user_data = NULL; alarm_cfg.flags = 0; if (counter_set_channel_alarm(counter_dev, 0, &alarm_cfg) != 0) { ret = 1; goto done; } } #if CONFIG_MCUX_OS_TIMER_PM_POWERED_OFF /* Capture the current timer value for cases where it loses its state * in low power modes. */ cyc_sys_compensated += OSTIMER_GetCurrentTimerValue(base); #endif /* Counter is set to wakeup the system after the requested time */ if (counter_start(counter_dev) != 0) { ret = 1; } } else { ret = 1; } done: return ret; } /* After exit from certain low power modes where the OS Timer was disabled, the * current tick value should be updated to account for the period when the OS Timer * was disabled. Also in certain cases, the OS Timer might lose its state and needs * to be reinitialized. 
*/
/* Account for time slept in a low-power state where the OS Timer was stopped:
 * read the wakeup counter, fold the slept time into cyc_sys_compensated, and
 * re-announce to the kernel. Returns 0 on success, 1 if no wakeup counter is
 * available (nothing compensated).
 */
static uint32_t mcux_lpc_ostick_compensate_system_timer(void)
{
	uint32_t ret = 0;

	if (counter_dev) {
		uint32_t slept_time_ticks;
		uint32_t slept_time_us;

		counter_stop(counter_dev);
		counter_get_value(counter_dev, &slept_time_ticks);
		/* Down-counting counters report remaining count; convert to
		 * elapsed count from the top value.
		 */
		if (!(counter_is_counting_up(counter_dev))) {
			slept_time_ticks = counter_get_top_value(counter_dev) -
					   slept_time_ticks;
		}
		slept_time_us = counter_ticks_to_us(counter_dev, slept_time_ticks);
		/* Fold slept time (rounded down to whole kernel ticks) into
		 * the software compensation offset.
		 */
		cyc_sys_compensated +=
			(k_us_to_ticks_floor32(slept_time_us) * CYC_PER_TICK);
#if CONFIG_MCUX_OS_TIMER_PM_POWERED_OFF
		/* Reactivate os_timer for cases where it loses its state */
		OSTIMER_Init(base);
#endif
		/* Announce the time slept to the kernel*/
		mcux_lpc_ostick_isr(NULL);
	} else {
		/* No low-power wakeup counter configured. */
		ret = 1;
	}

	return ret;
}
#endif

/* Program the next timer interrupt "ticks" kernel ticks from now.
 * Tickless mode only; in standby-capable PM configurations a low-power
 * counter may be armed instead of the OS Timer match register.
 */
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	ARG_UNUSED(idle);

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		/* Only for tickless kernel system */
		return;
	}

#if DT_NODE_HAS_STATUS(DT_NODELABEL(standby), okay) && CONFIG_PM
	if (idle) {
		/* OS Timer may not be able to wakeup in certain low power modes.
		 * For these cases, we start a counter that can wakeup
		 * from low power modes.
		 */
		if (pm_state_next_get(0)->state == PM_STATE_STANDBY) {
			if (mcux_lpc_ostick_set_counter_timeout(ticks) == 0) {
				/* A low power counter has been started. No need to
				 * go further, simply return
				 */
				return;
			}
		}
	}
#endif

	ticks = ticks == K_TICKS_FOREVER ? MAX_TICKS : ticks;
	ticks = CLAMP(ticks - 1, 0, (int32_t)MAX_TICKS);

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t now = mcux_lpc_ostick_get_compensated_timer_value();
	uint32_t adj, cyc = ticks * CYC_PER_TICK;

	/* Round up to next tick boundary. */
	adj = (uint32_t)(now - last_count) + (CYC_PER_TICK - 1);
	if (cyc <= MAX_CYC - adj) {
		cyc += adj;
	} else {
		cyc = MAX_CYC;
	}
	cyc = (cyc / CYC_PER_TICK) * CYC_PER_TICK;

	/* Keep the match at least MIN_DELAY cycles in the future so the
	 * hardware does not miss it.
	 */
	if ((int32_t)(cyc + last_count - now) < MIN_DELAY) {
		cyc += CYC_PER_TICK;
	}

	/* Match register is programmed in raw (uncompensated) timer units. */
	OSTIMER_SetMatchValue(base, cyc + last_count - cyc_sys_compensated, NULL);
	k_spin_unlock(&lock, key);
}

/* Whole kernel ticks elapsed since the last sys_clock_announce(). */
uint32_t sys_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		/* Always return 0 for tickful kernel system */
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t ret = ((uint32_t)mcux_lpc_ostick_get_compensated_timer_value() -
			(uint32_t)last_count) / CYC_PER_TICK;

	k_spin_unlock(&lock, key);
	return ret;
}

uint32_t sys_clock_cycle_get_32(void)
{
	return (uint32_t)mcux_lpc_ostick_get_compensated_timer_value();
}

uint64_t sys_clock_cycle_get_64(void)
{
	return mcux_lpc_ostick_get_compensated_timer_value();
}

void sys_clock_idle_exit(void)
{
#if DT_NODE_HAS_STATUS(DT_NODELABEL(standby), okay) && CONFIG_PM
	/* The tick should be compensated for states where the
	 * OS Timer is disabled
	 */
	if (pm_state_next_get(0)->state == PM_STATE_STANDBY) {
		mcux_lpc_ostick_compensate_system_timer();
	}
#endif
}

/* One-time driver init: hook the ISR, initialize the OS Timer and program
 * the first tick's match value.
 */
static int sys_clock_driver_init(void)
{
	/* Configure event timer's ISR */
	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
		    mcux_lpc_ostick_isr, NULL, 0);

	base = (OSTIMER_Type *)DT_INST_REG_ADDR(0);

#if (DT_INST_PROP(0, wakeup_source))
	EnableDeepSleepIRQ(DT_INST_IRQN(0));
#endif

	/* Initialize the OS timer, setting clock configuration. */
	OSTIMER_Init(base);

	last_count = mcux_lpc_ostick_get_compensated_timer_value();
	OSTIMER_SetMatchValue(base, last_count + CYC_PER_TICK, NULL);

	/* Enable event timer interrupt */
	irq_enable(DT_INST_IRQN(0));

	/* On some SoC's, OS Timer cannot wakeup from low power mode in standby modes */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(standby), okay) && CONFIG_PM
	counter_dev = DEVICE_DT_GET_OR_NULL(DT_INST_PHANDLE(0, deep_sleep_counter));
#endif

	return 0;
}

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);
```
/content/code_sandbox/drivers/timer/mcux_os_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,193
```c
/*
 *
 */

#define DT_DRV_COMPAT xlnx_ttcps

#include <zephyr/arch/cpu.h>
#include <zephyr/init.h>
#include <zephyr/irq.h>
#include <zephyr/sys_clock.h>
#include <soc.h>
#include <zephyr/drivers/timer/system_timer.h>

#include "xlnx_psttc_timer_priv.h"

#define TIMER_INDEX		CONFIG_XLNX_PSTTC_TIMER_INDEX

#define TIMER_IRQ		DT_INST_IRQN(0)
#define TIMER_BASE_ADDR		DT_INST_REG_ADDR(0)
/* NOTE(review): "FREQUECY" is a pre-existing misspelling of FREQUENCY; kept
 * as-is because renaming the macro would touch every use below.
 */
#define TIMER_CLOCK_FREQUECY	DT_INST_PROP(0, clock_frequency)

#define TICKS_PER_SEC		CONFIG_SYS_CLOCK_TICKS_PER_SEC
#define CYCLES_PER_SEC		TIMER_CLOCK_FREQUECY
#define CYCLES_PER_TICK		(CYCLES_PER_SEC / TICKS_PER_SEC)

#if defined(CONFIG_TEST)
const int32_t z_sys_timer_irq_for_test = DT_IRQN(DT_INST(0, xlnx_ttcps));
#endif

/*
 * CYCLES_NEXT_MIN must be large enough to ensure that the timer does not miss
 * interrupts. This value was conservatively set using the trial and error
 * method, and there is room for improvement.
 */
#define CYCLES_NEXT_MIN		(10000)
#define CYCLES_NEXT_MAX		(XTTC_MAX_INTERVAL_COUNT)

BUILD_ASSERT(TIMER_CLOCK_FREQUECY == CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC,
	     "Configured system timer frequency does not match the TTC "
	     "clock frequency in the device tree");

BUILD_ASSERT(CYCLES_PER_SEC >= TICKS_PER_SEC,
	     "Timer clock frequency must be greater than the system tick "
	     "frequency");

BUILD_ASSERT((CYCLES_PER_SEC % TICKS_PER_SEC) == 0,
	     "Timer clock frequency is not divisible by the system tick "
	     "frequency");

#ifdef CONFIG_TICKLESS_KERNEL
/* Counter value at the last sys_clock_announce() (tickless only). */
static uint32_t last_cycles;
#endif

/* Return the TTC's current free-running counter value. */
static uint32_t read_count(void)
{
	/* Read current counter value */
	return sys_read32(TIMER_BASE_ADDR + XTTCPS_COUNT_VALUE_OFFSET);
}

/* Program the match register to fire at "match", pushing it forward if it
 * would land closer than CYCLES_NEXT_MIN cycles from "cycles" (now).
 */
static void update_match(uint32_t cycles, uint32_t match)
{
	uint32_t delta = match - cycles;

	/* Ensure that the match value meets the minimum timing requirements */
	if (delta < CYCLES_NEXT_MIN) {
		match += CYCLES_NEXT_MIN - delta;
	}

	/* Write counter match value for interrupt generation */
	sys_write32(match, TIMER_BASE_ADDR + XTTCPS_MATCH_0_OFFSET);
}

/* Timer interrupt: acknowledge, compute elapsed ticks and announce them. */
static void ttc_isr(const void *arg)
{
	uint32_t cycles;
	uint32_t ticks;

	ARG_UNUSED(arg);

	/* Acknowledge interrupt */
	sys_read32(TIMER_BASE_ADDR + XTTCPS_ISR_OFFSET);

	/* Read counter value */
	cycles = read_count();

#ifdef CONFIG_TICKLESS_KERNEL
	/* Calculate the number of ticks since last announcement */
	ticks = (cycles - last_cycles) / CYCLES_PER_TICK;

	/* Update last cycles count */
	last_cycles = cycles;
#else
	/* Update counter match value for the next interrupt */
	update_match(cycles, cycles + CYCLES_PER_TICK);

	/* Advance tick count by 1 */
	ticks = 1;
#endif

	/* Announce to the kernel*/
	sys_clock_announce(ticks);
}

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
#ifdef CONFIG_TICKLESS_KERNEL
	uint32_t cycles;
	uint32_t next_cycles;

	/* Read counter value */
	cycles = read_count();

	/* Calculate timeout counter value */
	if (ticks == K_TICKS_FOREVER) {
		next_cycles = cycles + CYCLES_NEXT_MAX;
	} else {
		next_cycles = cycles + ((uint32_t)ticks * CYCLES_PER_TICK);
	}

	/* Set match value for the next interrupt */
	update_match(cycles, next_cycles);
#endif
}

uint32_t sys_clock_elapsed(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	uint32_t cycles;

	/* Read counter value */
	cycles = read_count();

	/* Return the number of ticks since last announcement */
	return (cycles - last_cycles) / CYCLES_PER_TICK;
#else
	/* Always return 0 for tickful operation */
	return 0;
#endif
}

uint32_t sys_clock_cycle_get_32(void)
{
	/* Return the current counter value */
	return read_count();
}

/* One-time hardware init: stop, reset and reprogram the TTC, hook the IRQ
 * and start the counter in match mode.
 */
static int sys_clock_driver_init(void)
{
	uint32_t reg_val;

	/* Stop timer */
	sys_write32(XTTCPS_CNT_CNTRL_DIS_MASK,
		    TIMER_BASE_ADDR + XTTCPS_CNT_CNTRL_OFFSET);

#ifdef CONFIG_TICKLESS_KERNEL
	/* Initialise internal states */
	last_cycles = 0;
#endif

	/* Initialise timer registers */
	sys_write32(XTTCPS_CNT_CNTRL_RESET_VALUE,
		    TIMER_BASE_ADDR + XTTCPS_CNT_CNTRL_OFFSET);
	sys_write32(0, TIMER_BASE_ADDR + XTTCPS_CLK_CNTRL_OFFSET);
	sys_write32(0, TIMER_BASE_ADDR + XTTCPS_INTERVAL_VAL_OFFSET);
	sys_write32(0, TIMER_BASE_ADDR + XTTCPS_MATCH_0_OFFSET);
	sys_write32(0, TIMER_BASE_ADDR + XTTCPS_MATCH_1_OFFSET);
	sys_write32(0, TIMER_BASE_ADDR + XTTCPS_MATCH_2_OFFSET);
	sys_write32(0, TIMER_BASE_ADDR + XTTCPS_IER_OFFSET);
	sys_write32(XTTCPS_IXR_ALL_MASK, TIMER_BASE_ADDR + XTTCPS_ISR_OFFSET);

	/* Reset counter value */
	reg_val = sys_read32(TIMER_BASE_ADDR + XTTCPS_CNT_CNTRL_OFFSET);
	reg_val |= XTTCPS_CNT_CNTRL_RST_MASK;
	sys_write32(reg_val, TIMER_BASE_ADDR + XTTCPS_CNT_CNTRL_OFFSET);

	/* Set match mode */
	reg_val = sys_read32(TIMER_BASE_ADDR + XTTCPS_CNT_CNTRL_OFFSET);
	reg_val |= XTTCPS_CNT_CNTRL_MATCH_MASK;
	sys_write32(reg_val, TIMER_BASE_ADDR + XTTCPS_CNT_CNTRL_OFFSET);

	/* Set initial timeout */
	reg_val = IS_ENABLED(CONFIG_TICKLESS_KERNEL) ?
		  CYCLES_NEXT_MAX : CYCLES_PER_TICK;
	sys_write32(reg_val, TIMER_BASE_ADDR + XTTCPS_MATCH_0_OFFSET);

	/* Connect timer interrupt */
	IRQ_CONNECT(TIMER_IRQ, 0, ttc_isr, 0, 0);
	irq_enable(TIMER_IRQ);

	/* Enable timer interrupt */
	reg_val = sys_read32(TIMER_BASE_ADDR + XTTCPS_IER_OFFSET);
	reg_val |= XTTCPS_IXR_MATCH_0_MASK;
	sys_write32(reg_val, TIMER_BASE_ADDR + XTTCPS_IER_OFFSET);

	/* Start timer */
	reg_val = sys_read32(TIMER_BASE_ADDR + XTTCPS_CNT_CNTRL_OFFSET);
	reg_val &= (~XTTCPS_CNT_CNTRL_DIS_MASK);
	sys_write32(reg_val, TIMER_BASE_ADDR + XTTCPS_CNT_CNTRL_OFFSET);

	return 0;
}

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);
```
/content/code_sandbox/drivers/timer/xlnx_psttc_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,472
```unknown config NRF_RTC_TIMER bool "nRF Real Time Counter (NRF_RTC1) Timer" depends on CLOCK_CONTROL depends on SOC_COMPATIBLE_NRF select TICKLESS_CAPABLE select SYSTEM_TIMER_HAS_DISABLE_SUPPORT depends on !$(dt_nodelabel_enabled,rtc1) help This module implements a kernel device driver for the nRF Real Time Counter NRF_RTC1 and provides the standard "system clock driver" interfaces. if NRF_RTC_TIMER config NRF_RTC_TIMER_USER_CHAN_COUNT int "Additional channels that can be used" default 2 if NRF_802154_RADIO_DRIVER && SOC_COMPATIBLE_NRF5340_CPUNET default 3 if NRF_802154_RADIO_DRIVER default 0 help Use nrf_rtc_timer.h API. Driver is not managing allocation of channels. config NRF_RTC_TIMER_LOCK_ZERO_LATENCY_IRQS # hidden option bool depends on ZERO_LATENCY_IRQS default y if !BT_LL_SW_SPLIT help Enable use of __disable_irq() to disable Zero Latency IRQs to prevent higher priority contexts (including ZLIs) that might preempt the handler and call nrf_rtc_timer API from destroying the internal state in nrf_rtc_timer. config NRF_RTC_TIMER_TRIGGER_OVERFLOW bool "Trigger overflow" help When enabled, a function can be used to trigger RTC overflow and effectively shift time into the future. endif # NRF_RTC_TIMER ```
/content/code_sandbox/drivers/timer/Kconfig.nrf_rtc
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
314
```c
/*
 *
 */

#define DT_DRV_COMPAT atmel_sam0_rtc

/**
 * @file
 * @brief Atmel SAM0 series RTC-based system timer
 *
 * This system timer implementation supports both tickless and ticking modes.
 * In tickless mode, RTC counts continually in 32-bit mode and timeouts are
 * scheduled using the RTC comparator. In ticking mode, RTC is configured to
 * generate an interrupt every tick.
 */

#include <zephyr/init.h>
#include <soc.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/sys_clock.h>
#include <zephyr/irq.h>
#include <zephyr/sys/util.h>

/* RTC registers. */
#define RTC0 ((RtcMode0 *) DT_INST_REG_ADDR(0))

#ifdef MCLK
#define RTC_CLOCK_HW_CYCLES_PER_SEC SOC_ATMEL_SAM0_OSC32K_FREQ_HZ
#else
#define RTC_CLOCK_HW_CYCLES_PER_SEC SOC_ATMEL_SAM0_GCLK0_FREQ_HZ
#endif

/* Number of sys timer cycles per on tick. */
#define CYCLES_PER_TICK (RTC_CLOCK_HW_CYCLES_PER_SEC \
			 / CONFIG_SYS_CLOCK_TICKS_PER_SEC)

/* Maximum number of ticks. */
#define MAX_TICKS (UINT32_MAX / CYCLES_PER_TICK - 2)

#ifdef CONFIG_TICKLESS_KERNEL

/*
 * Due to the nature of clock synchronization, reading from or writing to some
 * RTC registers takes approximately six RTC_GCLK cycles. This constant defines
 * a safe threshold for the comparator.
 */
#define TICK_THRESHOLD 7

BUILD_ASSERT(CYCLES_PER_TICK > TICK_THRESHOLD,
	     "CYCLES_PER_TICK must be greater than TICK_THRESHOLD for "
	     "tickless mode");

#else /* !CONFIG_TICKLESS_KERNEL */

/*
 * For some reason, RTC does not generate interrupts when COMP == 0,
 * MATCHCLR == 1 and PRESCALER == 0. So we need to check that CYCLES_PER_TICK
 * is more than one.
 */
BUILD_ASSERT(CYCLES_PER_TICK > 1,
	     "CYCLES_PER_TICK must be greater than 1 for ticking mode");

#endif /* CONFIG_TICKLESS_KERNEL */

/* Helper macro to get the correct GCLK GEN based on configuration. */
#define GCLK_GEN(n) GCLK_EVAL(n)
#define GCLK_EVAL(n) GCLK_CLKCTRL_GEN_GCLK##n

/* Tick/cycle count of the last announce call. */
static volatile uint32_t rtc_last;

#ifndef CONFIG_TICKLESS_KERNEL
/* Current tick count. */
static volatile uint32_t rtc_counter;

/* Tick value of the next timeout. */
static volatile uint32_t rtc_timeout;

/* NOTE(review): pcfg is only defined when CONFIG_TICKLESS_KERNEL is off, yet
 * sys_clock_driver_init() below references it unconditionally — verify this
 * builds with tickless enabled (the pinctrl definitions may belong outside
 * this #ifndef).
 */
PINCTRL_DT_INST_DEFINE(0);
static const struct pinctrl_dev_config *pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0);
#endif /* CONFIG_TICKLESS_KERNEL */

/*
 * Waits for RTC bus synchronization.
 */
static inline void rtc_sync(void)
{
	/* Wait for bus synchronization... */
#ifdef RTC_STATUS_SYNCBUSY
	while (RTC0->STATUS.reg & RTC_STATUS_SYNCBUSY) {
	}
#else
	while (RTC0->SYNCBUSY.reg) {
	}
#endif
}

/*
 * Reads RTC COUNT register. First a read request must be written to READREQ,
 * then - when bus synchronization completes - the COUNT register is read and
 * returned.
 */
static uint32_t rtc_count(void)
{
#ifdef RTC_READREQ_RREQ
	RTC0->READREQ.reg = RTC_READREQ_RREQ;
#endif
	rtc_sync();
	return RTC0->COUNT.reg;
}

/* Disable interrupts, clear flags and software-reset the RTC module back to
 * hardware defaults.
 */
static void rtc_reset(void)
{
	rtc_sync();

	/* Disable interrupt. */
	RTC0->INTENCLR.reg = RTC_MODE0_INTENCLR_MASK;
	/* Clear interrupt flag. */
	RTC0->INTFLAG.reg = RTC_MODE0_INTFLAG_MASK;

	/* Disable RTC module. */
#ifdef RTC_MODE0_CTRL_ENABLE
	RTC0->CTRL.reg &= ~RTC_MODE0_CTRL_ENABLE;
#else
	RTC0->CTRLA.reg &= ~RTC_MODE0_CTRLA_ENABLE;
#endif

	rtc_sync();

	/* Initiate software reset. */
#ifdef RTC_MODE0_CTRL_SWRST
	RTC0->CTRL.bit.SWRST = 1;
	while (RTC0->CTRL.bit.SWRST) {
	}
#else
	RTC0->CTRLA.bit.SWRST = 1;
	while (RTC0->CTRLA.bit.SWRST) {
	}
#endif
}

/* RTC interrupt: announce elapsed ticks (tickless) or count one tick and
 * announce when the programmed timeout is reached (ticking mode).
 */
static void rtc_isr(const void *arg)
{
	ARG_UNUSED(arg);

	/* Read and clear the interrupt flag register. */
	uint16_t status = RTC0->INTFLAG.reg;

	RTC0->INTFLAG.reg = status;

#ifdef CONFIG_TICKLESS_KERNEL

	/* Read the current counter and announce the elapsed time in ticks. */
	uint32_t count = rtc_count();

	if (count != rtc_last) {
		uint32_t ticks = (count - rtc_last) / CYCLES_PER_TICK;

		sys_clock_announce(ticks);
		rtc_last += ticks * CYCLES_PER_TICK;
	}

#else /* !CONFIG_TICKLESS_KERNEL */

	if (status) {
		/* RTC just ticked one more tick... */
		if (++rtc_counter == rtc_timeout) {
			sys_clock_announce(rtc_counter - rtc_last);
			rtc_last = rtc_counter;
		}
	} else {
		/* ISR was invoked directly from sys_clock_set_timeout. */
		sys_clock_announce(0);
	}

#endif /* CONFIG_TICKLESS_KERNEL */
}

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	ARG_UNUSED(idle);

#ifdef CONFIG_TICKLESS_KERNEL

	ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
	ticks = CLAMP(ticks - 1, 0, (int32_t) MAX_TICKS);

	/* Compute number of RTC cycles until the next timeout. */
	uint32_t count = rtc_count();
	uint32_t timeout = ticks * CYCLES_PER_TICK + count % CYCLES_PER_TICK;

	/* Round to the nearest tick boundary. */
	timeout = DIV_ROUND_UP(timeout, CYCLES_PER_TICK) * CYCLES_PER_TICK;

	if (timeout < TICK_THRESHOLD) {
		timeout += CYCLES_PER_TICK;
	}

	rtc_sync();
	RTC0->COMP[0].reg = count + timeout;

#else /* !CONFIG_TICKLESS_KERNEL */

	if (ticks == K_TICKS_FOREVER) {
		/* Disable comparator for K_TICKS_FOREVER and other negative
		 * values.
		 */
		rtc_timeout = rtc_counter;
		return;
	}

	if (ticks < 1) {
		ticks = 1;
	}

	/* Avoid race condition between reading counter and ISR incrementing
	 * it.
	 */
	unsigned int key = irq_lock();

	rtc_timeout = rtc_counter + ticks;
	irq_unlock(key);

#endif /* CONFIG_TICKLESS_KERNEL */
}

uint32_t sys_clock_elapsed(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (rtc_count() - rtc_last) / CYCLES_PER_TICK;
#else
	return rtc_counter - rtc_last;
#endif
}

uint32_t sys_clock_cycle_get_32(void)
{
	/* Just return the absolute value of RTC cycle counter. */
	return rtc_count();
}

/* One-time init: route clocks, reset the RTC, configure 32-bit mode and the
 * comparator/overflow interrupt for the selected (tickless/ticking) mode.
 */
static int sys_clock_driver_init(void)
{
	int retval;

#ifdef MCLK
	MCLK->APBAMASK.reg |= MCLK_APBAMASK_RTC;
	OSC32KCTRL->RTCCTRL.reg = OSC32KCTRL_RTCCTRL_RTCSEL_ULP32K;
#else
	/* Set up bus clock and GCLK generator. */
	PM->APBAMASK.reg |= PM_APBAMASK_RTC;
	GCLK->CLKCTRL.reg = GCLK_CLKCTRL_ID(RTC_GCLK_ID) | GCLK_CLKCTRL_CLKEN
			    | GCLK_GEN(DT_INST_PROP(0, clock_generator));

	/* Synchronize GCLK. */
	while (GCLK->STATUS.bit.SYNCBUSY) {
	}
#endif

	retval = pinctrl_apply_state(pcfg, PINCTRL_STATE_DEFAULT);
	if (retval < 0) {
		return retval;
	}

	/* Reset module to hardware defaults. */
	rtc_reset();

	rtc_last = 0U;

	/* Configure RTC with 32-bit mode, configured prescaler and MATCHCLR. */
#ifdef RTC_MODE0_CTRL_MODE
	uint16_t ctrl = RTC_MODE0_CTRL_MODE(0) | RTC_MODE0_CTRL_PRESCALER(0);
#else
	uint16_t ctrl = RTC_MODE0_CTRLA_MODE(0) | RTC_MODE0_CTRLA_PRESCALER(0);
#endif

#ifdef RTC_MODE0_CTRLA_COUNTSYNC
	ctrl |= RTC_MODE0_CTRLA_COUNTSYNC;
#endif

#ifndef CONFIG_TICKLESS_KERNEL
#ifdef RTC_MODE0_CTRL_MATCHCLR
	ctrl |= RTC_MODE0_CTRL_MATCHCLR;
#else
	ctrl |= RTC_MODE0_CTRLA_MATCHCLR;
#endif
#endif

	rtc_sync();

#ifdef RTC_MODE0_CTRL_MODE
	RTC0->CTRL.reg = ctrl;
#else
	RTC0->CTRLA.reg = ctrl;
#endif

#ifdef CONFIG_TICKLESS_KERNEL
	/* Tickless kernel lets RTC count continually and ignores overflows. */
	RTC0->INTENSET.reg = RTC_MODE0_INTENSET_CMP0;
#else
	/* Non-tickless mode uses comparator together with MATCHCLR. */
	rtc_sync();
	RTC0->COMP[0].reg = CYCLES_PER_TICK;
	RTC0->INTENSET.reg = RTC_MODE0_INTENSET_OVF;
	rtc_counter = 0U;
	rtc_timeout = 0U;
#endif

	/* Enable RTC module. */
	rtc_sync();
#ifdef RTC_MODE0_CTRL_ENABLE
	RTC0->CTRL.reg |= RTC_MODE0_CTRL_ENABLE;
#else
	RTC0->CTRLA.reg |= RTC_MODE0_CTRLA_ENABLE;
#endif

	/* Enable RTC interrupt. */
	NVIC_ClearPendingIRQ(DT_INST_IRQN(0));
	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), rtc_isr, 0, 0);
	irq_enable(DT_INST_IRQN(0));

	return 0;
}

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);
```
/content/code_sandbox/drivers/timer/sam0_rtc_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,159
```c
/*
 *
 */

#include <zephyr/init.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>
#include <zephyr/spinlock.h>
#include <zephyr/drivers/interrupt_controller/dw_ace.h>
#include <cavs-idc.h>
#include <adsp_shim.h>
#include <adsp_interrupt.h>
#include <zephyr/irq.h>

#define DT_DRV_COMPAT intel_adsp_timer

/**
 * @file
 * @brief Intel Audio DSP Wall Clock Timer driver
 *
 * The Audio DSP on Intel SoC has a timer with one counter and two compare
 * registers that is external to the CPUs. This timer is accessible from
 * all available CPU cores and provides a synchronized timer under SMP.
 */

#define COMPARATOR_IDX 0 /* 0 or 1 */

#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
#define TIMER_IRQ ACE_IRQ_TO_ZEPHYR(ACE_INTL_TTS)
#else
#define TIMER_IRQ DSP_WCT_IRQ(COMPARATOR_IDX)
#endif

#define CYC_PER_TICK	(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC	\
			 / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#define MAX_CYC 0xFFFFFFFFUL
#define MAX_TICKS ((MAX_CYC - CYC_PER_TICK) / CYC_PER_TICK)
#define MIN_DELAY (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / 100000)

BUILD_ASSERT(MIN_DELAY < CYC_PER_TICK);
BUILD_ASSERT(COMPARATOR_IDX >= 0 && COMPARATOR_IDX <= 1);

#define DSP_WCT_CS_TT(x) BIT(4 + x)

static struct k_spinlock lock;
static uint64_t last_count;

/* Not using current syscon driver due to overhead due to MMU support */
#define SYSCON_REG_ADDR DT_REG_ADDR(DT_INST_PHANDLE(0, syscon))
#define DSPWCTCS_ADDR (SYSCON_REG_ADDR + ADSP_DSPWCTCS_OFFSET)
#define DSPWCT0C_LO_ADDR (SYSCON_REG_ADDR + ADSP_DSPWCT0C_OFFSET)
#define DSPWCT0C_HI_ADDR (SYSCON_REG_ADDR + ADSP_DSPWCT0C_OFFSET + 4)
#define DSPWC_LO_ADDR (SYSCON_REG_ADDR + ADSP_DSPWC_OFFSET)
#define DSPWC_HI_ADDR (SYSCON_REG_ADDR + ADSP_DSPWC_OFFSET + 4)

#if defined(CONFIG_TEST)
const int32_t z_sys_timer_irq_for_test = TIMER_IRQ; /* See tests/kernel/context */
#endif

/* Disarm the comparator, load the 64-bit match value (low word first), then
 * re-arm it.
 */
static void set_compare(uint64_t time)
{
	/* Disarm the comparator to prevent spurious triggers */
	/* NOTE(review): "SYSCON_REG_ADDR + ADSP_DSPWCTCS_OFFSET" below is the
	 * same address as DSPWCTCS_ADDR; the mixed spelling is a pre-existing
	 * inconsistency, kept as-is.
	 */
	sys_write32(sys_read32(DSPWCTCS_ADDR) & (~DSP_WCT_CS_TA(COMPARATOR_IDX)),
		    SYSCON_REG_ADDR + ADSP_DSPWCTCS_OFFSET);

	sys_write32((uint32_t)time, DSPWCT0C_LO_ADDR);
	sys_write32((uint32_t)(time >> 32), DSPWCT0C_HI_ADDR);

	/* Arm the timer */
	sys_write32(sys_read32(DSPWCTCS_ADDR) | (DSP_WCT_CS_TA(COMPARATOR_IDX)),
		    DSPWCTCS_ADDR);
}

static uint64_t count(void)
{
	/* The count register is 64 bits, but we're a 32 bit CPU that
	 * can only read four bytes at a time, so a bit of care is
	 * needed to prevent racing against a wraparound of the low
	 * word.  Wrap the low read between two reads of the high word
	 * and make sure it didn't change.
	 */
	uint32_t hi0, hi1, lo;

	do {
		hi0 = sys_read32(DSPWC_HI_ADDR);
		lo = sys_read32(DSPWC_LO_ADDR);
		hi1 = sys_read32(DSPWC_HI_ADDR);
	} while (hi0 != hi1);

	return (((uint64_t)hi0) << 32) | lo;
}

/* Fast path: only the low 32 bits of the wall clock. */
static uint32_t count32(void)
{
	uint32_t counter_lo;

	counter_lo = sys_read32(DSPWC_LO_ADDR);

	return counter_lo;
}

/* Comparator interrupt: clear the triggered bit, announce elapsed ticks and,
 * in ticking mode, schedule the next tick's compare value.
 */
static void compare_isr(const void *arg)
{
	ARG_UNUSED(arg);
	uint64_t curr;
	uint64_t dticks;

	k_spinlock_key_t key = k_spin_lock(&lock);

	curr = count();
	dticks = (curr - last_count) / CYC_PER_TICK;

	/* Clear the triggered bit */
	sys_write32(sys_read32(DSPWCTCS_ADDR) | DSP_WCT_CS_TT(COMPARATOR_IDX),
		    DSPWCTCS_ADDR);

	last_count += dticks * CYC_PER_TICK;

#ifndef CONFIG_TICKLESS_KERNEL
	uint64_t next = last_count + CYC_PER_TICK;

	if ((int64_t)(next - curr) < MIN_DELAY) {
		next += CYC_PER_TICK;
	}
	set_compare(next);
#endif

	k_spin_unlock(&lock, key);

	sys_clock_announce((int32_t)dticks);
}

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	ARG_UNUSED(idle);

#ifdef CONFIG_TICKLESS_KERNEL
	ticks = ticks == K_TICKS_FOREVER ? MAX_TICKS : ticks;
	ticks = CLAMP(ticks - 1, 0, (int32_t)MAX_TICKS);

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t curr = count();
	uint64_t next;
	uint32_t adj, cyc = ticks * CYC_PER_TICK;

	/* Round up to next tick boundary */
	adj = (uint32_t)(curr - last_count) + (CYC_PER_TICK - 1);
	if (cyc <= MAX_CYC - adj) {
		cyc += adj;
	} else {
		cyc = MAX_CYC;
	}
	cyc = (cyc / CYC_PER_TICK) * CYC_PER_TICK;
	next = last_count + cyc;

	/* Push the compare out a tick if it would land too close to now. */
	if (((uint32_t)next - (uint32_t)curr) < MIN_DELAY) {
		next += CYC_PER_TICK;
	}

	set_compare(next);

	k_spin_unlock(&lock, key);
#endif
}

uint32_t sys_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t ret = (count() - last_count) / CYC_PER_TICK;

	k_spin_unlock(&lock, key);
	return (uint32_t)ret;
}

uint32_t sys_clock_cycle_get_32(void)
{
	return count32();
}

uint64_t sys_clock_cycle_get_64(void)
{
	return count();
}

/* Interrupt setup is partially-cpu-local state, so needs to be
 * repeated for each core when it starts.  Note that this conforms to
 * the Zephyr convention of sending timer interrupts to all cpus (for
 * the benefit of timeslicing).
 */
static void irq_init(void)
{
	int cpu = arch_curr_cpu()->id;

	/* These platforms have an extra layer of interrupt masking
	 * (for per-core control) above the interrupt controller.
	 * Drivers need to do that part.
	 */
#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	ACE_DINT[cpu].ie[ACE_INTL_TTS] |= BIT(COMPARATOR_IDX + 1);
	sys_write32(sys_read32(DSPWCTCS_ADDR) | ADSP_SHIM_DSPWCTCS_TTIE(COMPARATOR_IDX),
		    DSPWCTCS_ADDR);
#else
	CAVS_INTCTRL[cpu].l2.clear = CAVS_L2_DWCT0;
#endif
	irq_enable(TIMER_IRQ);
}

void smp_timer_init(void)
{
}

static int sys_clock_driver_init(void)
{
	uint64_t curr = count();

	IRQ_CONNECT(TIMER_IRQ, 0, compare_isr, 0, 0);
	set_compare(curr + CYC_PER_TICK);
	last_count = curr;
	irq_init();
	return 0;
}

/* Runs on core 0 only */
void intel_adsp_clock_soft_off_exit(void)
{
	(void)sys_clock_driver_init();
}

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);
```
/content/code_sandbox/drivers/timer/intel_adsp_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,726
```c
/*
 */

#define DT_DRV_COMPAT ite_it8xxx2_timer

#include <zephyr/init.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/dt-bindings/interrupt-controller/ite-intc.h>
#include <soc.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys_clock.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>

LOG_MODULE_REGISTER(timer, LOG_LEVEL_ERR);

#define COUNT_1US (EC_FREQ / USEC_PER_SEC - 1)

BUILD_ASSERT(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC == 32768,
	     "ITE RTOS timer HW frequency is fixed at 32768Hz");

/* Event timer configurations */
#define EVENT_TIMER		EXT_TIMER_3
#define EVENT_TIMER_IRQ		DT_INST_IRQ_BY_IDX(0, 0, irq)
#define EVENT_TIMER_FLAG	DT_INST_IRQ_BY_IDX(0, 0, flags)
/* Event timer max count is 512 sec (base on clock source 32768Hz) */
#define EVENT_TIMER_MAX_CNT	0x00FFFFFFUL

/* Busy wait low timer configurations */
#define BUSY_WAIT_L_TIMER	EXT_TIMER_5
#define BUSY_WAIT_L_TIMER_IRQ	DT_INST_IRQ_BY_IDX(0, 2, irq)
#define BUSY_WAIT_L_TIMER_FLAG	DT_INST_IRQ_BY_IDX(0, 2, flags)

/* Busy wait high timer configurations */
#define BUSY_WAIT_H_TIMER	EXT_TIMER_6
#define BUSY_WAIT_H_TIMER_IRQ	DT_INST_IRQ_BY_IDX(0, 3, irq)
#define BUSY_WAIT_H_TIMER_FLAG	DT_INST_IRQ_BY_IDX(0, 3, flags)
/* Busy wait high timer max count is 71.58min (base on clock source 1MHz) */
#define BUSY_WAIT_TIMER_H_MAX_CNT	0xFFFFFFFFUL

#if defined(CONFIG_TEST)
const int32_t z_sys_timer_irq_for_test = DT_IRQ_BY_IDX(DT_NODELABEL(timer), 5, irq);
#endif

#ifdef CONFIG_SOC_IT8XXX2_PLL_FLASH_48M
/*
 * One shot timer configurations
 *
 * NOTE: Timer1/2 register address isn't regular like timer3/4/5/6/7/8, and
 *       timer1 is used for printing watchdog warning message. So now we use
 *       timer2 only one shot to wake up chip and change pll.
 */
#define WDT_BASE		DT_REG_ADDR(DT_NODELABEL(twd0))
#define WDT_REG			(struct wdt_it8xxx2_regs *)(WDT_BASE)
#define ONE_SHOT_TIMER_IRQ	DT_IRQ_BY_IDX(DT_NODELABEL(twd0), 1, irq)
#define ONE_SHOT_TIMER_FLAG	DT_IRQ_BY_IDX(DT_NODELABEL(twd0), 1, flags)
#endif

#define MS_TO_COUNT(hz, ms)	((hz) * (ms) / 1000)

/*
 * One system (kernel) tick is as how much HW timer counts
 *
 * NOTE: Event and free run timer individually select the same clock source
 *       frequency, so they can use the same HW_CNT_PER_SYS_TICK to transform
 *       unit between HW count and system tick. If clock source frequency is
 *       different, then we should define another to transform.
 */
#define HW_CNT_PER_SYS_TICK	(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC \
				 / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
/* Event timer max count is as how much system (kernel) tick */
#define EVEN_TIMER_MAX_CNT_SYS_TICK	(EVENT_TIMER_MAX_CNT \
					 / HW_CNT_PER_SYS_TICK)

static struct k_spinlock lock;
/* Last HW count that we called sys_clock_announce() */
static volatile uint32_t last_announced_hw_cnt;
/* Last system (kernel) elapse and ticks */
static volatile uint32_t last_elapsed;
static volatile uint32_t last_ticks;

/* Whether "ms" below is a raw HW count or a millisecond value. */
enum ext_timer_raw_cnt {
	EXT_NOT_RAW_CNT = 0,
	EXT_RAW_CNT,
};

enum ext_timer_init {
	EXT_NOT_FIRST_TIME_ENABLE = 0,
	EXT_FIRST_TIME_ENABLE,
};

enum ext_timer_int {
	EXT_WITHOUT_TIMER_INT = 0,
	EXT_WITH_TIMER_INT,
};

enum ext_timer_start {
	EXT_NOT_START_TIMER = 0,
	EXT_START_TIMER,
};

#ifdef CONFIG_SOC_IT8XXX2_PLL_FLASH_48M
static void timer_5ms_one_shot_isr(const void *unused)
{
	ARG_UNUSED(unused);

	/*
	 * We are here because we have completed changing PLL sequence,
	 * so disabled one shot timer interrupt.
	 */
	irq_disable(ONE_SHOT_TIMER_IRQ);
}

/*
 * This timer is used to wake up chip from sleep mode to complete
 * changing PLL sequence.
 */
void timer_5ms_one_shot(void)
{
	struct wdt_it8xxx2_regs *const timer2_reg = WDT_REG;
	uint32_t hw_cnt;

	/* Initialize interrupt handler of one shot timer */
	IRQ_CONNECT(ONE_SHOT_TIMER_IRQ, 0, timer_5ms_one_shot_isr, NULL,
		    ONE_SHOT_TIMER_FLAG);

	/* Set rising edge triggered of one shot timer */
	ite_intc_irq_polarity_set(ONE_SHOT_TIMER_IRQ, ONE_SHOT_TIMER_FLAG);

	/* Clear interrupt status of one shot timer */
	ite_intc_isr_clear(ONE_SHOT_TIMER_IRQ);

	/* Set clock source of one shot timer */
	timer2_reg->ET2PSR = EXT_PSR_32P768K;

	/*
	 * Set count of one shot timer,
	 * and after write ET2CNTLLR timer will start
	 */
	hw_cnt = MS_TO_COUNT(32768, 5/*ms*/);
	timer2_reg->ET2CNTLH2R = (uint8_t)((hw_cnt >> 16) & 0xff);
	timer2_reg->ET2CNTLHR = (uint8_t)((hw_cnt >> 8) & 0xff);
	timer2_reg->ET2CNTLLR = (uint8_t)(hw_cnt & 0xff);

	irq_enable(ONE_SHOT_TIMER_IRQ);
}
#endif /* CONFIG_SOC_IT8XXX2_PLL_FLASH_48M */

#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
/* Spin for usec_to_wait microseconds using the combined timer5/timer6 pair
 * (timer6's observer register advances once per microsecond).
 */
void arch_busy_wait(uint32_t usec_to_wait)
{
	if (!usec_to_wait) {
		return;
	}

	/* Decrease 1us here to calibrate our access registers latency */
	usec_to_wait--;

	/*
	 * We want to set the bit(1) re-start busy wait timer as soon
	 * as possible, so we directly write 0xb instead of | bit(1).
	 */
	IT8XXX2_EXT_CTRLX(BUSY_WAIT_L_TIMER) = IT8XXX2_EXT_ETX_COMB_RST_EN;

	for (;;) {
		uint32_t curr = IT8XXX2_EXT_CNTOX(BUSY_WAIT_H_TIMER);

		if (curr >= usec_to_wait) {
			break;
		}
	}
}
#endif

static void evt_timer_enable(void)
{
	/* Enable and re-start event timer */
	IT8XXX2_EXT_CTRLX(EVENT_TIMER) |= (IT8XXX2_EXT_ETXEN |
					   IT8XXX2_EXT_ETXRST);
}

/* Event timer interrupt: announce elapsed ticks based on the free-run
 * observer count (tickless) or re-arm and announce one tick (ticking).
 */
static void evt_timer_isr(const void *unused)
{
	ARG_UNUSED(unused);

	/* Disable event timer */
	IT8XXX2_EXT_CTRLX(EVENT_TIMER) &= ~IT8XXX2_EXT_ETXEN;
	/* W/C event timer interrupt status */
	ite_intc_isr_clear(EVENT_TIMER_IRQ);

	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		/*
		 * Get free run observer count from last time announced and
		 * transform unit to system tick
		 */
		uint32_t dticks = (~(IT8XXX2_EXT_CNTOX(FREE_RUN_TIMER)) -
				   last_announced_hw_cnt) / HW_CNT_PER_SYS_TICK;

		last_announced_hw_cnt += (dticks * HW_CNT_PER_SYS_TICK);
		last_ticks += dticks;
		last_elapsed = 0;
		sys_clock_announce(dticks);
	} else {
		/* enable event timer */
		evt_timer_enable();
		/* Informs kernel that one system tick has elapsed */
		sys_clock_announce(1);
	}
}

static void free_run_timer_overflow_isr(const void *unused)
{
	ARG_UNUSED(unused);

	/* Read to clear terminal count flag */
	__unused uint8_t rc_tc = IT8XXX2_EXT_CTRLX(FREE_RUN_TIMER);

	/*
	 * TODO: to increment 32-bit "top half" here for software 64-bit
	 * timer emulation.
	 */
}

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	uint32_t hw_cnt;

	ARG_UNUSED(idle);

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		/* Always return for non-tickless kernel system */
		return;
	}

	/* Critical section */
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Disable event timer */
	IT8XXX2_EXT_CTRLX(EVENT_TIMER) &= ~IT8XXX2_EXT_ETXEN;

	if (ticks == K_TICKS_FOREVER) {
		/*
		 * If kernel doesn't have a timeout:
		 * 1.CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE = y (no future timer interrupts
		 *   are expected), kernel pass K_TICKS_FOREVER (0xFFFF FFFF FFFF FFFF),
		 *   we handle this case in here.
		 * 2.CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE = n (schedule timeout as far
		 *   into the future as possible), kernel pass INT_MAX (0x7FFF FFFF),
		 *   we handle it in later else {}.
		 */
		k_spin_unlock(&lock, key);
		return;
	} else {
		uint32_t next_cycs;
		uint32_t now;
		uint32_t dcycles;

		/*
		 * If ticks <= 1 means the kernel wants the tick announced
		 * as soon as possible, ideally no more than one system tick
		 * in the future. So set event timer count to 1 HW tick.
		 */
		ticks = CLAMP(ticks, 1, (int32_t)EVEN_TIMER_MAX_CNT_SYS_TICK);

		next_cycs = (last_ticks + last_elapsed + ticks) * HW_CNT_PER_SYS_TICK;
		now = ~(IT8XXX2_EXT_CNTOX(FREE_RUN_TIMER));
		if (unlikely(next_cycs <= now)) {
			/* Deadline already passed: fire as soon as possible. */
			hw_cnt = 1;
		} else {
			dcycles = next_cycs - now;
			hw_cnt = MIN(dcycles, EVENT_TIMER_MAX_CNT);
		}
	}

	/* Set event timer 24-bit count */
	IT8XXX2_EXT_CNTX(EVENT_TIMER) = hw_cnt;

	/* W/C event timer interrupt status */
	ite_intc_isr_clear(EVENT_TIMER_IRQ);

	/* enable event timer */
	evt_timer_enable();

	k_spin_unlock(&lock, key);

	LOG_DBG("timeout is 0x%x, set hw count 0x%x", ticks, hw_cnt);
}

uint32_t sys_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		/* Always return 0 for non-tickless kernel system */
		return 0;
	}

	/* Critical section */
	k_spinlock_key_t key = k_spin_lock(&lock);
	/*
	 * Get free run observer count from last time announced and transform
	 * unit to system tick
	 */
	uint32_t dticks = (~(IT8XXX2_EXT_CNTOX(FREE_RUN_TIMER)) -
			   last_announced_hw_cnt) / HW_CNT_PER_SYS_TICK;
	last_elapsed = dticks;
	k_spin_unlock(&lock, key);

	return dticks;
}

uint32_t sys_clock_cycle_get_32(void)
{
	/*
	 * Get free run observer count
	 *
	 * NOTE: Timer is counting down from 0xffffffff. In not combined
	 *       mode, the observer count value is the same as count, so after
	 *       NOT count operation we can get counting up value; In
	 *       combined mode, the observer count value is the same as NOT
	 *       count operation.
	 */
	uint32_t dticks = ~(IT8XXX2_EXT_CNTOX(FREE_RUN_TIMER));

	return dticks;
}

/* Generic external-timer setup helper. Converts "ms" to a HW count (unless
 * raw), programs polarity/clock/count, and optionally starts the timer and
 * enables its interrupt. Returns 0 on success, -1 on bad arguments.
 */
static int timer_init(enum ext_timer_idx ext_timer,
		      enum ext_clk_src_sel clock_source_sel,
		      enum ext_timer_raw_cnt raw, uint32_t ms,
		      enum ext_timer_init first_time_enable,
		      uint32_t irq_num, uint32_t irq_flag,
		      enum ext_timer_int with_int,
		      enum ext_timer_start start)
{
	uint32_t hw_cnt;

	if (raw == EXT_RAW_CNT) {
		hw_cnt = ms;
	} else {
		if (clock_source_sel == EXT_PSR_32P768K) {
			hw_cnt = MS_TO_COUNT(32768, ms);
		} else if (clock_source_sel == EXT_PSR_1P024K) {
			hw_cnt = MS_TO_COUNT(1024, ms);
		} else if (clock_source_sel == EXT_PSR_32) {
			hw_cnt = MS_TO_COUNT(32, ms);
		} else if (clock_source_sel == EXT_PSR_EC_CLK) {
			hw_cnt = MS_TO_COUNT(EC_FREQ, ms);
		} else {
			LOG_ERR("Timer %d clock source error !", ext_timer);
			return -1;
		}
	}

	if (hw_cnt == 0) {
		LOG_ERR("Timer %d count shouldn't be 0 !", ext_timer);
		return -1;
	}

	if (first_time_enable == EXT_FIRST_TIME_ENABLE) {
		/* Enable and re-start external timer x */
		IT8XXX2_EXT_CTRLX(ext_timer) |= (IT8XXX2_EXT_ETXEN |
						 IT8XXX2_EXT_ETXRST);
		/* Disable external timer x */
		IT8XXX2_EXT_CTRLX(ext_timer) &= ~IT8XXX2_EXT_ETXEN;
	}

	/* Set rising edge triggered of external timer x */
	ite_intc_irq_polarity_set(irq_num, irq_flag);

	/* Clear interrupt status of external timer x */
	ite_intc_isr_clear(irq_num);

	/* Set clock source of external timer x */
	IT8XXX2_EXT_PSRX(ext_timer) = clock_source_sel;

	/* Set count of external timer x */
	IT8XXX2_EXT_CNTX(ext_timer) = hw_cnt;

	/* Disable external timer x */
	IT8XXX2_EXT_CTRLX(ext_timer) &= ~IT8XXX2_EXT_ETXEN;

	if (start == EXT_START_TIMER)
		/* Enable and re-start external timer x */
		IT8XXX2_EXT_CTRLX(ext_timer) |= (IT8XXX2_EXT_ETXEN |
						 IT8XXX2_EXT_ETXRST);

	if (with_int == EXT_WITH_TIMER_INT) {
		irq_enable(irq_num);
	} else {
		irq_disable(irq_num);
	}

	return 0;
}

/* Driver init: bring up the free-run timer, the event timer and (when
 * enabled) the combined busy-wait timer pair.
 */
static int sys_clock_driver_init(void)
{
	int ret;

	/* Enable 32-bit free run timer overflow interrupt */
	IRQ_CONNECT(FREE_RUN_TIMER_IRQ, 0, free_run_timer_overflow_isr, NULL,
		    FREE_RUN_TIMER_FLAG);
	/* Set 32-bit timer4 for free run*/
	ret = timer_init(FREE_RUN_TIMER, EXT_PSR_32P768K, EXT_RAW_CNT,
			 FREE_RUN_TIMER_MAX_CNT, EXT_FIRST_TIME_ENABLE,
			 FREE_RUN_TIMER_IRQ, FREE_RUN_TIMER_FLAG,
			 EXT_WITH_TIMER_INT, EXT_START_TIMER);
	if (ret < 0) {
		LOG_ERR("Init free run timer failed");
		return ret;
	}

	/* Set 24-bit timer3 for timeout event */
	IRQ_CONNECT(EVENT_TIMER_IRQ, 0, evt_timer_isr, NULL, EVENT_TIMER_FLAG);
	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		ret = timer_init(EVENT_TIMER, EXT_PSR_32P768K, EXT_RAW_CNT,
				 EVENT_TIMER_MAX_CNT, EXT_FIRST_TIME_ENABLE,
				 EVENT_TIMER_IRQ, EVENT_TIMER_FLAG,
				 EXT_WITH_TIMER_INT, EXT_NOT_START_TIMER);
	} else {
		/* Start a event timer in one system tick */
		ret = timer_init(EVENT_TIMER, EXT_PSR_32P768K, EXT_RAW_CNT,
				 MAX((1 * HW_CNT_PER_SYS_TICK), 1),
				 EXT_FIRST_TIME_ENABLE, EVENT_TIMER_IRQ,
				 EVENT_TIMER_FLAG, EXT_WITH_TIMER_INT,
				 EXT_START_TIMER);
	}
	if (ret < 0) {
		LOG_ERR("Init event timer failed");
		return ret;
	}

	if (IS_ENABLED(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)) {
		/* Set timer5 and timer6 combinational mode for busy wait */
		IT8XXX2_EXT_CTRLX(BUSY_WAIT_L_TIMER) |= IT8XXX2_EXT_ETXCOMB;

		/* Set 32-bit timer6 to count-- every 1us */
		ret = timer_init(BUSY_WAIT_H_TIMER, EXT_PSR_EC_CLK,
				 EXT_RAW_CNT, BUSY_WAIT_TIMER_H_MAX_CNT,
				 EXT_FIRST_TIME_ENABLE, BUSY_WAIT_H_TIMER_IRQ,
				 BUSY_WAIT_H_TIMER_FLAG, EXT_WITHOUT_TIMER_INT,
				 EXT_START_TIMER);
		if (ret < 0) {
			LOG_ERR("Init busy wait high timer failed");
			return ret;
		}

		/*
		 * Set 24-bit timer5 to overflow every 1us
		 * NOTE: When the timer5 count down to overflow in combinational
		 *       mode, timer6 counter will automatically decrease one count
		 *       and timer5 will automatically re-start counting down
		 *       from COUNT_1US. Timer5 clock source is EC_FREQ, so the
		 *       time period from COUNT_1US to overflow is
		 *       (1 / EC_FREQ) * (EC_FREQ / USEC_PER_SEC) = 1us.
		 */
		ret = timer_init(BUSY_WAIT_L_TIMER, EXT_PSR_EC_CLK,
				 EXT_RAW_CNT, COUNT_1US, EXT_FIRST_TIME_ENABLE,
				 BUSY_WAIT_L_TIMER_IRQ, BUSY_WAIT_L_TIMER_FLAG,
				 EXT_WITHOUT_TIMER_INT, EXT_START_TIMER);
		if (ret < 0) {
			LOG_ERR("Init busy wait low timer failed");
			return ret;
		}
	}

	return 0;
}

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);
```
/content/code_sandbox/drivers/timer/ite_it8xxx2_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,928
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/irq.h> #if defined(CONFIG_CLOCK_CONTROL_NRF) #include <zephyr/drivers/clock_control/nrf_clock_control.h> #endif #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/drivers/timer/nrf_grtc_timer.h> #include <nrfx_grtc.h> #include <zephyr/sys/math_extras.h> #define GRTC_NODE DT_NODELABEL(grtc) /* Ensure that GRTC properties in devicetree are defined correctly. */ #if !DT_NODE_HAS_PROP(GRTC_NODE, owned_channels) #error GRTC owned-channels DT property is not defined #endif #define OWNED_CHANNELS_MASK NRFX_CONFIG_MASK_DT(GRTC_NODE, owned_channels) #define CHILD_OWNED_CHANNELS_MASK NRFX_CONFIG_MASK_DT(GRTC_NODE, child_owned_channels) #if ((OWNED_CHANNELS_MASK | CHILD_OWNED_CHANNELS_MASK) != OWNED_CHANNELS_MASK) #error GRTC child-owned-channels DT property must be a subset of owned-channels #endif #define CHAN_COUNT NRFX_GRTC_CONFIG_NUM_OF_CC_CHANNELS #define EXT_CHAN_COUNT (CHAN_COUNT - 1) #ifndef GRTC_SYSCOUNTERL_VALUE_Msk #define GRTC_SYSCOUNTERL_VALUE_Msk GRTC_SYSCOUNTER_SYSCOUNTERL_VALUE_Msk #endif #ifndef GRTC_SYSCOUNTERH_VALUE_Msk #define GRTC_SYSCOUNTERH_VALUE_Msk GRTC_SYSCOUNTER_SYSCOUNTERH_VALUE_Msk #endif #define MAX_CC_LATCH_WAIT_TIME_US 77 #define CYC_PER_TICK \ ((uint64_t)sys_clock_hw_cycles_per_sec() / (uint64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC) #define COUNTER_SPAN (GRTC_SYSCOUNTERL_VALUE_Msk | ((uint64_t)GRTC_SYSCOUNTERH_VALUE_Msk << 32)) #define MAX_TICKS \ (((COUNTER_SPAN / CYC_PER_TICK) > INT_MAX) ? 
INT_MAX : (COUNTER_SPAN / CYC_PER_TICK)) #define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK) #define LFCLK_FREQUENCY_HZ 32768 #if defined(CONFIG_TEST) const int32_t z_sys_timer_irq_for_test = DT_IRQN(GRTC_NODE); #endif static void sys_clock_timeout_handler(int32_t id, uint64_t cc_val, void *p_context); static struct k_spinlock lock; static uint64_t last_count; /* Time (SYSCOUNTER value) @last sys_clock_announce() */ static atomic_t int_mask; static uint8_t ext_channels_allocated; static nrfx_grtc_channel_t system_clock_channel_data = { .handler = sys_clock_timeout_handler, .p_context = NULL, .channel = (uint8_t)-1, }; #define IS_CHANNEL_ALLOWED_ASSERT(chan) \ __ASSERT_NO_MSG((NRFX_GRTC_CONFIG_ALLOWED_CC_CHANNELS_MASK & (1UL << (chan))) && \ ((chan) != system_clock_channel_data.channel)) static inline uint64_t counter_sub(uint64_t a, uint64_t b) { return (a - b); } static inline uint64_t counter(void) { uint64_t now; nrfx_grtc_syscounter_get(&now); return now; } static inline int get_comparator(uint32_t chan, uint64_t *cc) { nrfx_err_t result; result = nrfx_grtc_syscounter_cc_value_read(chan, cc); if (result != NRFX_SUCCESS) { if (result != NRFX_ERROR_INVALID_PARAM) { return -EAGAIN; } return -EPERM; } return 0; } /* * Program a new callback <value> microseconds in the future */ static void system_timeout_set_relative(uint64_t value) { if (value <= NRF_GRTC_SYSCOUNTER_CCADD_MASK) { nrfx_grtc_syscounter_cc_relative_set(&system_clock_channel_data, value, true, NRFX_GRTC_CC_RELATIVE_SYSCOUNTER); } else { nrfx_grtc_syscounter_cc_absolute_set(&system_clock_channel_data, value + counter(), true); } } /* * Program a new callback in the absolute time given by <value> */ static void system_timeout_set_abs(uint64_t value) { nrfx_grtc_syscounter_cc_absolute_set(&system_clock_channel_data, value, true); } static bool compare_int_lock(int32_t chan) { atomic_val_t prev = atomic_and(&int_mask, ~BIT(chan)); nrfx_grtc_syscounter_cc_int_disable(chan); return prev & BIT(chan); } static void 
compare_int_unlock(int32_t chan, bool key) { if (key) { atomic_or(&int_mask, BIT(chan)); nrfx_grtc_syscounter_cc_int_enable(chan); } } static void sys_clock_timeout_handler(int32_t id, uint64_t cc_val, void *p_context) { ARG_UNUSED(id); ARG_UNUSED(p_context); uint64_t dticks; uint64_t now = counter(); if (unlikely(now < cc_val)) { return; } dticks = counter_sub(cc_val, last_count) / CYC_PER_TICK; last_count += dticks * CYC_PER_TICK; if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { /* protection is not needed because we are in the GRTC interrupt * so it won't get preempted by the interrupt. */ system_timeout_set_abs(last_count + CYC_PER_TICK); } sys_clock_announce((int32_t)dticks); } int32_t z_nrf_grtc_timer_chan_alloc(void) { uint8_t chan; nrfx_err_t err_code; /* Prevent allocating all available channels - one must be left for system purposes. */ if (ext_channels_allocated >= EXT_CHAN_COUNT) { return -ENOMEM; } err_code = nrfx_grtc_channel_alloc(&chan); if (err_code != NRFX_SUCCESS) { return -ENOMEM; } ext_channels_allocated++; return (int32_t)chan; } void z_nrf_grtc_timer_chan_free(int32_t chan) { IS_CHANNEL_ALLOWED_ASSERT(chan); nrfx_err_t err_code = nrfx_grtc_channel_free(chan); if (err_code == NRFX_SUCCESS) { ext_channels_allocated--; } } bool z_nrf_grtc_timer_compare_evt_check(int32_t chan) { IS_CHANNEL_ALLOWED_ASSERT(chan); uint32_t event_address = nrfx_grtc_event_compare_address_get(chan); return *(volatile uint32_t *)event_address != 0; } uint32_t z_nrf_grtc_timer_compare_evt_address_get(int32_t chan) { IS_CHANNEL_ALLOWED_ASSERT(chan); return nrfx_grtc_event_compare_address_get(chan); } uint32_t z_nrf_grtc_timer_capture_task_address_get(int32_t chan) { IS_CHANNEL_ALLOWED_ASSERT(chan); return nrfx_grtc_capture_task_address_get(chan); } uint64_t z_nrf_grtc_timer_read(void) { return counter(); } bool z_nrf_grtc_timer_compare_int_lock(int32_t chan) { IS_CHANNEL_ALLOWED_ASSERT(chan); return compare_int_lock(chan); } void z_nrf_grtc_timer_compare_int_unlock(int32_t 
chan, bool key) { IS_CHANNEL_ALLOWED_ASSERT(chan); compare_int_unlock(chan, key); } int z_nrf_grtc_timer_compare_read(int32_t chan, uint64_t *val) { IS_CHANNEL_ALLOWED_ASSERT(chan); return get_comparator(chan, val); } static int compare_set_nolocks(int32_t chan, uint64_t target_time, z_nrf_grtc_timer_compare_handler_t handler, void *user_data) { nrfx_err_t result; __ASSERT_NO_MSG(target_time < COUNTER_SPAN); nrfx_grtc_channel_t user_channel_data = { .handler = handler, .p_context = user_data, .channel = chan, }; result = nrfx_grtc_syscounter_cc_absolute_set(&user_channel_data, target_time, true); if (result != NRFX_SUCCESS) { return -EPERM; } return 0; } static int compare_set(int32_t chan, uint64_t target_time, z_nrf_grtc_timer_compare_handler_t handler, void *user_data) { bool key = compare_int_lock(chan); int ret = compare_set_nolocks(chan, target_time, handler, user_data); compare_int_unlock(chan, key); return ret; } int z_nrf_grtc_timer_set(int32_t chan, uint64_t target_time, z_nrf_grtc_timer_compare_handler_t handler, void *user_data) { IS_CHANNEL_ALLOWED_ASSERT(chan); return compare_set(chan, target_time, (nrfx_grtc_cc_handler_t)handler, user_data); } void z_nrf_grtc_timer_abort(int32_t chan) { IS_CHANNEL_ALLOWED_ASSERT(chan); bool key = compare_int_lock(chan); (void)nrfx_grtc_syscounter_cc_disable(chan); compare_int_unlock(chan, key); } uint64_t z_nrf_grtc_timer_get_ticks(k_timeout_t t) { uint64_t curr_time; int64_t curr_tick; int64_t result; int64_t abs_ticks; int64_t grtc_ticks; curr_time = counter(); curr_tick = sys_clock_tick_get(); grtc_ticks = t.ticks * CYC_PER_TICK; abs_ticks = Z_TICK_ABS(t.ticks); if (abs_ticks < 0) { /* relative timeout */ return (grtc_ticks > (int64_t)COUNTER_SPAN) ? 
-EINVAL : (curr_time + grtc_ticks); } /* absolute timeout */ result = (abs_ticks - curr_tick) * CYC_PER_TICK; if (result > (int64_t)COUNTER_SPAN) { return -EINVAL; } return curr_time + result; } int z_nrf_grtc_timer_capture_prepare(int32_t chan) { nrfx_grtc_channel_t user_channel_data = { .handler = NULL, .p_context = NULL, .channel = chan, }; nrfx_err_t result; IS_CHANNEL_ALLOWED_ASSERT(chan); /* Set the CC value to mark channel as not triggered and also to enable it * (makes CCEN=1). COUNTER_SPAN is used so as not to fire an event unnecessarily * - it can be assumed that such a large value will never be reached. */ result = nrfx_grtc_syscounter_cc_absolute_set(&user_channel_data, COUNTER_SPAN, false); if (result != NRFX_SUCCESS) { return -EPERM; } return 0; } int z_nrf_grtc_timer_capture_read(int32_t chan, uint64_t *captured_time) { /* TODO: The implementation should probably go to nrfx_grtc and this * should be just a wrapper for some nrfx_grtc_syscounter_capture_read. */ uint64_t capt_time; nrfx_err_t result; IS_CHANNEL_ALLOWED_ASSERT(chan); /* TODO: Use `nrfy_grtc_sys_counter_enable_check` when available (NRFX-2480) */ if (NRF_GRTC->CC[chan].CCEN == GRTC_CC_CCEN_ACTIVE_Enable) { /* If the channel is enabled (.CCEN), it means that there was no capture * triggering event. */ return -EBUSY; } result = nrfx_grtc_syscounter_cc_value_read(chan, &capt_time); if (result != NRFX_SUCCESS) { return -EPERM; } __ASSERT_NO_MSG(capt_time < COUNTER_SPAN); *captured_time = capt_time; return 0; } #if defined(CONFIG_NRF_GRTC_SLEEP_ALLOWED) int z_nrf_grtc_wakeup_prepare(uint64_t wake_time_us) { nrfx_err_t err_code; static uint8_t systemoff_channel; uint64_t now = counter(); nrfx_grtc_sleep_config_t sleep_cfg; /* Minimum time that ensures valid execution of system-off procedure. 
*/ uint32_t minimum_latency_us; uint32_t chan; int ret; nrfx_grtc_sleep_configuration_get(&sleep_cfg); minimum_latency_us = (sleep_cfg.waketime + sleep_cfg.timeout) * USEC_PER_SEC / LFCLK_FREQUENCY_HZ + CONFIG_NRF_GRTC_SYSCOUNTER_SLEEP_MINIMUM_LATENCY; sleep_cfg.auto_mode = false; nrfx_grtc_sleep_configure(&sleep_cfg); if (minimum_latency_us > wake_time_us) { return -EINVAL; } k_spinlock_key_t key = k_spin_lock(&lock); err_code = nrfx_grtc_channel_alloc(&systemoff_channel); if (err_code != NRFX_SUCCESS) { k_spin_unlock(&lock, key); return -ENOMEM; } (void)nrfx_grtc_syscounter_cc_int_disable(systemoff_channel); ret = compare_set(systemoff_channel, now + wake_time_us * sys_clock_hw_cycles_per_sec() / USEC_PER_SEC, NULL, NULL); if (ret < 0) { k_spin_unlock(&lock, key); return ret; } for (uint32_t grtc_chan_mask = NRFX_GRTC_CONFIG_ALLOWED_CC_CHANNELS_MASK; grtc_chan_mask > 0; grtc_chan_mask &= ~BIT(chan)) { /* Clear all GRTC channels except the systemoff_channel. */ chan = u32_count_trailing_zeros(grtc_chan_mask); if (chan != systemoff_channel) { nrfx_grtc_syscounter_cc_disable(chan); } } /* Make sure that wake_time_us was not triggered yet. */ if (nrfx_grtc_syscounter_compare_event_check(systemoff_channel)) { k_spin_unlock(&lock, key); return -EINVAL; } /* This mechanism ensures that stored CC value is latched. 
*/ uint32_t wait_time = nrfy_grtc_timeout_get(NRF_GRTC) * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / 32768 + MAX_CC_LATCH_WAIT_TIME_US; k_busy_wait(wait_time); #if NRF_GRTC_HAS_CLKSEL nrfx_grtc_clock_source_set(NRF_GRTC_CLKSEL_LFXO); #endif k_spin_unlock(&lock, key); return 0; } #endif /* CONFIG_NRF_GRTC_SLEEP_ALLOWED */ uint32_t sys_clock_cycle_get_32(void) { k_spinlock_key_t key = k_spin_lock(&lock); uint32_t ret = (uint32_t)counter(); k_spin_unlock(&lock, key); return ret; } uint64_t sys_clock_cycle_get_64(void) { k_spinlock_key_t key = k_spin_lock(&lock); uint64_t ret = counter(); k_spin_unlock(&lock, key); return ret; } uint32_t sys_clock_elapsed(void) { if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return 0; } return (uint32_t)(counter_sub(counter(), last_count) / CYC_PER_TICK); } static int sys_clock_driver_init(void) { nrfx_err_t err_code; #if defined(CONFIG_NRF_GRTC_TIMER_CLOCK_MANAGEMENT) && \ (defined(NRF_GRTC_HAS_CLKSEL) && (NRF_GRTC_HAS_CLKSEL == 1)) /* Use System LFCLK as the low-frequency clock source. */ nrfx_grtc_clock_source_set(NRF_GRTC_CLKSEL_LFCLK); #endif IRQ_CONNECT(DT_IRQN(GRTC_NODE), DT_IRQ(GRTC_NODE, priority), nrfx_isr, nrfx_grtc_irq_handler, 0); err_code = nrfx_grtc_init(0); if (err_code != NRFX_SUCCESS) { return -EPERM; } #if defined(CONFIG_NRF_GRTC_START_SYSCOUNTER) err_code = nrfx_grtc_syscounter_start(true, &system_clock_channel_data.channel); if (err_code != NRFX_SUCCESS) { return err_code == NRFX_ERROR_NO_MEM ? -ENOMEM : -EPERM; } #else err_code = nrfx_grtc_channel_alloc(&system_clock_channel_data.channel); if (err_code != NRFX_SUCCESS) { return -ENOMEM; } #endif /* CONFIG_NRF_GRTC_START_SYSCOUNTER */ int_mask = NRFX_GRTC_CONFIG_ALLOWED_CC_CHANNELS_MASK; if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { system_timeout_set_relative(CYC_PER_TICK); } #if defined(CONFIG_CLOCK_CONTROL_NRF) static const enum nrf_lfclk_start_mode mode = IS_ENABLED(CONFIG_SYSTEM_CLOCK_NO_WAIT) ? 
CLOCK_CONTROL_NRF_LF_START_NOWAIT : (IS_ENABLED(CONFIG_SYSTEM_CLOCK_WAIT_FOR_AVAILABILITY) ? CLOCK_CONTROL_NRF_LF_START_AVAILABLE : CLOCK_CONTROL_NRF_LF_START_STABLE); z_nrf_clock_control_lf_on(mode); #endif return 0; } void sys_clock_set_timeout(int32_t ticks, bool idle) { ARG_UNUSED(idle); if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return; } ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : MIN(MAX_TICKS, MAX(ticks, 0)); uint64_t delta_time = ticks * CYC_PER_TICK; uint64_t target_time = counter() + delta_time; /* Rounded down target_time to the tick boundary * (but not less than one tick after the last) */ target_time = MAX((target_time - last_count)/CYC_PER_TICK, 1)*CYC_PER_TICK + last_count; system_timeout_set_abs(target_time); } #if defined(CONFIG_NRF_GRTC_TIMER_APP_DEFINED_INIT) int nrf_grtc_timer_clock_driver_init(void) { return sys_clock_driver_init(); } #else SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); #endif ```
/content/code_sandbox/drivers/timer/nrf_grtc_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,808
```unknown config SMARTBOND_TIMER bool "Renesas SmartBond(tm) timer" default y depends on SOC_FAMILY_RENESAS_SMARTBOND depends on CLOCK_CONTROL depends on !$(dt_nodelabel_enabled,timer2) select TICKLESS_CAPABLE help This module implements a kernel device driver for the TIMER2 timer and provides the standard "system clock driver" interfaces. ```
/content/code_sandbox/drivers/timer/Kconfig.smartbond
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
89
```c /* * */ #include <soc/soc_caps.h> #include <soc/soc.h> #include <hal/systimer_hal.h> #include <hal/systimer_ll.h> #include <esp_private/systimer.h> #include <rom/ets_sys.h> #include <esp_attr.h> #include <zephyr/drivers/interrupt_controller/intc_esp32c3.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/sys_clock.h> #include <soc.h> #include <zephyr/init.h> #include <zephyr/spinlock.h> #define CYC_PER_TICK ((uint32_t)((uint64_t)sys_clock_hw_cycles_per_sec() \ / (uint64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC)) #define MAX_CYC 0xffffffffu #define MAX_TICKS ((MAX_CYC - CYC_PER_TICK) / CYC_PER_TICK) #define MIN_DELAY 1 #if defined(CONFIG_TEST) const int32_t z_sys_timer_irq_for_test = DT_IRQN(DT_NODELABEL(systimer0)); #endif #define TICKLESS IS_ENABLED(CONFIG_TICKLESS_KERNEL) static struct k_spinlock lock; static uint64_t last_count; /* Systimer HAL layer object */ static systimer_hal_context_t systimer_hal; static void set_systimer_alarm(uint64_t time) { systimer_hal_select_alarm_mode(&systimer_hal, SYSTIMER_ALARM_OS_TICK_CORE0, SYSTIMER_ALARM_MODE_ONESHOT); systimer_counter_value_t alarm = {.val = time}; systimer_ll_enable_alarm(systimer_hal.dev, SYSTIMER_ALARM_OS_TICK_CORE0, false); systimer_ll_set_alarm_target(systimer_hal.dev, SYSTIMER_ALARM_OS_TICK_CORE0, alarm.val); systimer_ll_apply_alarm_value(systimer_hal.dev, SYSTIMER_ALARM_OS_TICK_CORE0); systimer_ll_enable_alarm(systimer_hal.dev, SYSTIMER_ALARM_OS_TICK_CORE0, true); systimer_ll_enable_alarm_int(systimer_hal.dev, SYSTIMER_ALARM_OS_TICK_CORE0, true); } static uint64_t get_systimer_alarm(void) { return systimer_hal_get_counter_value(&systimer_hal, SYSTIMER_COUNTER_OS_TICK); } static void sys_timer_isr(const void *arg) { ARG_UNUSED(arg); systimer_ll_clear_alarm_int(systimer_hal.dev, SYSTIMER_ALARM_OS_TICK_CORE0); k_spinlock_key_t key = k_spin_lock(&lock); uint64_t now = get_systimer_alarm(); uint64_t dticks = (uint64_t)((now - last_count) / CYC_PER_TICK); last_count += dticks * CYC_PER_TICK; if 
(!TICKLESS) { uint64_t next = last_count + CYC_PER_TICK; if ((int64_t)(next - now) < MIN_DELAY) { next += CYC_PER_TICK; } set_systimer_alarm(next); } k_spin_unlock(&lock, key); sys_clock_announce(dticks); } void sys_clock_set_timeout(int32_t ticks, bool idle) { ARG_UNUSED(idle); #if defined(CONFIG_TICKLESS_KERNEL) ticks = ticks == K_TICKS_FOREVER ? MAX_TICKS : ticks; ticks = CLAMP(ticks - 1, 0, (int32_t)MAX_TICKS); k_spinlock_key_t key = k_spin_lock(&lock); uint64_t now = get_systimer_alarm(); uint32_t adj, cyc = ticks * CYC_PER_TICK; /* Round up to next tick boundary. */ adj = (uint32_t)(now - last_count) + (CYC_PER_TICK - 1); if (cyc <= MAX_CYC - adj) { cyc += adj; } else { cyc = MAX_CYC; } cyc = (cyc / CYC_PER_TICK) * CYC_PER_TICK; if ((int32_t)(cyc + last_count - now) < MIN_DELAY) { cyc += CYC_PER_TICK; } set_systimer_alarm(cyc + last_count); k_spin_unlock(&lock, key); #endif } uint32_t sys_clock_elapsed(void) { if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return 0; } k_spinlock_key_t key = k_spin_lock(&lock); uint32_t ret = ((uint32_t)get_systimer_alarm() - (uint32_t)last_count) / CYC_PER_TICK; k_spin_unlock(&lock, key); return ret; } uint32_t sys_clock_cycle_get_32(void) { return (uint32_t)get_systimer_alarm(); } uint64_t sys_clock_cycle_get_64(void) { return get_systimer_alarm(); } void sys_clock_disable(void) { systimer_ll_enable_alarm(systimer_hal.dev, SYSTIMER_ALARM_OS_TICK_CORE0, false); systimer_ll_enable_alarm_int(systimer_hal.dev, SYSTIMER_ALARM_OS_TICK_CORE0, false); systimer_hal_deinit(&systimer_hal); } static int sys_clock_driver_init(void) { esp_intr_alloc(DT_IRQN(DT_NODELABEL(systimer0)), 0, sys_timer_isr, NULL, NULL); systimer_hal_init(&systimer_hal); systimer_hal_connect_alarm_counter(&systimer_hal, SYSTIMER_ALARM_OS_TICK_CORE0, SYSTIMER_COUNTER_OS_TICK); systimer_hal_enable_counter(&systimer_hal, SYSTIMER_COUNTER_OS_TICK); systimer_hal_counter_can_stall_by_cpu(&systimer_hal, SYSTIMER_COUNTER_OS_TICK, 0, true); last_count = get_systimer_alarm(); 
set_systimer_alarm(last_count + CYC_PER_TICK); return 0; } SYS_INIT(sys_clock_driver_init, PRE_KERNEL_1, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/esp32_sys_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,251
```unknown config MCUX_GPT_TIMER bool "MCUX GPT Event timer" default y depends on PM depends on DT_HAS_NXP_GPT_HW_TIMER_ENABLED select TICKLESS_CAPABLE help This module implements a kernel device driver for the NXP GPT timer, and provides the standard "system clock driver" interfaces. It uses the first GPT peripheral defined in the system, which can no longer be used for the GPT counter driver. ```
/content/code_sandbox/drivers/timer/Kconfig.mcux_gpt
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
104
```c /* * */ #define DT_DRV_COMPAT intel_hpet #include <zephyr/init.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/sys_clock.h> #include <zephyr/spinlock.h> #include <zephyr/irq.h> #include <zephyr/linker/sections.h> #include <zephyr/dt-bindings/interrupt-controller/intel-ioapic.h> #include <soc.h> /** * @file * @brief HPET (High Precision Event Timers) driver * * HPET hardware contains a number of timers which can be used by * the operating system, where the number of timers is implementation * specific. The timers are implemented as a single up-counter with * a set of comparators where the counter increases monotonically. * Each timer has a match register and a comparator, and can generate * an interrupt when the value in the match register equals the value of * the free running counter. Some of these timers can be enabled to * generate periodic interrupt. * * The HPET registers are usually mapped to memory space on x86 * hardware. If this is not the case, custom register access functions * can be used by defining macro HPET_USE_CUSTOM_REG_ACCESS_FUNCS in * soc.h, and implementing necessary initialization and access * functions as described below. * * HPET_COUNTER_CLK_PERIOD can be overridden in soc.h if * COUNTER_CLK_PERIOD is not in femtoseconds (1e-15 sec). 
*/ /* General Configuration register */ #define GCONF_ENABLE BIT(0) #define GCONF_LR BIT(1) /* legacy interrupt routing, */ /* disables PIT */ /* General Interrupt Status register */ #define TIMER0_INT_STS BIT(0) /* Timer Configuration and Capabilities register */ #define TIMER_CONF_INT_LEVEL BIT(1) #define TIMER_CONF_INT_ENABLE BIT(2) #define TIMER_CONF_PERIODIC BIT(3) #define TIMER_CONF_VAL_SET BIT(6) #define TIMER_CONF_MODE32 BIT(8) #define TIMER_CONF_FSB_EN BIT(14) /* FSB interrupt delivery */ /* enable */ DEVICE_MMIO_TOPLEVEL_STATIC(hpet_regs, DT_DRV_INST(0)); #define HPET_REG_ADDR(off) \ ((mm_reg_t)(DEVICE_MMIO_TOPLEVEL_GET(hpet_regs) + (off))) /* High dword of General Capabilities and ID register */ #define CLK_PERIOD_REG HPET_REG_ADDR(0x04) /* General Configuration register */ #define GCONF_REG HPET_REG_ADDR(0x10) /* General Interrupt Status register */ #define INTR_STATUS_REG HPET_REG_ADDR(0x20) /* Main Counter Register */ #define MAIN_COUNTER_LOW_REG HPET_REG_ADDR(0xf0) #define MAIN_COUNTER_HIGH_REG HPET_REG_ADDR(0xf4) /* Timer 0 Configuration and Capabilities register */ #define TIMER0_CONF_REG HPET_REG_ADDR(0x100) /* Timer 0 Comparator Register */ #define TIMER0_COMPARATOR_LOW_REG HPET_REG_ADDR(0x108) #define TIMER0_COMPARATOR_HIGH_REG HPET_REG_ADDR(0x10c) #if defined(CONFIG_TEST) const int32_t z_sys_timer_irq_for_test = DT_IRQN(DT_INST(0, intel_hpet)); #endif /** * @brief Return the value of the main counter. * * @return Value of Main Counter */ static inline uint64_t hpet_counter_get(void) { #ifdef CONFIG_64BIT uint64_t val = sys_read64(MAIN_COUNTER_LOW_REG); return val; #else uint32_t high; uint32_t low; do { high = sys_read32(MAIN_COUNTER_HIGH_REG); low = sys_read32(MAIN_COUNTER_LOW_REG); } while (high != sys_read32(MAIN_COUNTER_HIGH_REG)); return ((uint64_t)high << 32) | low; #endif } /** * @brief Get COUNTER_CLK_PERIOD * * Read and return the COUNTER_CLK_PERIOD, which is the high * 32-bit of the General Capabilities and ID Register. 
This can * be used to calculate the frequency of the main counter. * * Usually the period is in femtoseconds. If this is not * the case, define HPET_COUNTER_CLK_PERIOD in soc.h so * it can be used to calculate frequency. * * @return COUNTER_CLK_PERIOD */ static inline uint32_t hpet_counter_clk_period_get(void) { return sys_read32(CLK_PERIOD_REG); } /** * @brief Return the value of the General Configuration Register * * @return Value of the General Configuration Register */ static inline uint32_t hpet_gconf_get(void) { return sys_read32(GCONF_REG); } /** * @brief Write to General Configuration Register * * @param val Value to be written to the register */ static inline void hpet_gconf_set(uint32_t val) { sys_write32(val, GCONF_REG); } /** * @brief Return the value of the Timer Configuration Register * * This reads and returns the value of the Timer Configuration * Register of Timer #0. * * @return Value of the Timer Configuration Register */ static inline uint32_t hpet_timer_conf_get(void) { return sys_read32(TIMER0_CONF_REG); } /** * @brief Write to the Timer Configuration Register * * This writes the specified value to the Timer Configuration * Register of Timer #0. * * @param val Value to be written to the register */ static inline void hpet_timer_conf_set(uint32_t val) { sys_write32(val, TIMER0_CONF_REG); } /* * The following register access functions should work on generic x86 * hardware. If the targeted SoC requires special handling of HPET * registers, these functions will need to be implemented in the SoC * layer by first defining the macro HPET_USE_CUSTOM_REG_ACCESS_FUNCS * in soc.h to signal such intent. * * This is a list of functions which must be implemented in the SoC * layer: * void hpet_timer_comparator_set(uint32_t val) */ #ifndef HPET_USE_CUSTOM_REG_ACCESS_FUNCS /** * @brief Write to the Timer Comparator Value Register * * This writes the specified value to the Timer Comparator * Value Register of Timer #0. 
* * @param val Value to be written to the register */ static inline void hpet_timer_comparator_set(uint64_t val) { #if CONFIG_X86_64 sys_write64(val, TIMER0_COMPARATOR_LOW_REG); #else sys_write32((uint32_t)val, TIMER0_COMPARATOR_LOW_REG); sys_write32((uint32_t)(val >> 32), TIMER0_COMPARATOR_HIGH_REG); #endif } #endif /* HPET_USE_CUSTOM_REG_ACCESS_FUNCS */ #ifndef HPET_COUNTER_CLK_PERIOD /* COUNTER_CLK_PERIOD (CLK_PERIOD_REG) is in femtoseconds (1e-15 sec) */ #define HPET_COUNTER_CLK_PERIOD (1000000000000000ULL) #endif /* * HPET_INT_LEVEL_TRIGGER is used to set HPET interrupt as level trigger * for ARM CPU with NVIC like EHL PSE, whose DTS interrupt setting * has no "sense" cell. */ #if (DT_INST_IRQ_HAS_CELL(0, sense)) #ifdef HPET_INT_LEVEL_TRIGGER __WARN("HPET_INT_LEVEL_TRIGGER has no effect, DTS setting is used instead") #undef HPET_INT_LEVEL_TRIGGER #endif #if ((DT_INST_IRQ(0, sense) & IRQ_TYPE_LEVEL) == IRQ_TYPE_LEVEL) #define HPET_INT_LEVEL_TRIGGER #endif #endif /* (DT_INST_IRQ_HAS_CELL(0, sense)) */ static __pinned_bss struct k_spinlock lock; static __pinned_bss uint64_t last_count; static __pinned_bss uint64_t last_tick; static __pinned_bss uint32_t last_elapsed; #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME static __pinned_bss unsigned int cyc_per_tick; #else #define cyc_per_tick \ (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC) #endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */ #define HPET_MAX_TICKS ((int32_t)0x7fffffff) #ifdef HPET_INT_LEVEL_TRIGGER /** * @brief Write to General Interrupt Status Register * * This is used to acknowledge and clear interrupt bits. 
* * @param val Value to be written to the register */ static inline void hpet_int_sts_set(uint32_t val) { sys_write32(val, INTR_STATUS_REG); } #endif /* ensure the comparator is always set ahead of the current counter value */ static inline void hpet_timer_comparator_set_safe(uint64_t next) { hpet_timer_comparator_set(next); uint64_t now = hpet_counter_get(); if (unlikely((int64_t)(next - now) <= 0)) { uint32_t bump = 1; do { next = now + bump; bump *= 2; hpet_timer_comparator_set(next); now = hpet_counter_get(); } while ((int64_t)(next - now) <= 0); } } __isr static void hpet_isr(const void *arg) { ARG_UNUSED(arg); k_spinlock_key_t key = k_spin_lock(&lock); uint64_t now = hpet_counter_get(); #ifdef HPET_INT_LEVEL_TRIGGER /* * Clear interrupt only if level trigger is selected. * When edge trigger is selected, spec says only 0 can * be written. */ hpet_int_sts_set(TIMER0_INT_STS); #endif if (IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_QEMU_TARGET)) { /* Qemu in SMP mode has observed the clock going * "backwards" relative to interrupts already received * on the other CPU, despite the HPET being * theoretically a global device. 
*/ int64_t diff = (int64_t)(now - last_count); if (last_count && diff < 0) { now = last_count; } } uint32_t dticks = (uint32_t)((now - last_count) / cyc_per_tick); last_count += (uint64_t)dticks * cyc_per_tick; last_tick += dticks; last_elapsed = 0; if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { uint64_t next = last_count + cyc_per_tick; hpet_timer_comparator_set_safe(next); } k_spin_unlock(&lock, key); sys_clock_announce(dticks); } __pinned_func static void config_timer0(unsigned int irq) { uint32_t val = hpet_timer_conf_get(); /* 5-bit IRQ field starting at bit 9 */ val = (val & ~(0x1f << 9)) | ((irq & 0x1f) << 9); #ifdef HPET_INT_LEVEL_TRIGGER /* Set level trigger if selected */ val |= TIMER_CONF_INT_LEVEL; #endif val &= ~((uint32_t)(TIMER_CONF_MODE32 | TIMER_CONF_PERIODIC | TIMER_CONF_FSB_EN)); val |= TIMER_CONF_INT_ENABLE; hpet_timer_conf_set(val); } __boot_func void smp_timer_init(void) { /* Noop, the HPET is a single system-wide device and it's * configured to deliver interrupts to every CPU, so there's * nothing to do at initialization on auxiliary CPUs. */ } __pinned_func void sys_clock_set_timeout(int32_t ticks, bool idle) { ARG_UNUSED(idle); #if defined(CONFIG_TICKLESS_KERNEL) uint32_t reg; if (ticks == K_TICKS_FOREVER && idle) { reg = hpet_gconf_get(); reg &= ~GCONF_ENABLE; hpet_gconf_set(reg); return; } ticks = ticks == K_TICKS_FOREVER ? 
HPET_MAX_TICKS : ticks; ticks = CLAMP(ticks, 0, HPET_MAX_TICKS/2); k_spinlock_key_t key = k_spin_lock(&lock); uint64_t cyc = (last_tick + last_elapsed + ticks) * cyc_per_tick; hpet_timer_comparator_set_safe(cyc); k_spin_unlock(&lock, key); #endif } __pinned_func uint32_t sys_clock_elapsed(void) { if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return 0; } k_spinlock_key_t key = k_spin_lock(&lock); uint64_t now = hpet_counter_get(); uint32_t ret = (uint32_t)((now - last_count) / cyc_per_tick); last_elapsed = ret; k_spin_unlock(&lock, key); return ret; } __pinned_func uint32_t sys_clock_cycle_get_32(void) { return (uint32_t)hpet_counter_get(); } __pinned_func uint64_t sys_clock_cycle_get_64(void) { return hpet_counter_get(); } __pinned_func void sys_clock_idle_exit(void) { uint32_t reg; reg = hpet_gconf_get(); reg |= GCONF_ENABLE; hpet_gconf_set(reg); } __boot_func static int sys_clock_driver_init(void) { extern int z_clock_hw_cycles_per_sec; uint32_t hz, reg; ARG_UNUSED(hz); ARG_UNUSED(z_clock_hw_cycles_per_sec); DEVICE_MMIO_TOPLEVEL_MAP(hpet_regs, K_MEM_CACHE_NONE); #if DT_INST_IRQ_HAS_CELL(0, sense) IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), hpet_isr, 0, DT_INST_IRQ(0, sense)); #else IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), hpet_isr, 0, 0); #endif config_timer0(DT_INST_IRQN(0)); irq_enable(DT_INST_IRQN(0)); #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME hz = (uint32_t)(HPET_COUNTER_CLK_PERIOD / hpet_counter_clk_period_get()); z_clock_hw_cycles_per_sec = hz; cyc_per_tick = hz / CONFIG_SYS_CLOCK_TICKS_PER_SEC; #endif reg = hpet_gconf_get(); reg |= GCONF_ENABLE; #if (DT_INST_PROP(0, no_legacy_irq) == 0) /* Note: we set the legacy routing bit, because otherwise * nothing in Zephyr disables the PIT which then fires * interrupts into the same IRQ. But that means we're then * forced to use IRQ2 contra the way the kconfig IRQ selection * is supposed to work. Should fix this. 
*/ reg |= GCONF_LR; #endif hpet_gconf_set(reg); last_tick = hpet_counter_get() / cyc_per_tick; last_count = last_tick * cyc_per_tick; hpet_timer_comparator_set_safe(last_count + cyc_per_tick); return 0; } SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/hpet.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,211
```c /* * */ /** * Driver for the timer model of the POSIX native_sim/posix board * It provides the interfaces required by the kernel and the sanity testcases * It also provides a custom k_busy_wait() which can be used with the * POSIX arch and InfClock SOC */ #include <zephyr/types.h> #include <zephyr/irq.h> #include <zephyr/init.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/sys_clock.h> #include "timer_model.h" #include "soc.h" #include <zephyr/arch/posix/posix_trace.h> static uint64_t tick_period; /* System tick period in microseconds */ /* Time (microseconds since boot) of the last timer tick interrupt */ static uint64_t last_tick_time; /** * Return the current HW cycle counter * (number of microseconds since boot in 32bits) */ uint32_t sys_clock_cycle_get_32(void) { return hwm_get_time(); } uint64_t sys_clock_cycle_get_64(void) { return hwm_get_time(); } /** * Interrupt handler for the timer interrupt * Announce to the kernel that a number of ticks have passed */ static void np_timer_isr(const void *arg) { ARG_UNUSED(arg); uint64_t now = hwm_get_time(); int32_t elapsed_ticks = (now - last_tick_time)/tick_period; last_tick_time += elapsed_ticks*tick_period; sys_clock_announce(elapsed_ticks); } /** * This function exists only to enable tests to call into the timer ISR */ void np_timer_isr_test_hook(const void *arg) { np_timer_isr(NULL); } /** * @brief Set system clock timeout * * Informs the system clock driver that the next needed call to * sys_clock_announce() will not be until the specified number of ticks * from the current time have elapsed. 
* * See system_timer.h for more information * * @param ticks Timeout in tick units * @param idle Hint to the driver that the system is about to enter * the idle state immediately after setting the timeout */ void sys_clock_set_timeout(int32_t ticks, bool idle) { ARG_UNUSED(idle); #if defined(CONFIG_TICKLESS_KERNEL) uint64_t silent_ticks; /* Note that we treat INT_MAX literally as anyhow the maximum amount of * ticks we can report with sys_clock_announce() is INT_MAX */ if (ticks == K_TICKS_FOREVER) { silent_ticks = INT64_MAX; } else if (ticks > 0) { silent_ticks = ticks - 1; } else { silent_ticks = 0; } hwtimer_set_silent_ticks(silent_ticks); #endif } /** * @brief Ticks elapsed since last sys_clock_announce() call * * Queries the clock driver for the current time elapsed since the * last call to sys_clock_announce() was made. The kernel will call * this with appropriate locking, the driver needs only provide an * instantaneous answer. */ uint32_t sys_clock_elapsed(void) { return (hwm_get_time() - last_tick_time)/tick_period; } /** * @brief Stop announcing sys ticks into the kernel * * Disable the system ticks generation */ void sys_clock_disable(void) { irq_disable(TIMER_TICK_IRQ); hwtimer_set_silent_ticks(INT64_MAX); } /** * @brief Initialize system timer driver * * Enable the hw timer, setting its tick period, and setup its interrupt */ static int sys_clock_driver_init(void) { tick_period = 1000000ul / CONFIG_SYS_CLOCK_TICKS_PER_SEC; last_tick_time = hwm_get_time(); hwtimer_enable(tick_period); IRQ_CONNECT(TIMER_TICK_IRQ, 1, np_timer_isr, 0, 0); irq_enable(TIMER_TICK_IRQ); return 0; } SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/native_posix_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
844
```unknown config ARM_ARCH_TIMER bool "ARM architected timer" depends on GIC select ARCH_HAS_CUSTOM_BUSY_WAIT select TICKLESS_CAPABLE select TIMER_HAS_64BIT_CYCLE_COUNTER help This module implements a kernel device driver for the ARM architected timer which provides per-cpu timers attached to a GIC to deliver its per-processor interrupts via PPIs. config ARM_ARCH_TIMER_ERRATUM_740657 bool "ARM architected timer is affected by ARM erratum 740657" depends on ARM_ARCH_TIMER help This option indicates that the ARM architected timer as implemented in the target hardware is affected by the ARM erratum 740657 (comp. ARM Cortex-A9 processors Software Developers Errata Notice, ARM document ID032315) which leads to an additional, spurious interrupt indication upon every actual timer interrupt. This option activates the workaround for the erratum within the timer driver. ```
/content/code_sandbox/drivers/timer/Kconfig.arm_arch
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
206
```unknown menuconfig ARCV2_TIMER bool "ARC Timer" default y depends on ARC select TICKLESS_CAPABLE help This module implements a kernel device driver for the ARCv2 processor timer 0 and provides the standard "system clock driver" interfaces. config ARCV2_TIMER_IRQ_PRIORITY int "ARC timer interrupt priority" default 0 depends on ARCV2_TIMER help This option specifies the IRQ priority used by the ARC timer. Lower values have higher priority. ```
/content/code_sandbox/drivers/timer/Kconfig.arcv2
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
110
```unknown config XTENSA_TIMER bool "Xtensa timer support" depends on XTENSA default y select TICKLESS_CAPABLE help Enables a system timer driver for Xtensa based on the CCOUNT and CCOMPARE special registers. config XTENSA_TIMER_ID int "System timer CCOMPAREn register index" default 0 depends on XTENSA_TIMER help Index of the CCOMPARE register (and associated interrupt) used for the system timer. Xtensa CPUs have hard-configured interrupt priorities associated with each timer, and some of them can be unmaskable (and thus not usable by OS code that need synchronization, like the timer subsystem!). In general timer zero is guaranteed to be present and usable. ```
/content/code_sandbox/drivers/timer/Kconfig.xtensa
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
168
```unknown config SAM0_RTC_TIMER bool "Atmel SAM0 series RTC timer" default y depends on DT_HAS_ATMEL_SAM0_RTC_ENABLED select TICKLESS_CAPABLE help This module implements a kernel device driver for the Atmel SAM0 series Real Time Counter and provides the standard "system clock driver" interfaces. ```
/content/code_sandbox/drivers/timer/Kconfig.sam0_rtc
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
76
```unknown config MTK_ADSP_TIMER bool "MediaTek Audio DSP timer" select TICKLESS_CAPABLE select TIMER_HAS_64BIT_CYCLE_COUNTER select SYSTEM_CLOCK_LOCK_FREE_COUNT help MediaTek MT81xx Audio DSPs have a 13 Mhz wall clock timer for system time that is independent of CPU speed. ```
/content/code_sandbox/drivers/timer/Kconfig.mtk_adsp
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
70
```unknown config NPCX_ITIM_TIMER bool "Nuvoton NPCX series internal 64/32-bit timers" default y depends on DT_HAS_NUVOTON_NPCX_ITIM_TIMER_ENABLED select TICKLESS_CAPABLE select TIMER_HAS_64BIT_CYCLE_COUNTER help This module implements a kernel device driver for the Nuvoton NPCX series internal 64/32-bit timers and provides the standard "system clock driver" interfaces. ```
/content/code_sandbox/drivers/timer/Kconfig.npcx_itim
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
99
```c */ #include <zephyr/spinlock.h> #include <zephyr/init.h> #include <zephyr/drivers/timer/system_timer.h> #define OSTIMER64_BASE DT_REG_ADDR(DT_NODELABEL(ostimer64)) #define OSTIMER_BASE DT_REG_ADDR(DT_NODELABEL(ostimer0)) /* * This device has a LOT of timer hardware. There are SIX * instantiated devices, with THREE different interfaces! Not * including the three Xtensa CCOUNT timers! * * In practice only "ostimer0" is used as an interrupt source by the * original SOF code, and the "ostimer64" and "platform" timers * reflect the same underlying clock (though they're different * counters with different values). There is also a "ptimer" device, * which is unused by SOF and not exercised by this driver. * * The driver architecture itself is sort of a hybrid of what other * Zephyr drivers use: there is no (or at least no documented) * comparator facility. The "ostimer64" is used as the system clock, * which is a 13 MHz 64 bit up-counter. But timeout interrupts are * delivered by ostimers[0], which is a 32 bit (!) down-counter (!!) * running at twice (!!!) the rate: 26MHz. Testing shows they're * slaved the same underlying clock -- they don't skew relative to * each other. 
*/ struct mtk_ostimer { unsigned int con; unsigned int rst; unsigned int cur; unsigned int irq_ack; }; struct mtk_ostimer64 { unsigned int con; unsigned int init_l; unsigned int init_h; unsigned int cur_l; unsigned int cur_h; unsigned int tval_h; unsigned int irq_ack; }; #define OSTIMER64 (*(volatile struct mtk_ostimer64 *)OSTIMER64_BASE) #define OSTIMERS ((volatile struct mtk_ostimer *)OSTIMER_BASE) #define OSTIMER_CON_ENABLE BIT(0) #define OSTIMER_CON_CLKSRC_MASK 0x30 #define OSTIMER_CON_CLKSRC_32K 0x00 /* 32768 Hz */ #define OSTIMER_CON_CLKSRC_26M 0x10 /* 26 MHz */ #define OSTIMER_CON_CLKSRC_BCLK 0x20 /* CPU speed, 720 MHz */ #define OSTIMER_CON_CLKSRC_PCLK 0x30 /* ~312 MHz experimentally */ #define OSTIMER_IRQ_ACK_ENABLE BIT(4) /* read = status, write = enable */ #define OSTIMER_IRQ_ACK_CLEAR BIT(5) #define OST64_HZ 13000000U #define OST_HZ 26000000U #define OST64_PER_TICK (OST64_HZ / CONFIG_SYS_CLOCK_TICKS_PER_SEC) #define OST_PER_TICK (OST_HZ / CONFIG_SYS_CLOCK_TICKS_PER_SEC) #define MAX_TICKS ((0xffffffffU - OST_PER_TICK) / OST_PER_TICK) #define CYC64_MAX (0xffffffff - OST64_PER_TICK) static struct k_spinlock lock; static uint64_t last_announce; uint32_t sys_clock_cycle_get_32(void) { return OSTIMER64.cur_l; } uint64_t sys_clock_cycle_get_64(void) { uint32_t l, h0, h1; do { h0 = OSTIMER64.cur_h; l = OSTIMER64.cur_l; h1 = OSTIMER64.cur_h; } while (h0 != h1); return (((uint64_t)h0) << 32) | l; } void sys_clock_set_timeout(int32_t ticks, bool idle) { /* Compute desired expiration time */ uint64_t now = sys_clock_cycle_get_64(); uint64_t end = now + CLAMP(ticks - 1, 0, MAX_TICKS) * OST64_PER_TICK; uint32_t dt = (uint32_t)MIN(end - last_announce, CYC64_MAX); /* Round up to tick boundary */ dt = ((dt + OST64_PER_TICK - 1) / OST64_PER_TICK) * OST64_PER_TICK; /* Convert to "fast" OSTIMER[0] cycles! 
*/ uint32_t cyc = 2 * (dt - (uint32_t)(now - last_announce)); /* Writes to RST need to be done when the device is disabled, * and automatically reset CUR (which reads zero while disabled) */ OSTIMERS[0].con &= ~OSTIMER_CON_ENABLE; OSTIMERS[0].rst = cyc; OSTIMERS[0].irq_ack |= OSTIMER_IRQ_ACK_CLEAR; OSTIMERS[0].irq_ack |= OSTIMER_IRQ_ACK_ENABLE; OSTIMERS[0].con |= OSTIMER_CON_ENABLE; } uint32_t sys_clock_elapsed(void) { k_spinlock_key_t key = k_spin_lock(&lock); uint32_t ret; ret = (uint32_t)((sys_clock_cycle_get_64() - last_announce) / OST64_PER_TICK); k_spin_unlock(&lock, key); return ret; } static void timer_isr(__maybe_unused void *arg) { /* Note: no locking. As it happens, on MT8195/8186/8188 all * Zephyr-usable interrupts are delivered at the same level. * So we can't be preempted and there's actually no need to * take a spinlock here. But ideally we should verify/detect * this instead of trusting blindly; this is fragile if future * devices add nested interrupts. */ uint64_t dcyc = sys_clock_cycle_get_64() - last_announce; uint64_t ticks = dcyc / OST64_PER_TICK; /* Leave the device disabled after clearing the interrupt, * sys_clock_set_timeout() is responsible for turning it back * on. */ OSTIMERS[0].irq_ack |= OSTIMER_IRQ_ACK_CLEAR; OSTIMERS[0].con &= ~OSTIMER_CON_ENABLE; OSTIMERS[0].irq_ack &= ~OSTIMER_IRQ_ACK_ENABLE; last_announce += ticks * OST64_PER_TICK; sys_clock_announce(ticks); if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { sys_clock_set_timeout(1, false); } } static int mtk_adsp_timer_init(void) { IRQ_CONNECT(DT_IRQN(DT_NODELABEL(ostimer0)), 0, timer_isr, 0, 0); irq_enable(DT_IRQN(DT_NODELABEL(ostimer0))); /* Disable all timers */ for (int i = 0; i < 4; i++) { OSTIMERS[i].con &= ~OSTIMER_CON_ENABLE; OSTIMERS[i].irq_ack |= OSTIMER_IRQ_ACK_CLEAR; OSTIMERS[i].irq_ack &= ~OSTIMER_IRQ_ACK_ENABLE; } /* Set them up to use the same clock. 
Note that OSTIMER64 has * a built-in divide by two (or it's configurable and I don't * know the register) and exposes a 13 MHz counter! */ OSTIMERS[0].con = ((OSTIMERS[0].con & ~OSTIMER_CON_CLKSRC_MASK) | OSTIMER_CON_CLKSRC_26M); OSTIMERS[0].con |= OSTIMER_CON_ENABLE; /* Clock is free running and survives reset, doesn't start at zero */ last_announce = sys_clock_cycle_get_64(); return 0; } SYS_INIT(mtk_adsp_timer_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/mtk_adsp_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,624
```c * Andrew Davis <afd@ti.com> * */ #include <zephyr/device.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/irq.h> #include <zephyr/sys_clock.h> #include <zephyr/kernel.h> #include <zephyr/spinlock.h> #include <zephyr/drivers/timer/ti_dmtimer.h> #define DT_DRV_COMPAT ti_am654_timer #define TIMER_BASE_ADDR DT_INST_REG_ADDR(0) #define TIMER_IRQ_NUM DT_INST_IRQN(0) #define TIMER_IRQ_PRIO DT_INST_IRQ(0, priority) #define TIMER_IRQ_FLAGS DT_INST_IRQ(0, flags) #define CYC_PER_TICK ((uint32_t)(sys_clock_hw_cycles_per_sec() \ / CONFIG_SYS_CLOCK_TICKS_PER_SEC)) #define MAX_TICKS ((k_ticks_t)(UINT32_MAX / CYC_PER_TICK) - 1) static struct k_spinlock lock; static uint32_t last_cycle; #define TI_DM_TIMER_READ(reg) sys_read32(TIMER_BASE_ADDR + TI_DM_TIMER_ ## reg) #define TI_DM_TIMER_MASK(reg) TI_DM_TIMER_ ## reg ## _MASK #define TI_DM_TIMER_SHIFT(reg) TI_DM_TIMER_ ## reg ## _SHIFT #define TI_DM_TIMER_WRITE(data, reg, bits) \ ti_dm_timer_write_masks(data, \ TIMER_BASE_ADDR + TI_DM_TIMER_ ## reg, \ TI_DM_TIMER_MASK(reg ## _ ## bits), \ TI_DM_TIMER_SHIFT(reg ## _ ## bits)) static void ti_dm_timer_write_masks(uint32_t data, uint32_t reg, uint32_t mask, uint32_t shift) { uint32_t reg_val; reg_val = sys_read32(reg); reg_val = (reg_val & ~(mask)) | (data << shift); sys_write32(reg_val, reg); } static void ti_dmtimer_isr(void *data) { /* If no pending event */ if (!TI_DM_TIMER_READ(IRQSTATUS)) { return; } k_spinlock_key_t key = k_spin_lock(&lock); uint32_t curr_cycle = TI_DM_TIMER_READ(TCRR); uint32_t delta_cycles = curr_cycle - last_cycle; uint32_t delta_ticks = delta_cycles / CYC_PER_TICK; last_cycle = curr_cycle; /* ACK match interrupt */ TI_DM_TIMER_WRITE(1, IRQSTATUS, MAT_IT_FLAG); if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { /* Setup next match time */ uint64_t next_cycle = curr_cycle + CYC_PER_TICK; TI_DM_TIMER_WRITE(next_cycle, TMAR, COMPARE_VALUE); } k_spin_unlock(&lock, key); sys_clock_announce(delta_ticks); } void sys_clock_set_timeout(int32_t ticks, bool 
idle) { ARG_UNUSED(idle); if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { /* Not supported on tickful kernels */ return; } ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks; ticks = CLAMP(ticks, 1, (int32_t)MAX_TICKS); k_spinlock_key_t key = k_spin_lock(&lock); /* Setup next match time */ uint32_t curr_cycle = TI_DM_TIMER_READ(TCRR); uint32_t next_cycle = curr_cycle + (ticks * CYC_PER_TICK); TI_DM_TIMER_WRITE(next_cycle, TMAR, COMPARE_VALUE); k_spin_unlock(&lock, key); } uint32_t sys_clock_cycle_get_32(void) { k_spinlock_key_t key = k_spin_lock(&lock); uint32_t curr_cycle = TI_DM_TIMER_READ(TCRR); k_spin_unlock(&lock, key); return curr_cycle; } unsigned int sys_clock_elapsed(void) { if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { /* Always return 0 for tickful kernel system */ return 0; } k_spinlock_key_t key = k_spin_lock(&lock); uint32_t curr_cycle = TI_DM_TIMER_READ(TCRR); uint32_t delta_cycles = curr_cycle - last_cycle; uint32_t delta_ticks = delta_cycles / CYC_PER_TICK; k_spin_unlock(&lock, key); return delta_ticks; } static int sys_clock_driver_init(void) { last_cycle = 0; IRQ_CONNECT(TIMER_IRQ_NUM, TIMER_IRQ_PRIO, ti_dmtimer_isr, NULL, TIMER_IRQ_FLAGS); /* Select autoreload mode */ TI_DM_TIMER_WRITE(1, TCLR, AR); /* Enable match interrupt */ TI_DM_TIMER_WRITE(1, IRQENABLE_SET, MAT_EN_FLAG); /* Load timer counter value */ TI_DM_TIMER_WRITE(0, TCRR, TIMER_COUNTER); /* Load timer load value */ TI_DM_TIMER_WRITE(0, TLDR, LOAD_VALUE); /* Load timer compare value */ TI_DM_TIMER_WRITE(CYC_PER_TICK, TMAR, COMPARE_VALUE); /* Enable compare mode */ TI_DM_TIMER_WRITE(1, TCLR, CE); /* Start the timer */ TI_DM_TIMER_WRITE(1, TCLR, ST); irq_enable(TIMER_IRQ_NUM); return 0; } SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/ti_dmtimer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,105
```unknown # Timer driver configuration options if SYS_CLOCK_EXISTS menu "Timer drivers" config TIMER_HAS_64BIT_CYCLE_COUNTER bool help When this option is true, the k_cycle_get_64() call is available to provide values from a 64-bit cycle counter. config TIMER_READS_ITS_FREQUENCY_AT_RUNTIME bool "Timer queries its hardware to find its frequency at runtime" help The drivers select this option automatically when needed. Do not modify this unless you have a very good reason for it. config SYSTEM_CLOCK_SLOPPY_IDLE bool "Timer allowed to skew uptime clock during idle" help When true, the timer driver is not required to maintain a correct system uptime count when the system enters idle. Some platforms may take advantage of this to reduce the overhead from regular interrupts required to handle counter wraparound conditions. config SYSTEM_CLOCK_INIT_PRIORITY int "System clock driver initialization priority" default 0 help This options can be used to set a specific initialization priority value for the system clock driver. As driver initialization might need the clock to be running already, you should let the default value as it is (0). # Hidden option to be selected by individual SoC. config TICKLESS_CAPABLE bool help Timer drivers should select this flag if they are capable of supporting tickless operation. That is, a call to sys_clock_set_timeout() with a number of ticks greater than one should be expected not to produce a call to sys_clock_announce() (really, not to produce an interrupt at all) until the specified expiration. config SYSTEM_TIMER_HAS_DISABLE_SUPPORT bool help This option should be selected by drivers implementing support for sys_clock_disable() API. config SYSTEM_CLOCK_LOCK_FREE_COUNT bool help This option should be selected by drivers implementing a lock free cycle count accessor. This is needed for instrumenting spin lock hold times. 
source "drivers/timer/Kconfig.altera_avalon" source "drivers/timer/Kconfig.ambiq" source "drivers/timer/Kconfig.x86" source "drivers/timer/Kconfig.arcv2" source "drivers/timer/Kconfig.arm_arch" source "drivers/timer/Kconfig.cavs" source "drivers/timer/Kconfig.cc13xx_cc26xx_rtc" source "drivers/timer/Kconfig.cortex_m_systick" source "drivers/timer/Kconfig.esp32" source "drivers/timer/Kconfig.gecko" source "drivers/timer/Kconfig.ite_it8xxx2" source "drivers/timer/Kconfig.leon_gptimer" source "drivers/timer/Kconfig.litex" source "drivers/timer/Kconfig.mchp_xec_rtos" source "drivers/timer/Kconfig.mcux_gpt" source "drivers/timer/Kconfig.mcux_lptmr" source "drivers/timer/Kconfig.mcux_os" source "drivers/timer/Kconfig.mips_cp0" source "drivers/timer/Kconfig.native_posix" source "drivers/timer/Kconfig.npcx_itim" source "drivers/timer/Kconfig.nrf_rtc" source "drivers/timer/Kconfig.nrf_grtc" source "drivers/timer/Kconfig.nrf_xrtc" source "drivers/timer/Kconfig.rcar_cmt" source "drivers/timer/Kconfig.riscv_machine" source "drivers/timer/Kconfig.rv32m1_lptmr" source "drivers/timer/Kconfig.sam0_rtc" source "drivers/timer/Kconfig.smartbond" source "drivers/timer/Kconfig.stm32_lptim" source "drivers/timer/Kconfig.ti_dm_timer" source "drivers/timer/Kconfig.xlnx_psttc" source "drivers/timer/Kconfig.xtensa" source "drivers/timer/Kconfig.mtk_adsp" endmenu endif # SYS_CLOCK_EXISTS ```
/content/code_sandbox/drivers/timer/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
834
```c /* * */ #include <limits.h> #include <zephyr/init.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/sys_clock.h> #include <zephyr/spinlock.h> #include <zephyr/irq.h> /* andestech,machine-timer */ #if DT_HAS_COMPAT_STATUS_OKAY(andestech_machine_timer) #define DT_DRV_COMPAT andestech_machine_timer #define MTIME_REG DT_INST_REG_ADDR(0) #define MTIMECMP_REG (DT_INST_REG_ADDR(0) + 8) #define TIMER_IRQN DT_INST_IRQN(0) /* neorv32-machine-timer */ #elif DT_HAS_COMPAT_STATUS_OKAY(neorv32_machine_timer) #define DT_DRV_COMPAT neorv32_machine_timer #define MTIME_REG DT_INST_REG_ADDR(0) #define MTIMECMP_REG (DT_INST_REG_ADDR(0) + 8) #define TIMER_IRQN DT_INST_IRQN(0) /* nuclei,systimer */ #elif DT_HAS_COMPAT_STATUS_OKAY(nuclei_systimer) #define DT_DRV_COMPAT nuclei_systimer #define MTIME_REG DT_INST_REG_ADDR(0) #define MTIMECMP_REG (DT_INST_REG_ADDR(0) + 8) #define TIMER_IRQN DT_INST_IRQ_BY_IDX(0, 1, irq) /* sifive,clint0 */ #elif DT_HAS_COMPAT_STATUS_OKAY(sifive_clint0) #define DT_DRV_COMPAT sifive_clint0 #define MTIME_REG (DT_INST_REG_ADDR(0) + 0xbff8U) #define MTIMECMP_REG (DT_INST_REG_ADDR(0) + 0x4000U) #define TIMER_IRQN DT_INST_IRQ_BY_IDX(0, 1, irq) /* telink,machine-timer */ #elif DT_HAS_COMPAT_STATUS_OKAY(telink_machine_timer) #define DT_DRV_COMPAT telink_machine_timer #define MTIME_REG DT_INST_REG_ADDR(0) #define MTIMECMP_REG (DT_INST_REG_ADDR(0) + 8) #define TIMER_IRQN DT_INST_IRQN(0) /* lowrisc,machine-timer */ #elif DT_HAS_COMPAT_STATUS_OKAY(lowrisc_machine_timer) #define DT_DRV_COMPAT lowrisc_machine_timer #define MTIME_REG (DT_INST_REG_ADDR(0) + 0x110) #define MTIMECMP_REG (DT_INST_REG_ADDR(0) + 0x118) #define TIMER_IRQN DT_INST_IRQN(0) /* niosv-machine-timer */ #elif DT_HAS_COMPAT_STATUS_OKAY(niosv_machine_timer) #define DT_DRV_COMPAT niosv_machine_timer #define MTIMECMP_REG DT_INST_REG_ADDR(0) #define MTIME_REG (DT_INST_REG_ADDR(0) + 8) #define TIMER_IRQN DT_INST_IRQN(0) /* scr,machine-timer*/ #elif 
DT_HAS_COMPAT_STATUS_OKAY(scr_machine_timer) #define DT_DRV_COMPAT scr_machine_timer #define MTIMER_HAS_DIVIDER #define MTIMEDIV_REG (DT_INST_REG_ADDR_U64(0) + 4) #define MTIME_REG (DT_INST_REG_ADDR_U64(0) + 8) #define MTIMECMP_REG (DT_INST_REG_ADDR_U64(0) + 16) #define TIMER_IRQN DT_INST_IRQN(0) #endif #define CYC_PER_TICK (uint32_t)(sys_clock_hw_cycles_per_sec() \ / CONFIG_SYS_CLOCK_TICKS_PER_SEC) /* the unsigned long cast limits divisions to native CPU register width */ #define cycle_diff_t unsigned long #define CYCLE_DIFF_MAX (~(cycle_diff_t)0) /* * We have two constraints on the maximum number of cycles we can wait for. * * 1) sys_clock_announce() accepts at most INT32_MAX ticks. * * 2) The number of cycles between two reports must fit in a cycle_diff_t * variable before converting it to ticks. * * Then: * * 3) Pick the smallest between (1) and (2). * * 4) Take into account some room for the unavoidable IRQ servicing latency. * Let's use 3/4 of the max range. * * Finally let's add the LSB value to the result so to clear out a bunch of * consecutive set bits coming from the original max values to produce a * nicer literal for assembly generation. 
*/ #define CYCLES_MAX_1 ((uint64_t)INT32_MAX * (uint64_t)CYC_PER_TICK) #define CYCLES_MAX_2 ((uint64_t)CYCLE_DIFF_MAX) #define CYCLES_MAX_3 MIN(CYCLES_MAX_1, CYCLES_MAX_2) #define CYCLES_MAX_4 (CYCLES_MAX_3 / 2 + CYCLES_MAX_3 / 4) #define CYCLES_MAX (CYCLES_MAX_4 + LSB_GET(CYCLES_MAX_4)) static struct k_spinlock lock; static uint64_t last_count; static uint64_t last_ticks; static uint32_t last_elapsed; #if defined(CONFIG_TEST) const int32_t z_sys_timer_irq_for_test = TIMER_IRQN; #endif static uintptr_t get_hart_mtimecmp(void) { return MTIMECMP_REG + (arch_proc_id() * 8); } static void set_mtimecmp(uint64_t time) { #ifdef CONFIG_64BIT *(volatile uint64_t *)get_hart_mtimecmp() = time; #else volatile uint32_t *r = (uint32_t *)get_hart_mtimecmp(); /* Per spec, the RISC-V MTIME/MTIMECMP registers are 64 bit, * but are NOT internally latched for multiword transfers. So * we have to be careful about sequencing to avoid triggering * spurious interrupts: always set the high word to a max * value first. 
*/ r[1] = 0xffffffff; r[0] = (uint32_t)time; r[1] = (uint32_t)(time >> 32); #endif } static void set_divider(void) { #ifdef MTIMER_HAS_DIVIDER *(volatile uint32_t *)MTIMEDIV_REG = CONFIG_RISCV_MACHINE_TIMER_SYSTEM_CLOCK_DIVIDER; #endif } static uint64_t mtime(void) { #ifdef CONFIG_64BIT return *(volatile uint64_t *)MTIME_REG; #else volatile uint32_t *r = (uint32_t *)MTIME_REG; uint32_t lo, hi; /* Likewise, must guard against rollover when reading */ do { hi = r[1]; lo = r[0]; } while (r[1] != hi); return (((uint64_t)hi) << 32) | lo; #endif } static void timer_isr(const void *arg) { ARG_UNUSED(arg); k_spinlock_key_t key = k_spin_lock(&lock); uint64_t now = mtime(); uint64_t dcycles = now - last_count; uint32_t dticks = (cycle_diff_t)dcycles / CYC_PER_TICK; last_count += (cycle_diff_t)dticks * CYC_PER_TICK; last_ticks += dticks; last_elapsed = 0; if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { uint64_t next = last_count + CYC_PER_TICK; set_mtimecmp(next); } k_spin_unlock(&lock, key); sys_clock_announce(dticks); } void sys_clock_set_timeout(int32_t ticks, bool idle) { ARG_UNUSED(idle); if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return; } k_spinlock_key_t key = k_spin_lock(&lock); uint64_t cyc; if (ticks == K_TICKS_FOREVER) { cyc = last_count + CYCLES_MAX; } else { cyc = (last_ticks + last_elapsed + ticks) * CYC_PER_TICK; if ((cyc - last_count) > CYCLES_MAX) { cyc = last_count + CYCLES_MAX; } } set_mtimecmp(cyc); k_spin_unlock(&lock, key); } uint32_t sys_clock_elapsed(void) { if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return 0; } k_spinlock_key_t key = k_spin_lock(&lock); uint64_t now = mtime(); uint64_t dcycles = now - last_count; uint32_t dticks = (cycle_diff_t)dcycles / CYC_PER_TICK; last_elapsed = dticks; k_spin_unlock(&lock, key); return dticks; } uint32_t sys_clock_cycle_get_32(void) { return ((uint32_t)mtime()) << CONFIG_RISCV_MACHINE_TIMER_SYSTEM_CLOCK_DIVIDER; } uint64_t sys_clock_cycle_get_64(void) { return mtime() << 
CONFIG_RISCV_MACHINE_TIMER_SYSTEM_CLOCK_DIVIDER; } static int sys_clock_driver_init(void) { set_divider(); IRQ_CONNECT(TIMER_IRQN, 0, timer_isr, NULL, 0); last_ticks = mtime() / CYC_PER_TICK; last_count = last_ticks * CYC_PER_TICK; set_mtimecmp(last_count + CYC_PER_TICK); irq_enable(TIMER_IRQN); return 0; } #ifdef CONFIG_SMP void smp_timer_init(void) { set_mtimecmp(last_count + CYC_PER_TICK); irq_enable(TIMER_IRQN); } #endif SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/riscv_machine_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,037
```unknown config MCUX_LPTMR_TIMER bool "MCUX LPTMR timer" default y depends on DT_HAS_NXP_KINETIS_LPTMR_ENABLED || \ MCUX_KINETIS_LPTMR depends on !COUNTER_MCUX_LPTMR select SYSTEM_TIMER_HAS_DISABLE_SUPPORT help This module implements a kernel device driver for the NXP MCUX Low Power Timer (LPTMR) and provides the standard "system clock driver" interfaces. config MCUX_KINETIS_LPTMR bool default y depends on DT_HAS_NXP_KINETIS_LPTMR_ENABLED select DEPRECATED help The compatible string "nxp,kinetis-lptmr" should be swiched to "nxp,lptmr" in DT. The former will be removed eventually. ```
/content/code_sandbox/drivers/timer/Kconfig.mcux_lptmr
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
183
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_TIMER_XLNX_PSTTC_TIMER_PRIV_H_ #define ZEPHYR_DRIVERS_TIMER_XLNX_PSTTC_TIMER_PRIV_H_ /* * Refer to the "Zynq UltraScale+ Device Technical Reference Manual" document * from Xilinx for more information on this peripheral. */ /* * Triple-timer Counter (TTC) Register Offsets */ /* Clock Control Register */ #define XTTCPS_CLK_CNTRL_OFFSET 0x00000000U /* Counter Control Register*/ #define XTTCPS_CNT_CNTRL_OFFSET 0x0000000CU /* Current Counter Value */ #define XTTCPS_COUNT_VALUE_OFFSET 0x00000018U /* Interval Count Value */ #define XTTCPS_INTERVAL_VAL_OFFSET 0x00000024U /* Match 1 value */ #define XTTCPS_MATCH_0_OFFSET 0x00000030U /* Match 2 value */ #define XTTCPS_MATCH_1_OFFSET 0x0000003CU /* Match 3 value */ #define XTTCPS_MATCH_2_OFFSET 0x00000048U /* Interrupt Status Register */ #define XTTCPS_ISR_OFFSET 0x00000054U /* Interrupt Enable Register */ #define XTTCPS_IER_OFFSET 0x00000060U /* * Clock Control Register Definitions */ /* Prescale enable */ #define XTTCPS_CLK_CNTRL_PS_EN_MASK 0x00000001U /* Prescale value */ #define XTTCPS_CLK_CNTRL_PS_VAL_MASK 0x0000001EU /* Prescale shift */ #define XTTCPS_CLK_CNTRL_PS_VAL_SHIFT 1U /* Prescale disable */ #define XTTCPS_CLK_CNTRL_PS_DISABLE 16U /* Clock source */ #define XTTCPS_CLK_CNTRL_SRC_MASK 0x00000020U /* External Clock edge */ #define XTTCPS_CLK_CNTRL_EXT_EDGE_MASK 0x00000040U /* * Counter Control Register Definitions */ /* Disable the counter */ #define XTTCPS_CNT_CNTRL_DIS_MASK 0x00000001U /* Interval mode */ #define XTTCPS_CNT_CNTRL_INT_MASK 0x00000002U /* Decrement mode */ #define XTTCPS_CNT_CNTRL_DECR_MASK 0x00000004U /* Match mode */ #define XTTCPS_CNT_CNTRL_MATCH_MASK 0x00000008U /* Reset counter */ #define XTTCPS_CNT_CNTRL_RST_MASK 0x00000010U /* Enable waveform */ #define XTTCPS_CNT_CNTRL_EN_WAVE_MASK 0x00000020U /* Waveform polarity */ #define XTTCPS_CNT_CNTRL_POL_WAVE_MASK 0x00000040U /* Reset value */ #define XTTCPS_CNT_CNTRL_RESET_VALUE 0x00000021U /* * 
Interrupt Register Definitions */ /* Interval Interrupt */ #define XTTCPS_IXR_INTERVAL_MASK 0x00000001U /* Match 1 Interrupt */ #define XTTCPS_IXR_MATCH_0_MASK 0x00000002U /* Match 2 Interrupt */ #define XTTCPS_IXR_MATCH_1_MASK 0x00000004U /* Match 3 Interrupt */ #define XTTCPS_IXR_MATCH_2_MASK 0x00000008U /* Counter Overflow */ #define XTTCPS_IXR_CNT_OVR_MASK 0x00000010U /* All valid Interrupts */ #define XTTCPS_IXR_ALL_MASK 0x0000001FU /* * Constants */ /* Maximum value of interval counter */ #define XTTC_MAX_INTERVAL_COUNT 0xFFFFFFFFU #endif /* ZEPHYR_DRIVERS_TIMER_XLNX_PSTTC_TIMER_PRIV_H_ */ ```
/content/code_sandbox/drivers/timer/xlnx_psttc_timer_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
813
```c /* * */ #include <zephyr/init.h> #include <zephyr/drivers/timer/arm_arch_timer.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/irq.h> #include <zephyr/sys_clock.h> #include <zephyr/spinlock.h> #include <zephyr/arch/cpu.h> #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME /* precompute CYC_PER_TICK at driver init to avoid runtime double divisions */ static uint32_t cyc_per_tick; #define CYC_PER_TICK cyc_per_tick #else #define CYC_PER_TICK (uint32_t)(sys_clock_hw_cycles_per_sec() \ / CONFIG_SYS_CLOCK_TICKS_PER_SEC) #endif #if defined(CONFIG_GDBSTUB) /* When interactively debugging, the cycle diff can overflow 32-bit variable */ #define cycle_diff_t uint64_t #else /* the unsigned long cast limits divisors to native CPU register width */ #define cycle_diff_t unsigned long #endif #define CYCLE_DIFF_MAX (~(cycle_diff_t)0) /* * We have two constraints on the maximum number of cycles we can wait for. * * 1) sys_clock_announce() accepts at most INT32_MAX ticks. * * 2) The number of cycles between two reports must fit in a cycle_diff_t * variable before converting it to ticks. * * Then: * * 3) Pick the smallest between (1) and (2). * * 4) Take into account some room for the unavoidable IRQ servicing latency. * Let's use 3/4 of the max range. * * Finally let's add the LSB value to the result so to clear out a bunch of * consecutive set bits coming from the original max values to produce a * nicer literal for assembly generation. 
*/ #define CYCLES_MAX_1 ((uint64_t)INT32_MAX * (uint64_t)CYC_PER_TICK) #define CYCLES_MAX_2 ((uint64_t)CYCLE_DIFF_MAX) #define CYCLES_MAX_3 MIN(CYCLES_MAX_1, CYCLES_MAX_2) #define CYCLES_MAX_4 (CYCLES_MAX_3 / 2 + CYCLES_MAX_3 / 4) #define CYCLES_MAX_5 (CYCLES_MAX_4 + LSB_GET(CYCLES_MAX_4)) #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME /* precompute CYCLES_MAX at driver init to avoid runtime double divisions */ static uint64_t cycles_max; #define CYCLES_MAX cycles_max #else #define CYCLES_MAX CYCLES_MAX_5 #endif static struct k_spinlock lock; static uint64_t last_cycle; static uint64_t last_tick; static uint32_t last_elapsed; #if defined(CONFIG_TEST) const int32_t z_sys_timer_irq_for_test = ARM_ARCH_TIMER_IRQ; #endif static void arm_arch_timer_compare_isr(const void *arg) { ARG_UNUSED(arg); k_spinlock_key_t key = k_spin_lock(&lock); #ifdef CONFIG_ARM_ARCH_TIMER_ERRATUM_740657 /* * Workaround required for Cortex-A9 MPCore erratum 740657 * comp. ARM Cortex-A9 processors Software Developers Errata Notice, * ARM document ID032315. */ if (!arm_arch_timer_get_int_status()) { /* * If the event flag is not set, this is a spurious interrupt. * DO NOT modify the compare register's value, DO NOT announce * elapsed ticks! */ k_spin_unlock(&lock, key); return; } #endif /* CONFIG_ARM_ARCH_TIMER_ERRATUM_740657 */ uint64_t curr_cycle = arm_arch_timer_count(); uint64_t delta_cycles = curr_cycle - last_cycle; uint32_t delta_ticks = (cycle_diff_t)delta_cycles / CYC_PER_TICK; last_cycle += (cycle_diff_t)delta_ticks * CYC_PER_TICK; last_tick += delta_ticks; last_elapsed = 0; if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { uint64_t next_cycle = last_cycle + CYC_PER_TICK; arm_arch_timer_set_compare(next_cycle); arm_arch_timer_set_irq_mask(false); } else { arm_arch_timer_set_irq_mask(true); #ifdef CONFIG_ARM_ARCH_TIMER_ERRATUM_740657 /* * In tickless mode, the compare register is normally not * updated from within the ISR. 
Yet, to work around the timer's * erratum, a new value *must* be written while the interrupt * is being processed before the interrupt is acknowledged * by the handling interrupt controller. */ arm_arch_timer_set_compare(~0ULL); } /* * Clear the event flag so that in case the erratum strikes (the timer's * vector will still be indicated as pending by the GIC's pending register * after this ISR has been executed) the error will be detected by the * check performed upon entry of the ISR -> the event flag is not set, * therefore, no actual hardware interrupt has occurred. */ arm_arch_timer_clear_int_status(); #else } #endif /* CONFIG_ARM_ARCH_TIMER_ERRATUM_740657 */ k_spin_unlock(&lock, key); sys_clock_announce(delta_ticks); } void sys_clock_set_timeout(int32_t ticks, bool idle) { if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return; } if (idle && ticks == K_TICKS_FOREVER) { return; } k_spinlock_key_t key = k_spin_lock(&lock); uint64_t next_cycle; if (ticks == K_TICKS_FOREVER) { next_cycle = last_cycle + CYCLES_MAX; } else { next_cycle = (last_tick + last_elapsed + ticks) * CYC_PER_TICK; if ((next_cycle - last_cycle) > CYCLES_MAX) { next_cycle = last_cycle + CYCLES_MAX; } } arm_arch_timer_set_compare(next_cycle); arm_arch_timer_set_irq_mask(false); k_spin_unlock(&lock, key); } uint32_t sys_clock_elapsed(void) { if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return 0; } k_spinlock_key_t key = k_spin_lock(&lock); uint64_t curr_cycle = arm_arch_timer_count(); uint64_t delta_cycles = curr_cycle - last_cycle; uint32_t delta_ticks = (cycle_diff_t)delta_cycles / CYC_PER_TICK; last_elapsed = delta_ticks; k_spin_unlock(&lock, key); return delta_ticks; } uint32_t sys_clock_cycle_get_32(void) { return (uint32_t)arm_arch_timer_count(); } uint64_t sys_clock_cycle_get_64(void) { return arm_arch_timer_count(); } #ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT void arch_busy_wait(uint32_t usec_to_wait) { if (usec_to_wait == 0) { return; } uint64_t start_cycles = arm_arch_timer_count(); uint64_t 
cycles_to_wait = sys_clock_hw_cycles_per_sec() / USEC_PER_SEC * usec_to_wait; for (;;) { uint64_t current_cycles = arm_arch_timer_count(); /* this handles the rollover on an unsigned 32-bit value */ if ((current_cycles - start_cycles) >= cycles_to_wait) { break; } } } #endif #ifdef CONFIG_SMP void smp_timer_init(void) { /* * set the initial status of timer0 of each secondary core */ arm_arch_timer_set_compare(last_cycle + CYC_PER_TICK); arm_arch_timer_enable(true); irq_enable(ARM_ARCH_TIMER_IRQ); arm_arch_timer_set_irq_mask(false); } #endif static int sys_clock_driver_init(void) { IRQ_CONNECT(ARM_ARCH_TIMER_IRQ, ARM_ARCH_TIMER_PRIO, arm_arch_timer_compare_isr, NULL, ARM_ARCH_TIMER_FLAGS); arm_arch_timer_init(); #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME cyc_per_tick = sys_clock_hw_cycles_per_sec() / CONFIG_SYS_CLOCK_TICKS_PER_SEC; cycles_max = CYCLES_MAX_5; #endif arm_arch_timer_enable(true); last_tick = arm_arch_timer_count() / CYC_PER_TICK; last_cycle = last_tick * CYC_PER_TICK; arm_arch_timer_set_compare(last_cycle + CYC_PER_TICK); irq_enable(ARM_ARCH_TIMER_IRQ); arm_arch_timer_set_irq_mask(false); return 0; } SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/arm_arch_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,819
```c /* * */ #define DT_DRV_COMPAT ambiq_stimer /** * @file * @brief Ambiq Apollo STIMER-based sys_clock driver * */ #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/sys_clock.h> #include <zephyr/irq.h> #include <zephyr/spinlock.h> /* ambiq-sdk includes */ #include <am_mcu_apollo.h> #define COUNTER_MAX UINT32_MAX #define CYC_PER_TICK (sys_clock_hw_cycles_per_sec() / CONFIG_SYS_CLOCK_TICKS_PER_SEC) #define MAX_TICKS ((k_ticks_t)(COUNTER_MAX / CYC_PER_TICK) - 1) #define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK) #define MIN_DELAY 1 #define TIMER_IRQ (DT_INST_IRQN(0)) #if defined(CONFIG_TEST) const int32_t z_sys_timer_irq_for_test = TIMER_IRQ; #endif /* Elapsed ticks since the previous kernel tick was announced, It will get accumulated every time * stimer_isr is triggered, or sys_clock_set_timeout/sys_clock_elapsed API is called. * It will be cleared after sys_clock_announce is called,. */ static uint32_t g_tick_elapsed; /* Value of STIMER counter when the previous timer API is called, this value is * aligned to tick boundary. It is updated along with the g_tick_elapsed value. */ static uint32_t g_last_time_stamp; /* Spinlock to sync between Compare ISR and update of Compare register */ static struct k_spinlock g_lock; static void update_tick_counter(void) { /* Read current cycle count. */ uint32_t now = am_hal_stimer_counter_get(); /* If current cycle count is smaller than the last time stamp, a counter overflow happened. * We need to extend the current counter value to 64 bits and add it with 0xFFFFFFFF * to get the correct elapsed cycles. */ uint64_t now_64 = (g_last_time_stamp <= now) ? (uint64_t)now : (uint64_t)now + COUNTER_MAX; /* Get elapsed cycles */ uint32_t elapsed_cycle = (now_64 - g_last_time_stamp); /* Get elapsed ticks. 
*/ uint32_t dticks = elapsed_cycle / CYC_PER_TICK; g_last_time_stamp += dticks * CYC_PER_TICK; g_tick_elapsed += dticks; } static void stimer_isr(const void *arg) { ARG_UNUSED(arg); uint32_t irq_status = am_hal_stimer_int_status_get(false); if (irq_status & AM_HAL_STIMER_INT_COMPAREA) { am_hal_stimer_int_clear(AM_HAL_STIMER_INT_COMPAREA); k_spinlock_key_t key = k_spin_lock(&g_lock); /*Calculate the elapsed ticks based on the current cycle count*/ update_tick_counter(); if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { /* Get the counter value to trigger the next tick interrupt. */ uint64_t next = (uint64_t)g_last_time_stamp + CYC_PER_TICK; /* Read current cycle count. */ uint32_t now = am_hal_stimer_counter_get(); /* If current cycle count is smaller than the last time stamp, a counter * overflow happened. We need to extend the current counter value to 64 bits * and add 0xFFFFFFFF to get the correct elapsed cycles. */ uint64_t now_64 = (g_last_time_stamp <= now) ? (uint64_t)now : (uint64_t)now + COUNTER_MAX; uint32_t delta = (now_64 + MIN_DELAY < next) ? (next - now_64) : MIN_DELAY; /* Set delta. */ am_hal_stimer_compare_delta_set(0, delta); } k_spin_unlock(&g_lock, key); sys_clock_announce(g_tick_elapsed); g_tick_elapsed = 0; } } void sys_clock_set_timeout(int32_t ticks, bool idle) { ARG_UNUSED(idle); if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return; } /* Adjust the ticks to the range of [1, MAX_TICKS]. */ ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks; ticks = CLAMP(ticks, 1, (int32_t)MAX_TICKS); k_spinlock_key_t key = k_spin_lock(&g_lock); /* Update the internal tick counter*/ update_tick_counter(); /* Get current hardware counter value.*/ uint32_t now = am_hal_stimer_counter_get(); /* last: the last recorded counter value. * now_64: current counter value. Extended to uint64_t to easy the handing of hardware * counter overflow. * next: counter values where to trigger the scheduled timeout. 
* last < now_64 < next */ uint64_t last = (uint64_t)g_last_time_stamp; uint64_t now_64 = (g_last_time_stamp <= now) ? (uint64_t)now : (uint64_t)now + COUNTER_MAX; uint64_t next = now_64 + ticks * CYC_PER_TICK; uint32_t gap = next - last; uint32_t gap_aligned = (gap / CYC_PER_TICK) * CYC_PER_TICK; uint64_t next_aligned = last + gap_aligned; uint32_t delta = next_aligned - now_64; if (delta <= MIN_DELAY) { /*If the delta value is smaller than MIN_DELAY, trigger a interrupt immediately*/ am_hal_stimer_int_set(AM_HAL_STIMER_INT_COMPAREA); } else { am_hal_stimer_compare_delta_set(0, delta); } k_spin_unlock(&g_lock, key); } uint32_t sys_clock_elapsed(void) { if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return 0; } k_spinlock_key_t key = k_spin_lock(&g_lock); update_tick_counter(); k_spin_unlock(&g_lock, key); return g_tick_elapsed; } uint32_t sys_clock_cycle_get_32(void) { return am_hal_stimer_counter_get(); } static int stimer_init(void) { uint32_t oldCfg; oldCfg = am_hal_stimer_config(AM_HAL_STIMER_CFG_FREEZE); #if defined(CONFIG_SOC_SERIES_APOLLO3X) am_hal_stimer_config((oldCfg & ~(AM_HAL_STIMER_CFG_FREEZE | CTIMER_STCFG_CLKSEL_Msk)) | AM_HAL_STIMER_XTAL_32KHZ | AM_HAL_STIMER_CFG_COMPARE_A_ENABLE); #else am_hal_stimer_config((oldCfg & ~(AM_HAL_STIMER_CFG_FREEZE | STIMER_STCFG_CLKSEL_Msk)) | AM_HAL_STIMER_XTAL_32KHZ | AM_HAL_STIMER_CFG_COMPARE_A_ENABLE); #endif g_last_time_stamp = am_hal_stimer_counter_get(); NVIC_ClearPendingIRQ(TIMER_IRQ); IRQ_CONNECT(TIMER_IRQ, 0, stimer_isr, 0, 0); irq_enable(TIMER_IRQ); am_hal_stimer_int_enable(AM_HAL_STIMER_INT_COMPAREA); /* Start timer with period CYC_PER_TICK if tickless is not enabled */ if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { am_hal_stimer_compare_delta_set(0, CYC_PER_TICK); } return 0; } SYS_INIT(stimer_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/ambiq_stimer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,606
```unknown # Common RTC configuration if NRF_RTC_TIMER || NRF_GRTC_TIMER choice prompt "Clock startup policy" default SYSTEM_CLOCK_WAIT_FOR_STABILITY config SYSTEM_CLOCK_NO_WAIT bool "No wait" help System clock source is initiated but does not wait for clock readiness. When this option is picked, system clock may not be ready when code relying on kernel API is executed. Requested timeouts will be prolonged by the remaining startup time. config SYSTEM_CLOCK_WAIT_FOR_AVAILABILITY bool "Wait for availability" help System clock source initialization waits until clock is available. In some systems, clock initially runs from less accurate source which has faster startup time and then seamlessly switches to the target clock source when it is ready. When this option is picked, system clock is available after system clock driver initialization but it may be less accurate. Option is equivalent to waiting for stability if clock source does not have intermediate state. config SYSTEM_CLOCK_WAIT_FOR_STABILITY bool "Wait for stability" help System clock source initialization waits until clock is stable. When this option is picked, system clock is available and stable after system clock driver initialization. endchoice endif # NRF_RTC_TIMER || NRF_GRTC_TIMER ```
/content/code_sandbox/drivers/timer/Kconfig.nrf_xrtc
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
269
```c /* * */ /* * This driver uses two independent GPTIMER subtimers in the following way: * - subtimer 0 generates periodic interrupts and the ISR announces ticks. * - subtimer 1 is used as up-counter. */ #define DT_DRV_COMPAT gaisler_gptimer #include <zephyr/init.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/irq.h> #include <zephyr/sys_clock.h> /* GPTIMER subtimer increments each microsecond. */ #define PRESCALER (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / 1000000) /* GPTIMER Timer instance */ struct gptimer_timer_regs { uint32_t counter; uint32_t reload; uint32_t ctrl; uint32_t latch; }; /* A GPTIMER can have maximum of 7 subtimers. */ #define GPTIMER_MAX_SUBTIMERS 7 /* GPTIMER common registers */ struct gptimer_regs { uint32_t scaler_value; uint32_t scaler_reload; uint32_t cfg; uint32_t latch_cfg; struct gptimer_timer_regs timer[GPTIMER_MAX_SUBTIMERS]; }; #define GPTIMER_CTRL_WN (1 << 7) #define GPTIMER_CTRL_IP (1 << 4) #define GPTIMER_CTRL_IE (1 << 3) #define GPTIMER_CTRL_LD (1 << 2) #define GPTIMER_CTRL_RS (1 << 1) #define GPTIMER_CTRL_EN (1 << 0) #define GPTIMER_CFG_EL (1 << 11) #define GPTIMER_CFG_DF (1 << 9) #define GPTIMER_CFG_SI (1 << 8) #define GPTIMER_CFG_IRQ (0x1f << 3) #define GPTIMER_CFG_TIMERS (7 << 0) static volatile struct gptimer_regs *get_regs(void) { return (struct gptimer_regs *) DT_INST_REG_ADDR(0); } static int get_timer_irq(void) { return DT_INST_IRQN(0); } static uint32_t gptimer_ctrl_clear_ip; static void timer_isr(const void *unused) { ARG_UNUSED(unused); volatile struct gptimer_regs *regs = get_regs(); volatile struct gptimer_timer_regs *tmr = &regs->timer[0]; uint32_t ctrl; ctrl = tmr->ctrl; if ((ctrl & GPTIMER_CTRL_IP) == 0) { return; /* interrupt not for us */ } /* Clear pending */ tmr->ctrl = GPTIMER_CTRL_IE | GPTIMER_CTRL_RS | GPTIMER_CTRL_EN | gptimer_ctrl_clear_ip; sys_clock_announce(1); } uint32_t sys_clock_elapsed(void) { return 0; } uint32_t sys_clock_cycle_get_32(void) { volatile struct gptimer_regs *regs = 
get_regs(); volatile struct gptimer_timer_regs *tmr = &regs->timer[1]; uint32_t counter = tmr->counter; return (0 - counter) * PRESCALER; } static void init_downcounter(volatile struct gptimer_timer_regs *tmr) { tmr->reload = 0xFFFFFFFF; tmr->ctrl = GPTIMER_CTRL_LD | GPTIMER_CTRL_RS | GPTIMER_CTRL_EN; } static int sys_clock_driver_init(void) { const int timer_interrupt = get_timer_irq(); volatile struct gptimer_regs *regs = get_regs(); volatile struct gptimer_timer_regs *tmr = &regs->timer[0]; init_downcounter(&regs->timer[1]); /* Stop timer and probe how CTRL_IP is cleared (write 1 or 0). */ tmr->ctrl = GPTIMER_CTRL_IP; if ((tmr->ctrl & GPTIMER_CTRL_IP) == 0) { /* IP bit is cleared by setting it to 1. */ gptimer_ctrl_clear_ip = GPTIMER_CTRL_IP; } /* Configure timer scaler for 1 MHz subtimer tick */ regs->scaler_reload = PRESCALER - 1; tmr->reload = 1000000U / CONFIG_SYS_CLOCK_TICKS_PER_SEC - 1; tmr->ctrl = GPTIMER_CTRL_IE | GPTIMER_CTRL_LD | GPTIMER_CTRL_RS | GPTIMER_CTRL_EN; irq_connect_dynamic(timer_interrupt, 0, timer_isr, NULL, 0); irq_enable(timer_interrupt); return 0; } SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/leon_gptimer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
972
```unknown config RCAR_CMT_TIMER bool "Renesas RCar cmt timer" default y depends on DT_HAS_RENESAS_RCAR_CMT_ENABLED help This module implements a kernel device driver for the Renesas RCAR platform provides the standard "system clock driver" interfaces. If unchecked, no timer will be used. ```
/content/code_sandbox/drivers/timer/Kconfig.rcar_cmt
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
77
```unknown # STM32 LPTIM configuration options DT_CHOSEN_STDBY_TIMER := st,lptim-stdby-timer menuconfig STM32_LPTIM_TIMER bool "STM32 Low Power Timer [EXPERIMENTAL]" default y depends on DT_HAS_ST_STM32_LPTIM_ENABLED depends on CLOCK_CONTROL && PM select TICKLESS_CAPABLE select EXPERIMENTAL help This module implements a kernel device driver for the LowPower Timer and provides the standard "system clock driver" interfaces. if STM32_LPTIM_TIMER choice STM32_LPTIM_CLOCK prompt "LPTIM clock value configuration" help This option is deprecated and configuration of LPTIM domain clock using devicetree should be preferred. config STM32_LPTIM_CLOCK_LSI bool "LSI" help Use LSI as LPTIM clock config STM32_LPTIM_CLOCK_LSE bool "LSE" help Use LSE as LPTIM clock endchoice config STM32_LPTIM_CLOCK int default 32768 if STM32_LPTIM_CLOCK_LSE default 32000 if STM32_LPTIM_CLOCK_LSI config STM32_LPTIM_TIMEBASE hex "LPTIM AutoReload value" default 0xFFFF if STM32_LPTIM_CLOCK_LSE default 0xF9FF if STM32_LPTIM_CLOCK_LSI config STM32_LPTIM_TICK_FREQ_RATIO_OVERRIDE bool "Override tick to freq ratio check" default y if ZTEST help For LPTIM configuration, a specific tick freq is advised depending on LPTIM input clock: - LSI(32KHz): 4000 ticks/sec - LSE(32678): 4096 ticks/sec To prevent misconfigurations, a dedicated check is implemented in the driver. This options allows to override this check config STM32_LPTIM_STDBY_TIMER bool default $(dt_chosen_enabled,$(DT_CHOSEN_STDBY_TIMER)) depends on COUNTER depends on TICKLESS_KERNEL select EXPERIMENTAL help Use an additional timer while entering Standby mode. There are chips e.g. STM32WBAX family that use LPTIM as a system timer, but LPTIM is not clocked in standby mode. These chips usually have another timer that is not stopped, but it has lower frequency e.g. RTC, thus it can't be used as a main system timer. Same approach is used on STM32U5 and STOP3 mode. Use the Standby timer for timeout (wakeup) when the system is entering Standby state. 
The chosen Standby timer node has to support setting alarm from the counter API. endif # STM32_LPTIM_TIMER ```
/content/code_sandbox/drivers/timer/Kconfig.stm32_lptim
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
614
```c /* * * Based on: * */ #define DT_DRV_COMPAT silabs_gecko_burtc /** * @file * @brief SiLabs Gecko BURTC-based sys_clock driver * */ #include <zephyr/init.h> #include <soc.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/sys_clock.h> #include <zephyr/irq.h> #include <zephyr/spinlock.h> #include <zephyr/logging/log.h> #include "em_device.h" #include "em_cmu.h" #include "em_burtc.h" LOG_MODULE_REGISTER(gecko_burtc_timer); /* Maximum time interval between timer interrupts (in hw_cycles) */ #define MAX_TIMEOUT_CYC (UINT32_MAX >> 1) /* * Mininum time interval between now and IRQ firing that can be scheduled. * The main cause for this is LFSYNC register update, which requires several * LF clk cycles for synchronization. * Seee e.g. "4.2.4.4.4 LFSYNC Registers" in "EFR32xG22 Reference Manual" */ #define MIN_DELAY_CYC (6u) #define TIMER_IRQ (DT_INST_IRQN(0)) #if defined(CONFIG_TEST) /* See tests/kernel/context */ const int32_t z_sys_timer_irq_for_test = TIMER_IRQ; #endif /* With CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME, that's where we * should write hw_cycles timer clock frequency upon init */ extern int z_clock_hw_cycles_per_sec; /* Number of hw_cycles clocks per 1 kernel tick */ static uint32_t g_cyc_per_tick; /* MAX_TIMEOUT_CYC expressed as ticks */ static uint32_t g_max_timeout_ticks; /* Value of BURTC counter when the previous kernel tick was announced */ static atomic_t g_last_count; /* Spinlock to sync between Compare ISR and update of Compare register */ static struct k_spinlock g_lock; /* Set to true when timer is initialized */ static bool g_init; static void burtc_isr(const void *arg) { ARG_UNUSED(arg); /* Clear the interrupt */ BURTC_IntClear(BURTC_IF_COMP); uint32_t curr = BURTC_CounterGet(); /* NOTE: this is the only place where g_last_count is modified, * so we don't need to do make the whole read-and-modify atomic, just * writing it behind the memory 
barrier is enough */ uint32_t prev = atomic_get(&g_last_count); /* How many ticks have we not announced since the last announcement */ uint32_t unannounced = (curr - prev) / g_cyc_per_tick; atomic_set(&g_last_count, prev + unannounced * g_cyc_per_tick); if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { /* Counter value on which announcement should be made */ uint32_t next = prev + g_cyc_per_tick; /* `next` can be too close in the future since we're trying to * announce the very next tick - in that case we skip one and * announce the one after it instead */ if ((next - curr) < MIN_DELAY_CYC) { next += g_cyc_per_tick; } BURTC_CompareSet(0, next); } sys_clock_announce(unannounced); } void sys_clock_set_timeout(int32_t ticks, bool idle) { ARG_UNUSED(idle); if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return; } /* * calculate 'ticks' value that specifies which tick to announce, * beginning from the closest upcoming one: * 0 - announce upcoming tick itself * 1 - skip upcoming one, but announce the one after it, etc. */ ticks = (ticks == K_TICKS_FOREVER) ? g_max_timeout_ticks : ticks; ticks = CLAMP(ticks - 1, 0, g_max_timeout_ticks); k_spinlock_key_t key = k_spin_lock(&g_lock); uint32_t curr = BURTC_CounterGet(); uint32_t prev = atomic_get(&g_last_count); /* How many ticks have we not announced since the last announcement */ uint32_t unannounced = (curr - prev) / g_cyc_per_tick; /* Which tick to announce (counting from the last announced one) */ uint32_t to_announce = unannounced + ticks + 1; /* Force maximum interval between announcements. If we sit without * announcements for too long, counter will roll over and we'll lose * track of unannounced ticks. 
*/ to_announce = MIN(to_announce, g_max_timeout_ticks); /* Counter value on which announcement should be made */ uint32_t next = prev + to_announce * g_cyc_per_tick; /* `next` can be too close in the future if we're trying to announce * the very next tick - in that case we skip one and announce the one * after it instead */ if ((next - curr) < MIN_DELAY_CYC) { next += g_cyc_per_tick; } BURTC_CompareSet(0, next); k_spin_unlock(&g_lock, key); } uint32_t sys_clock_elapsed(void) { if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL) || !g_init) { return 0; } else { return (BURTC_CounterGet() - g_last_count) / g_cyc_per_tick; } } uint32_t sys_clock_cycle_get_32(void) { /* API note: this function is unrelated to kernel ticks, it returns * a value of some 32-bit hw_cycles counter which counts with * z_clock_hw_cycles_per_sec frequency */ if (!g_init) { return 0; } else { return BURTC_CounterGet(); } } static int burtc_init(void) { uint32_t hw_clock_freq; BURTC_Init_TypeDef init = BURTC_INIT_DEFAULT; /* Enable clock for BURTC CSRs on APB */ CMU_ClockEnable(cmuClock_BURTC, true); /* Configure BURTC LF clocksource according to Kconfig */ #if defined(CONFIG_CMU_BURTCCLK_LFXO) CMU_ClockSelectSet(cmuClock_BURTC, cmuSelect_LFXO); #elif defined(CONFIG_CMU_BURTCCLK_LFRCO) CMU_ClockSelectSet(cmuClock_BURTC, cmuSelect_LFRCO); #elif defined(CONFIG_CMU_BURTCCLK_ULFRCO) CMU_ClockSelectSet(cmuClock_BURTC, cmuSelect_ULFRCO); #else #error "Unsupported BURTC clock specified" #endif /* Calculate timing constants and init BURTC */ hw_clock_freq = CMU_ClockFreqGet(cmuClock_BURTC); z_clock_hw_cycles_per_sec = hw_clock_freq; BUILD_ASSERT(CONFIG_SYS_CLOCK_TICKS_PER_SEC > 0, "Invalid CONFIG_SYS_CLOCK_TICKS_PER_SEC value"); g_cyc_per_tick = hw_clock_freq / CONFIG_SYS_CLOCK_TICKS_PER_SEC; __ASSERT(g_cyc_per_tick >= MIN_DELAY_CYC, "%u cycle-long tick is too short to be scheduled " "(min is %u). 
Config: SYS_CLOCK_TICKS_PER_SEC is " "%d and timer frequency is %u", g_cyc_per_tick, MIN_DELAY_CYC, CONFIG_SYS_CLOCK_TICKS_PER_SEC, hw_clock_freq); g_max_timeout_ticks = MAX_TIMEOUT_CYC / g_cyc_per_tick; init.clkDiv = 1; init.start = false; BURTC_Init(&init); g_init = true; /* Enable compare match interrupt */ BURTC_IntClear(BURTC_IF_COMP); BURTC_IntEnable(BURTC_IF_COMP); NVIC_ClearPendingIRQ(TIMER_IRQ); IRQ_CONNECT(TIMER_IRQ, DT_INST_IRQ(0, priority), burtc_isr, 0, 0); irq_enable(TIMER_IRQ); /* Start the timer and announce 1 kernel tick */ atomic_set(&g_last_count, 0); BURTC_CompareSet(0, g_cyc_per_tick); BURTC_SyncWait(); BURTC->CNT = 0; BURTC_Start(); return 0; } SYS_INIT(burtc_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/gecko_burtc_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,824
```c /* * */ #include <zephyr/device.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/smartbond_clock_control.h> #include <zephyr/drivers/timer/system_timer.h> #include <zephyr/sys_clock.h> #include <zephyr/spinlock.h> #include <cmsis_core.h> #include <zephyr/irq.h> #include <da1469x_pdc.h> #define COUNTER_SPAN BIT(24) #define CYC_PER_TICK k_ticks_to_cyc_ceil32(1) #define TICK_TO_CYC(tick) k_ticks_to_cyc_ceil32(tick) #define CYC_TO_TICK(cyc) k_cyc_to_ticks_ceil32(cyc) #define MAX_TICKS (((COUNTER_SPAN / 2) - CYC_PER_TICK) / (CYC_PER_TICK)) #define SMARTBOND_CLOCK_CONTROLLER DEVICE_DT_GET(DT_NODELABEL(osc)) /* Margin values are based on DA1469x characterization data */ #define RC32K_FREQ_POSITIVE_MARGIN_DUE_TO_VOLTAGE (675) #define RC32K_FREQ_MARGIN_DUE_TO_TEMPERATURE (450) static uint32_t last_timer_val_reg; static uint32_t timer_val_31_24; static uint32_t last_isr_val; static uint32_t last_isr_val_rounded; static uint32_t announced_ticks; static uint32_t get_rc32k_max_frequency(void) { /* According to DA1469x datasheet */ uint32_t r32k_frequency = 37000; clock_control_get_rate(SMARTBOND_CLOCK_CONTROLLER, (clock_control_subsys_t)SMARTBOND_CLK_RC32K, &r32k_frequency); r32k_frequency += RC32K_FREQ_POSITIVE_MARGIN_DUE_TO_VOLTAGE + RC32K_FREQ_MARGIN_DUE_TO_TEMPERATURE; return r32k_frequency; } static void set_reload(uint32_t val) { TIMER2->TIMER2_RELOAD_REG = val & TIMER2_TIMER2_RELOAD_REG_TIM_RELOAD_Msk; } static uint32_t timer_val_32(void) { uint32_t timer_val_reg; uint32_t val; timer_val_reg = TIMER2->TIMER2_TIMER_VAL_REG & TIMER2_TIMER2_TIMER_VAL_REG_TIM_TIMER_VALUE_Msk; if (timer_val_reg < last_timer_val_reg) { timer_val_31_24 += COUNTER_SPAN; } last_timer_val_reg = timer_val_reg; val = timer_val_31_24 + timer_val_reg; return val; } static uint32_t timer_val_32_noupdate(void) { uint32_t timer_val_reg; uint32_t val; timer_val_reg = TIMER2->TIMER2_TIMER_VAL_REG & TIMER2_TIMER2_TIMER_VAL_REG_TIM_TIMER_VALUE_Msk; val = 
timer_val_31_24 + timer_val_reg; if (timer_val_reg < last_timer_val_reg) { val += COUNTER_SPAN; } return val; } void sys_clock_set_timeout(int32_t ticks, bool idle) { uint32_t target_val; uint32_t timer_val; if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return; } if (ticks == K_TICKS_FOREVER) { /* FIXME we could disable timer here */ } /* * When Watchdog is NOT enabled but power management is, system * starts watchdog before PD_SYS is powered off. * Watchdog default reload value is 0x1FFF (~82s for RC32K and 172s for RCX). * After this time watchdog will reset system if not woken up before. * When Watchdog is not configured power management freezes watchdog * as soon as system is awaken. Following code makes sure that * system never goes to sleep for longer time that watchdog reload value. */ if (!IS_ENABLED(CONFIG_WDT_SMARTBOND) && IS_ENABLED(CONFIG_PM)) { uint32_t watchdog_expire_ticks; if (CRG_TOP->CLK_RCX_REG & CRG_TOP_CLK_RCX_REG_RCX_ENABLE_Msk) { /* * When LP clock is RCX, the watchdog is clocked by RCX clock * divided by 320. */ watchdog_expire_ticks = SYS_WDOG->WATCHDOG_REG * 320; } else { /* * When LP clock is not RCX, the watchdog is clocked by RC32K * divided by 320. In this case watchdog value to LP clock * ticks must be calculated according to XTAL32K frequency and * RC32K maximum frequency. */ watchdog_expire_ticks = SYS_WDOG->WATCHDOG_REG * CONFIG_SYS_CLOCK_TICKS_PER_SEC / (get_rc32k_max_frequency() / 320); } if (watchdog_expire_ticks - 2 < ticks) { ticks = watchdog_expire_ticks - 2; } } ticks = (ticks == K_TICKS_FOREVER) ? 
MAX_TICKS : ticks; ticks = CLAMP(ticks - 1, 0, (int32_t)MAX_TICKS); timer_val = timer_val_32_noupdate(); /* Calculate target timer value and align to full tick */ target_val = timer_val + TICK_TO_CYC(ticks); target_val = ((target_val + CYC_PER_TICK - 1) / CYC_PER_TICK) * CYC_PER_TICK; set_reload(target_val); /* * If time was so small that it already fired or should fire * just now, mark interrupt as pending to avoid losing timer event. * Condition is true when target_val (point in time that should be * used for wakeup) is behind timer value or is equal to it. * In that case we don't know if reload value was set in time or * not but time expired anyway so make sure that interrupt is pending. */ if ((int32_t)(target_val - timer_val_32_noupdate() - 1) < 0) { NVIC_SetPendingIRQ(TIMER2_IRQn); } } uint32_t sys_clock_elapsed(void) { if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { return 0; } return CYC_TO_TICK(timer_val_32_noupdate() - last_isr_val); } uint32_t sys_clock_cycle_get_32(void) { return timer_val_32_noupdate(); } void sys_clock_idle_exit(void) { TIMER2->TIMER2_CTRL_REG |= TIMER2_TIMER2_CTRL_REG_TIM_EN_Msk; } void sys_clock_disable(void) { TIMER2->TIMER2_CTRL_REG &= ~TIMER2_TIMER2_CTRL_REG_TIM_EN_Msk; } static void timer2_isr(const void *arg) { uint32_t val; int32_t delta; int32_t dticks; ARG_UNUSED(arg); TIMER2->TIMER2_CLEAR_IRQ_REG = 1; val = timer_val_32(); delta = (int32_t)(val - last_isr_val_rounded); last_isr_val = val; dticks = CYC_TO_TICK(delta); last_isr_val_rounded += TICK_TO_CYC(dticks); announced_ticks += dticks; sys_clock_announce(dticks); } static int sys_clock_driver_init(void) { #if CONFIG_PM uint8_t pdc_idx; uint8_t en_xtal; en_xtal = DT_NODE_HAS_STATUS(DT_NODELABEL(xtal32m), okay) ? 
MCU_PDC_EN_XTAL : 0; /* Enable wakeup by TIMER2 */ pdc_idx = da1469x_pdc_add(MCU_PDC_TRIGGER_TIMER2, MCU_PDC_MASTER_M33, en_xtal); __ASSERT_NO_MSG(pdc_idx >= 0); da1469x_pdc_set(pdc_idx); da1469x_pdc_ack(pdc_idx); #endif TIMER2->TIMER2_CTRL_REG = 0; TIMER2->TIMER2_PRESCALER_REG = 0; TIMER2->TIMER2_CTRL_REG |= TIMER2_TIMER2_CTRL_REG_TIM_CLK_EN_Msk; TIMER2->TIMER2_CTRL_REG |= TIMER2_TIMER2_CTRL_REG_TIM_FREE_RUN_MODE_EN_Msk | TIMER2_TIMER2_CTRL_REG_TIM_IRQ_EN_Msk | TIMER2_TIMER2_CTRL_REG_TIM_EN_Msk; IRQ_CONNECT(TIMER2_IRQn, _IRQ_PRIO_OFFSET, timer2_isr, 0, 0); irq_enable(TIMER2_IRQn); return 0; } SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/smartbond_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,810
```unknown config NATIVE_POSIX_TIMER bool "(POSIX) native_sim/posix timer driver" default y depends on BOARD_NATIVE_POSIX select TICKLESS_CAPABLE select TIMER_HAS_64BIT_CYCLE_COUNTER select SYSTEM_TIMER_HAS_DISABLE_SUPPORT help This module implements a kernel device driver for the native_sim/posix HW timer model ```
/content/code_sandbox/drivers/timer/Kconfig.native_posix
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
76
```c /* * */ #define DT_DRV_COMPAT openisa_rv32m1_lptmr #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/sys/util.h> #include <zephyr/drivers/timer/system_timer.h> #include <soc.h> #include <zephyr/irq.h> /* * This is just a getting started point. * * Assumptions and limitations: * * - system clock based on an LPTMR instance, clocked by SIRC output * SIRCDIV3, prescaler divide-by-1, SIRC at 8MHz * - no tickless */ #define CYCLES_PER_SEC sys_clock_hw_cycles_per_sec() #define CYCLES_PER_TICK (CYCLES_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC) #if defined(CONFIG_TEST) const int32_t z_sys_timer_irq_for_test = DT_IRQN(DT_ALIAS(system_lptmr)); #endif /* * As a simplifying assumption, we only support a clock ticking at the * SIRC reset rate of 8MHz. */ #if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) || \ (MHZ(8) != CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) #error "system timer misconfiguration; unsupported clock rate" #endif #define SYSTEM_TIMER_INSTANCE \ ((LPTMR_Type *)(DT_INST_REG_ADDR(0))) #define SIRC_RANGE_8MHZ SCG_SIRCCFG_RANGE(1) #define SIRCDIV3_DIVIDE_BY_1 1 #define PCS_SOURCE_SIRCDIV3 0 struct device; /* forward declaration; type is not used. */ static volatile uint32_t cycle_count; static void lptmr_irq_handler(const struct device *unused) { ARG_UNUSED(unused); SYSTEM_TIMER_INSTANCE->CSR |= LPTMR_CSR_TCF(1); /* Rearm timer. */ cycle_count += CYCLES_PER_TICK; /* Track cycles. */ sys_clock_announce(1); /* Poke the scheduler. */ } uint32_t sys_clock_cycle_get_32(void) { return cycle_count + SYSTEM_TIMER_INSTANCE->CNR; } /* * Since we're not tickless, this is identically zero. */ uint32_t sys_clock_elapsed(void) { return 0; } static int sys_clock_driver_init(void) { uint32_t csr, psr, sircdiv; /* LPTMR registers */ IRQ_CONNECT(DT_INST_IRQN(0), 0, lptmr_irq_handler, NULL, 0); if ((SCG->SIRCCSR & SCG_SIRCCSR_SIRCEN_MASK) == SCG_SIRCCSR_SIRCEN(0)) { /* * SIRC is on by default, so something else turned it off. 
* * This is incompatible with this driver, which is SIRC-based. */ return -ENODEV; } /* Disable the timer and clear any pending IRQ. */ csr = SYSTEM_TIMER_INSTANCE->CSR; csr &= ~LPTMR_CSR_TEN(0); csr |= LPTMR_CSR_TFC(1); SYSTEM_TIMER_INSTANCE->CSR = csr; /* * Set up the timer clock source and configure the timer. */ /* * SIRCDIV3 is the SIRC divider for LPTMR (SoC dependent). * Pass it directly through without any divider. */ sircdiv = SCG->SIRCDIV; sircdiv &= ~SCG_SIRCDIV_SIRCDIV3_MASK; sircdiv |= SCG_SIRCDIV_SIRCDIV3(SIRCDIV3_DIVIDE_BY_1); SCG->SIRCDIV = sircdiv; /* * TMS = 0: time counter mode, not pulse counter * TCF = 0: reset counter register on reaching compare value * TDRE = 0: disable DMA request */ csr &= ~(LPTMR_CSR_TMS(1) | LPTMR_CSR_TFC(1) | LPTMR_CSR_TDRE(1)); /* * TIE = 1: enable interrupt */ csr |= LPTMR_CSR_TIE(1); SYSTEM_TIMER_INSTANCE->CSR = csr; /* * PCS = 0: clock source is SIRCDIV3 (SoC dependent) * PBYP = 1: bypass the prescaler */ psr = SYSTEM_TIMER_INSTANCE->PSR; psr &= ~LPTMR_PSR_PCS_MASK; psr |= (LPTMR_PSR_PBYP(1) | LPTMR_PSR_PCS(PCS_SOURCE_SIRCDIV3)); SYSTEM_TIMER_INSTANCE->PSR = psr; /* * Set compare register to the proper tick count. The check * here makes sure SIRC is left at its default reset value to * make the defconfig setting work properly. * * TODO: be smarter to meet arbitrary Kconfig settings. */ if ((SCG->SIRCCFG & SCG_SIRCCFG_RANGE_MASK) != SIRC_RANGE_8MHZ) { return -EINVAL; } SYSTEM_TIMER_INSTANCE->CMR = CYCLES_PER_TICK; /* * Enable interrupts and the timer. There's no need to clear the * TFC bit in the csr variable, as it's already clear. */ irq_enable(DT_INST_IRQN(0)); csr = SYSTEM_TIMER_INSTANCE->CSR; csr |= LPTMR_CSR_TEN(1); SYSTEM_TIMER_INSTANCE->CSR = csr; return 0; } SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/rv32m1_lptmr_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,225
```c /* * */ #define DT_DRV_COMPAT litex_timer0 #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/init.h> #include <zephyr/irq.h> #include <zephyr/spinlock.h> #include <zephyr/drivers/timer/system_timer.h> #include <soc.h> #define TIMER_LOAD_ADDR DT_INST_REG_ADDR_BY_NAME(0, load) #define TIMER_RELOAD_ADDR DT_INST_REG_ADDR_BY_NAME(0, reload) #define TIMER_EN_ADDR DT_INST_REG_ADDR_BY_NAME(0, en) #define TIMER_UPDATE_VALUE_ADDR DT_INST_REG_ADDR_BY_NAME(0, update_value) #define TIMER_VALUE_ADDR DT_INST_REG_ADDR_BY_NAME(0, value) #define TIMER_EV_STATUS_ADDR DT_INST_REG_ADDR_BY_NAME(0, ev_status) #define TIMER_EV_PENDING_ADDR DT_INST_REG_ADDR_BY_NAME(0, ev_pending) #define TIMER_EV_ENABLE_ADDR DT_INST_REG_ADDR_BY_NAME(0, ev_enable) #define TIMER_UPTIME_LATCH_ADDR DT_INST_REG_ADDR_BY_NAME(0, uptime_latch) #define TIMER_UPTIME_CYCLES_ADDR DT_INST_REG_ADDR_BY_NAME(0, uptime_cycles) #define TIMER_EV 0x1 #define TIMER_IRQ DT_INST_IRQN(0) #define TIMER_DISABLE 0x0 #define TIMER_ENABLE 0x1 #define TIMER_UPTIME_LATCH 0x1 #if defined(CONFIG_TEST) const int32_t z_sys_timer_irq_for_test = TIMER_IRQ; #endif static void litex_timer_irq_handler(const void *device) { unsigned int key = irq_lock(); litex_write8(TIMER_EV, TIMER_EV_PENDING_ADDR); sys_clock_announce(1); irq_unlock(key); } uint32_t sys_clock_cycle_get_32(void) { static struct k_spinlock lock; uint32_t uptime_cycles; k_spinlock_key_t key = k_spin_lock(&lock); litex_write8(TIMER_UPTIME_LATCH, TIMER_UPTIME_LATCH_ADDR); uptime_cycles = (uint32_t)litex_read64(TIMER_UPTIME_CYCLES_ADDR); k_spin_unlock(&lock, key); return uptime_cycles; } uint64_t sys_clock_cycle_get_64(void) { static struct k_spinlock lock; uint64_t uptime_cycles; k_spinlock_key_t key = k_spin_lock(&lock); litex_write8(TIMER_UPTIME_LATCH, TIMER_UPTIME_LATCH_ADDR); uptime_cycles = litex_read64(TIMER_UPTIME_CYCLES_ADDR); k_spin_unlock(&lock, key); return uptime_cycles; } /* tickless kernel is not supported */ uint32_t sys_clock_elapsed(void) 
{ return 0; } static int sys_clock_driver_init(void) { IRQ_CONNECT(TIMER_IRQ, DT_INST_IRQ(0, priority), litex_timer_irq_handler, NULL, 0); irq_enable(TIMER_IRQ); litex_write8(TIMER_DISABLE, TIMER_EN_ADDR); litex_write32(k_ticks_to_cyc_floor32(1), TIMER_RELOAD_ADDR); litex_write32(k_ticks_to_cyc_floor32(1), TIMER_LOAD_ADDR); litex_write8(TIMER_ENABLE, TIMER_EN_ADDR); litex_write8(litex_read8(TIMER_EV_PENDING_ADDR), TIMER_EV_PENDING_ADDR); litex_write8(TIMER_EV, TIMER_EV_ENABLE_ADDR); return 0; } SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); ```
/content/code_sandbox/drivers/timer/litex_timer.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
758
```unknown config ESPI_EMUL bool "eSPI emulator" default y depends on DT_HAS_ZEPHYR_ESPI_EMUL_CONTROLLER_ENABLED depends on EMUL help Enable the eSPI emulator driver. This is a fake driver, it does not talk to real hardware. Instead it talks to emulation drivers that pretend to be devices on the emulated eSPI bus. It is used for testing drivers for eSPI devices. eSPI is an interface using SPI wires, whose main goal is to reduce the number of required pins. It includes the functionality of LPC, SMB, SPI itself (flash access) and GPIO (virtual wires). Please refer to the specification for more details (it is good for the introduction as well) path_to_url ```
/content/code_sandbox/drivers/espi/Kconfig.espi_emul
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
170
```c /* * */ #define DT_DRV_COMPAT nuvoton_npcx_espi #include <assert.h> #include <stdlib.h> #include <zephyr/drivers/espi.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/dt-bindings/espi/npcx_espi.h> #include <zephyr/kernel.h> #include <zephyr/sys/byteorder.h> #include <zephyr/sys/util.h> #include <soc.h> #include "espi_utils.h" #include "soc_host.h" #include "soc_miwu.h" #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(espi, CONFIG_ESPI_LOG_LEVEL); struct espi_npcx_config { uintptr_t base; /* clock configuration */ struct npcx_clk_cfg clk_cfg; /* mapping table between eSPI reset signal and wake-up input */ struct npcx_wui espi_rst_wui; /* pinmux configuration */ const struct pinctrl_dev_config *pcfg; }; struct espi_npcx_data { sys_slist_t callbacks; uint8_t plt_rst_asserted; uint8_t espi_rst_level; uint8_t sx_state; #if !defined(CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC) struct k_sem oob_rx_lock; #endif #if defined(CONFIG_ESPI_FLASH_CHANNEL) struct k_sem flash_rx_lock; #endif #ifdef CONFIG_ESPI_NPCX_CAF_GLOBAL_RESET_WORKAROUND /* tell the interrupt handler that it is a fake request */ bool fake_req_flag; #endif }; /* Driver convenience defines */ #define HAL_INSTANCE(dev) \ ((struct espi_reg *)((const struct espi_npcx_config *)(dev)->config)->base) /* eSPI channels */ #define NPCX_ESPI_CH_PC 0 #define NPCX_ESPI_CH_VW 1 #define NPCX_ESPI_CH_OOB 2 #define NPCX_ESPI_CH_FLASH 3 #define NPCX_ESPI_CH_COUNT 4 #define NPCX_ESPI_HOST_CH_EN(ch) (ch + 4) /* eSPI max supported frequency */ #define NPCX_ESPI_MAXFREQ_20 0 #define NPCX_ESPI_MAXFREQ_25 1 #define NPCX_ESPI_MAXFREQ_33 2 #define NPCX_ESPI_MAXFREQ_50 3 #define NPCX_ESPI_MAXFREQ_66 4 /* Minimum delay before acknowledging a virtual wire */ #define NPCX_ESPI_VWIRE_ACK_DELAY 10ul /* 10 us */ /* OOB channel maximum payload size */ #define NPCX_ESPI_OOB_MAX_PAYLOAD 64 #define NPCX_OOB_RX_PACKAGE_LEN(hdr) (((hdr & 
0xff000000) >> 24) | \ ((hdr & 0xf0000) >> 8)) /* Flash channel maximum payload size */ #define NPCX_ESPI_FLASH_MAX_RX_PAYLOAD DT_INST_PROP(0, rx_plsize) #define NPCX_ESPI_FLASH_MAX_TX_PAYLOAD DT_INST_PROP(0, tx_plsize) /* eSPI cycle type field for OOB and FLASH channels */ #define ESPI_FLASH_READ_CYCLE_TYPE 0x00 #define ESPI_FLASH_WRITE_CYCLE_TYPE 0x01 #define ESPI_FLASH_ERASE_CYCLE_TYPE 0x02 #define ESPI_FLASH_SUCCESS_WITH_DATA_CYCLE_TYPE 0x0f #define ESPI_FLASH_SUCCESS_WITHOUT_DATA_CYCLE_TYPE 0x06 #define ESPI_FLASH_HEADER_PCKT_SIZE 0x07 #define ESPI_FLASH_MAX_TIMEOUT 1000ul /* 1000 ms */ #define ESPI_OOB_GET_CYCLE_TYPE 0x21 #define ESPI_OOB_TAG 0x00 #define ESPI_OOB_MAX_TIMEOUT 500ul /* 500 ms */ /* eSPI bus interrupt configuration structure and macro function */ struct espi_bus_isr { uint8_t status_bit; /* bit order in ESPISTS register */ uint8_t int_en_bit; /* bit order in ESPIIE register */ uint8_t wake_en_bit; /* bit order in ESPIWE register */ void (*bus_isr)(const struct device *dev); /* eSPI bus ISR */ }; #define NPCX_ESPI_BUS_INT_ITEM(event, isr) { \ .status_bit = NPCX_ESPISTS_##event, \ .int_en_bit = NPCX_ESPIIE_##event##IE, \ .wake_en_bit = NPCX_ESPIWE_##event##WE, \ .bus_isr = isr } /* eSPI Virtual Wire Input (Master-to-Slave) signals configuration structure */ struct npcx_vw_in_config { enum espi_vwire_signal sig; /* Virtual Wire signal */ uint8_t reg_idx; /* register index for VW signal */ uint8_t bitmask; /* VW signal bits-mask */ struct npcx_wui vw_wui; /* WUI mapping in MIWU modules for VW signal */ }; /* eSPI Virtual Wire Output (Slave-to-Master) signals configuration structure */ struct npcx_vw_out_config { enum espi_vwire_signal sig; /* Virtual Wire signal */ uint8_t reg_idx; /* register index for VW signal */ uint8_t bitmask; /* VW signal bits-mask */ }; /* * eSPI VW input/Output signal configuration tables. Please refer * npcxn-espi-vws-map.dtsi device tree file for more detail. 
*/ static const struct npcx_vw_in_config vw_in_tbl[] = { /* index 02h (In) */ NPCX_DT_VW_IN_CONF(ESPI_VWIRE_SIGNAL_SLP_S3, vw_slp_s3), NPCX_DT_VW_IN_CONF(ESPI_VWIRE_SIGNAL_SLP_S4, vw_slp_s4), NPCX_DT_VW_IN_CONF(ESPI_VWIRE_SIGNAL_SLP_S5, vw_slp_s5), /* index 03h (In) */ NPCX_DT_VW_IN_CONF(ESPI_VWIRE_SIGNAL_SUS_STAT, vw_sus_stat), NPCX_DT_VW_IN_CONF(ESPI_VWIRE_SIGNAL_PLTRST, vw_plt_rst), NPCX_DT_VW_IN_CONF(ESPI_VWIRE_SIGNAL_OOB_RST_WARN, vw_oob_rst_warn), /* index 07h (In) */ NPCX_DT_VW_IN_CONF(ESPI_VWIRE_SIGNAL_HOST_RST_WARN, vw_host_rst_warn), /* index 41h (In) */ NPCX_DT_VW_IN_CONF(ESPI_VWIRE_SIGNAL_SUS_WARN, vw_sus_warn), NPCX_DT_VW_IN_CONF(ESPI_VWIRE_SIGNAL_SUS_PWRDN_ACK, vw_sus_pwrdn_ack), NPCX_DT_VW_IN_CONF(ESPI_VWIRE_SIGNAL_SLP_A, vw_slp_a), /* index 42h (In) */ NPCX_DT_VW_IN_CONF(ESPI_VWIRE_SIGNAL_SLP_LAN, vw_slp_lan), NPCX_DT_VW_IN_CONF(ESPI_VWIRE_SIGNAL_SLP_WLAN, vw_slp_wlan), }; static const struct npcx_vw_out_config vw_out_tbl[] = { /* index 04h (Out) */ NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_OOB_RST_ACK, vw_oob_rst_ack), NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_WAKE, vw_wake), NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_PME, vw_pme), /* index 05h (Out) */ NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_TARGET_BOOT_DONE, vw_slv_boot_done), NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_ERR_FATAL, vw_err_fatal), NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_ERR_NON_FATAL, vw_err_non_fatal), NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_TARGET_BOOT_STS, vw_slv_boot_sts_with_done), /* index 06h (Out) */ NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_SCI, vw_sci), NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_SMI, vw_smi), NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_HOST_RST_ACK, vw_host_rst_ack), /* index 40h (Out) */ NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_SUS_ACK, vw_sus_ack), }; /* Virtual wire GPIOs for platform level usage (High at Reset state) */ static const struct npcx_vw_out_config vw_out_gpio_tbl1[] = { /* Only NPCX9 and later series support this feature */ #if defined(CONFIG_ESPI_NPCX_SUPP_VW_GPIO) /* index 50h 
(Out) */ NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_TARGET_GPIO_0, vw_slv_gpio_0), NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_TARGET_GPIO_1, vw_slv_gpio_1), NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_TARGET_GPIO_2, vw_slv_gpio_2), NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_TARGET_GPIO_3, vw_slv_gpio_3), /* index 51h (Out) */ NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_TARGET_GPIO_4, vw_slv_gpio_4), NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_TARGET_GPIO_5, vw_slv_gpio_5), NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_TARGET_GPIO_6, vw_slv_gpio_6), NPCX_DT_VW_OUT_CONF(ESPI_VWIRE_SIGNAL_TARGET_GPIO_7, vw_slv_gpio_7), #endif }; /* Callbacks for eSPI bus reset and Virtual Wire signals. */ static struct miwu_callback espi_rst_callback; static struct miwu_callback vw_in_callback[ARRAY_SIZE(vw_in_tbl)]; /* eSPI VW service function forward declarations */ static int espi_npcx_receive_vwire(const struct device *dev, enum espi_vwire_signal signal, uint8_t *level); static int espi_npcx_send_vwire(const struct device *dev, enum espi_vwire_signal signal, uint8_t level); static void espi_vw_send_bootload_done(const struct device *dev); #if defined(CONFIG_ESPI_FLASH_CHANNEL) static int espi_npcx_flash_parse_completion_with_data(const struct device *dev, struct espi_flash_packet *pckt); static void espi_npcx_flash_prepare_tx_header(const struct device *dev, int cyc_type, int flash_addr, int flash_len, int tx_payload); #endif /* eSPI local initialization functions */ static void espi_init_wui_callback(const struct device *dev, struct miwu_callback *callback, const struct npcx_wui *wui, miwu_dev_callback_handler_t handler) { /* VW signal which has no wake-up input source */ if (wui->table == NPCX_MIWU_TABLE_NONE) { return; } /* Install callback function */ npcx_miwu_init_dev_callback(callback, wui, handler, dev); npcx_miwu_manage_callback(callback, 1); /* Configure MIWU setting and enable its interrupt */ npcx_miwu_interrupt_configure(wui, NPCX_MIWU_MODE_EDGE, NPCX_MIWU_TRIG_BOTH); } /* eSPI local bus interrupt service 
functions */ static void espi_bus_err_isr(const struct device *dev) { struct espi_reg *const inst = HAL_INSTANCE(dev); uint32_t err = inst->ESPIERR; LOG_ERR("eSPI Bus Error %08X", err); /* Clear error status bits */ inst->ESPIERR = err; } static void espi_bus_inband_rst_isr(const struct device *dev) { ARG_UNUSED(dev); LOG_DBG("%s issued", __func__); } static void espi_bus_reset_isr(const struct device *dev) { ARG_UNUSED(dev); LOG_DBG("%s issued", __func__); /* Do nothing! This signal is handled in ESPI_RST VW signal ISR */ } #if defined(CONFIG_ESPI_NPCX_CAF_GLOBAL_RESET_WORKAROUND) static void espi_npcx_flash_fake_request(const struct device *dev) { struct espi_reg *const inst = HAL_INSTANCE(dev); struct espi_npcx_data *const data = dev->data; inst->FLASHCTL &= ~BIT(NPCX_FLASHCTL_AMTEN); data->fake_req_flag = true; espi_npcx_flash_prepare_tx_header(dev, ESPI_FLASH_READ_CYCLE_TYPE, 0, 16, 0); } #endif static void espi_bus_cfg_update_isr(const struct device *dev) { int chan; struct espi_reg *const inst = HAL_INSTANCE(dev); struct espi_npcx_data *const data = dev->data; struct espi_event evt = { .evt_type = ESPI_BUS_EVENT_CHANNEL_READY, .evt_details = 0, .evt_data = 0 }; /* If host enable bits are not sync with ready bits on slave side. */ uint8_t chg_mask = GET_FIELD(inst->ESPICFG, NPCX_ESPICFG_HCHANS_FIELD) ^ GET_FIELD(inst->ESPICFG, NPCX_ESPICFG_CHANS_FIELD); chg_mask &= (ESPI_CHANNEL_VWIRE | ESPI_CHANNEL_OOB | ESPI_CHANNEL_FLASH); LOG_DBG("ESPI CFG Change Updated! 0x%02X", chg_mask); /* * If host enable/disable channel for VW/OOB/FLASH, EC should follow * except Peripheral channel. It is handled after receiving PLTRST * event separately. 
*/ for (chan = NPCX_ESPI_CH_VW; chan < NPCX_ESPI_CH_COUNT; chan++) { /* Channel ready bit isn't sync with enabled bit on host side */ if (chg_mask & BIT(chan)) { evt.evt_data = IS_BIT_SET(inst->ESPICFG, NPCX_ESPI_HOST_CH_EN(chan)); evt.evt_details = BIT(chan); #if defined(CONFIG_ESPI_NPCX_CAF_GLOBAL_RESET_WORKAROUND) if (chan == NPCX_ESPI_CH_FLASH && evt.evt_data == 1 && IS_BIT_SET(inst->FLASHCTL, NPCX_FLASHCTL_FLASH_TX_AVAIL)) { espi_npcx_flash_fake_request(dev); } #endif if (evt.evt_data) { inst->ESPICFG |= BIT(chan); } else { inst->ESPICFG &= ~BIT(chan); } espi_send_callbacks(&data->callbacks, dev, evt); } } LOG_DBG("ESPI CFG EC Updated! 0x%02X", GET_FIELD(inst->ESPICFG, NPCX_ESPICFG_CHANS_FIELD)); /* If VW channel is enabled and ready, send bootload done VW signal */ if ((chg_mask & BIT(NPCX_ESPI_CH_VW)) && IS_BIT_SET(inst->ESPICFG, NPCX_ESPI_HOST_CH_EN(NPCX_ESPI_CH_VW))) { espi_vw_send_bootload_done(dev); } #if (defined(CONFIG_ESPI_FLASH_CHANNEL) && defined(CONFIG_ESPI_TAF)) /* If CONFIG_ESPI_TAF is set, set to auto or manual mode accroding * to configuration. 
*/ if (IS_BIT_SET(inst->ESPICFG, NPCX_ESPICFG_FLCHANMODE)) { #if defined(CONFIG_ESPI_TAF_AUTO_MODE) inst->FLASHCTL |= BIT(NPCX_FLASHCTL_SAF_AUTO_READ); #else inst->FLASHCTL &= ~BIT(NPCX_FLASHCTL_SAF_AUTO_READ); #endif } #endif } #if defined(CONFIG_ESPI_OOB_CHANNEL) static void espi_bus_oob_rx_isr(const struct device *dev) { struct espi_npcx_data *const data = dev->data; #if defined(CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC) struct espi_reg *const inst = HAL_INSTANCE(dev); struct espi_event evt = { .evt_type = ESPI_BUS_EVENT_OOB_RECEIVED, .evt_details = 0, .evt_data = 0, }; /* Get received package length and set to additional detail of event */ evt.evt_details = NPCX_OOB_RX_PACKAGE_LEN(inst->OOBRXBUF[0]); espi_send_callbacks(&data->callbacks, dev, evt); #else LOG_DBG("%s", __func__); k_sem_give(&data->oob_rx_lock); #endif } #endif #if defined(CONFIG_ESPI_FLASH_CHANNEL) #if defined(CONFIG_ESPI_TAF) static struct espi_taf_pckt taf_pckt; static uint32_t espi_taf_parse(const struct device *dev) { struct espi_reg *const inst = HAL_INSTANCE(dev); struct npcx_taf_head taf_head; uint32_t taf_addr; uint8_t i, roundsize; /* Get type, length and tag from RX buffer */ memcpy(&taf_head, (void *)&inst->FLASHRXBUF[0], sizeof(taf_head)); taf_pckt.type = taf_head.type; taf_pckt.len = (((uint16_t)taf_head.tag_hlen & 0xF) << 8) | taf_head.llen; taf_pckt.tag = taf_head.tag_hlen >> 4; if ((taf_pckt.len == 0) && (taf_pckt.type == NPCX_ESPI_TAF_REQ_READ)) { taf_pckt.len = KB(4); } /* Get address from RX buffer */ taf_addr = inst->FLASHRXBUF[1]; taf_pckt.addr = sys_cpu_to_be32(taf_addr); /* Get written data if eSPI TAF write */ if (taf_pckt.type == NPCX_ESPI_TAF_REQ_WRITE) { roundsize = DIV_ROUND_UP(taf_pckt.len, sizeof(uint32_t)); for (i = 0; i < roundsize; i++) { taf_pckt.src[i] = inst->FLASHRXBUF[2 + i]; } } return (uint32_t)&taf_pckt; } #endif /* CONFIG_ESPI_TAF */ static void espi_bus_flash_rx_isr(const struct device *dev) { struct espi_reg *const inst = HAL_INSTANCE(dev); struct 
espi_npcx_data *const data = dev->data; /* Controller Attached Flash Access */ if ((inst->ESPICFG & BIT(NPCX_ESPICFG_FLCHANMODE)) == 0) { #ifdef CONFIG_ESPI_NPCX_CAF_GLOBAL_RESET_WORKAROUND if (data->fake_req_flag == true) { uint8_t pckt_buf[16]; struct espi_flash_packet pckt; pckt.buf = &pckt_buf[0]; espi_npcx_flash_parse_completion_with_data(dev, &pckt); data->fake_req_flag = false; return; } #endif k_sem_give(&data->flash_rx_lock); } else { /* Target Attached Flash Access */ #if defined(CONFIG_ESPI_TAF) struct espi_event evt = { .evt_type = ESPI_BUS_TAF_NOTIFICATION, .evt_details = ESPI_CHANNEL_FLASH, .evt_data = espi_taf_parse(dev), }; espi_send_callbacks(&data->callbacks, dev, evt); #else LOG_WRN("ESPI TAF not supported"); #endif } } #endif /* CONFIG_ESPI_FLASH_CHANNEL */ const struct espi_bus_isr espi_bus_isr_tbl[] = { NPCX_ESPI_BUS_INT_ITEM(BERR, espi_bus_err_isr), NPCX_ESPI_BUS_INT_ITEM(IBRST, espi_bus_inband_rst_isr), NPCX_ESPI_BUS_INT_ITEM(ESPIRST, espi_bus_reset_isr), NPCX_ESPI_BUS_INT_ITEM(CFGUPD, espi_bus_cfg_update_isr), #if defined(CONFIG_ESPI_OOB_CHANNEL) NPCX_ESPI_BUS_INT_ITEM(OOBRX, espi_bus_oob_rx_isr), #endif #if defined(CONFIG_ESPI_FLASH_CHANNEL) NPCX_ESPI_BUS_INT_ITEM(FLASHRX, espi_bus_flash_rx_isr), #endif }; static void espi_bus_generic_isr(const struct device *dev) { struct espi_reg *const inst = HAL_INSTANCE(dev); int i; uint32_t mask, status; /* * Bit 17 of ESPIIE is reserved. We need to set the same bit in mask * in case bit 17 in ESPISTS of npcx7 is not cleared in ISR. 
*/ mask = inst->ESPIIE | (1 << NPCX_ESPISTS_VWUPDW); status = inst->ESPISTS & mask; /* Clear pending bits of status register first */ inst->ESPISTS = status; LOG_DBG("%s: 0x%08X", __func__, status); for (i = 0; i < ARRAY_SIZE(espi_bus_isr_tbl); i++) { struct espi_bus_isr entry = espi_bus_isr_tbl[i]; if (status & BIT(entry.status_bit)) { if (entry.bus_isr != NULL) { entry.bus_isr(dev); } } } } /* eSPI local virtual-wire service functions */ static void espi_vw_config_input(const struct device *dev, const struct npcx_vw_in_config *config_in) { struct espi_reg *const inst = HAL_INSTANCE(dev); int idx = config_in->reg_idx; /* IE & WE bits are already set? */ if (IS_BIT_SET(inst->VWEVMS[idx], NPCX_VWEVMS_IE) && IS_BIT_SET(inst->VWEVMS[idx], NPCX_VWEVMS_WE)) { return; } /* Set IE & WE bits in VWEVMS */ inst->VWEVMS[idx] |= BIT(NPCX_VWEVMS_IE) | BIT(NPCX_VWEVMS_WE); LOG_DBG("VWEVMS%d 0x%08X", idx, inst->VWEVMS[idx]); } static void espi_vw_config_output(const struct device *dev, const struct npcx_vw_out_config *config_out) { struct espi_reg *const inst = HAL_INSTANCE(dev); int idx = config_out->reg_idx; uint8_t valid = GET_FIELD(inst->VWEVSM[idx], NPCX_VWEVSM_VALID); /* Set valid bits for vw signal which we have declared in table. */ valid |= config_out->bitmask; SET_FIELD(inst->VWEVSM[idx], NPCX_VWEVSM_VALID, valid); /* * Turn off hardware-wire feature which generates VW events that * connected to hardware signals. We will set it manually by software. */ SET_FIELD(inst->VWEVSM[idx], NPCX_VWEVSM_HW_WIRE, 0); LOG_DBG("VWEVSM%d 0x%08X", idx, inst->VWEVSM[idx]); } static void espi_vw_gpio_config_output(const struct device *dev, const struct npcx_vw_out_config *config_out, uint8_t init_level) { struct espi_reg *const inst = HAL_INSTANCE(dev); int idx = config_out->reg_idx; uint8_t valid = GET_FIELD(inst->VWGPSM[idx], NPCX_VWEVSM_VALID); uint8_t val = GET_FIELD(inst->VWGPSM[idx], NPCX_VWEVSM_WIRE); /* Set valid bits for vw signal which we have declared in table. 
*/ valid |= config_out->bitmask; SET_FIELD(inst->VWGPSM[idx], NPCX_VWEVSM_VALID, valid); inst->VWGPSM[idx] |= BIT(NPCX_VWGPSM_INDEX_EN); if (init_level) { val |= config_out->bitmask; } else { val &= ~config_out->bitmask; } SET_FIELD(inst->VWGPSM[idx], NPCX_VWEVSM_WIRE, val); LOG_DBG("VWEVSM%d 0x%08X", idx, inst->VWGPSM[idx]); } static void espi_vw_notify_system_state(const struct device *dev, enum espi_vwire_signal signal) { struct espi_npcx_data *const data = dev->data; struct espi_event evt = { ESPI_BUS_EVENT_VWIRE_RECEIVED, 0, 0 }; uint8_t wire = 0; espi_npcx_receive_vwire(dev, signal, &wire); if (!wire) { data->sx_state = signal; } evt.evt_details = signal; evt.evt_data = wire; espi_send_callbacks(&data->callbacks, dev, evt); } static void espi_vw_notify_host_warning(const struct device *dev, enum espi_vwire_signal signal) { uint8_t wire; espi_npcx_receive_vwire(dev, signal, &wire); k_busy_wait(NPCX_ESPI_VWIRE_ACK_DELAY); switch (signal) { case ESPI_VWIRE_SIGNAL_HOST_RST_WARN: espi_npcx_send_vwire(dev, ESPI_VWIRE_SIGNAL_HOST_RST_ACK, wire); break; case ESPI_VWIRE_SIGNAL_SUS_WARN: espi_npcx_send_vwire(dev, ESPI_VWIRE_SIGNAL_SUS_ACK, wire); break; case ESPI_VWIRE_SIGNAL_OOB_RST_WARN: espi_npcx_send_vwire(dev, ESPI_VWIRE_SIGNAL_OOB_RST_ACK, wire); break; default: break; } } static void espi_vw_notify_plt_rst(const struct device *dev) { struct espi_npcx_data *const data = dev->data; struct espi_reg *const inst = HAL_INSTANCE(dev); struct espi_event evt = { ESPI_BUS_EVENT_VWIRE_RECEIVED, ESPI_VWIRE_SIGNAL_PLTRST, 0 }; uint8_t wire = 0; espi_npcx_receive_vwire(dev, ESPI_VWIRE_SIGNAL_PLTRST, &wire); LOG_DBG("VW_PLT_RST is %d!", wire); if (wire) { /* Set Peripheral Channel ready when PLTRST is de-asserted */ inst->ESPICFG |= BIT(NPCX_ESPICFG_PCHANEN); /* Configure all host sub-modules in host domain */ npcx_host_init_subs_host_domain(); } /* PLT_RST will be received several times */ if (wire != data->plt_rst_asserted) { data->plt_rst_asserted = wire; evt.evt_data = 
wire; espi_send_callbacks(&data->callbacks, dev, evt); } } static void espi_vw_send_bootload_done(const struct device *dev) { int ret; uint8_t boot_done; ret = espi_npcx_receive_vwire(dev, ESPI_VWIRE_SIGNAL_TARGET_BOOT_DONE, &boot_done); LOG_DBG("%s: %d", __func__, boot_done); if (!ret && !boot_done) { /* Send slave boot status bit with done bit at the same time. */ espi_npcx_send_vwire(dev, ESPI_VWIRE_SIGNAL_TARGET_BOOT_STS, 1); } } static void espi_vw_generic_isr(const struct device *dev, struct npcx_wui *wui) { int idx; enum espi_vwire_signal signal; LOG_DBG("%s: WUI %d %d %d", __func__, wui->table, wui->group, wui->bit); for (idx = 0; idx < ARRAY_SIZE(vw_in_tbl); idx++) { if (wui->table == vw_in_tbl[idx].vw_wui.table && wui->group == vw_in_tbl[idx].vw_wui.group && wui->bit == vw_in_tbl[idx].vw_wui.bit) { break; } } if (idx == ARRAY_SIZE(vw_in_tbl)) { LOG_ERR("Unknown VW event! %d %d %d", wui->table, wui->group, wui->bit); return; } signal = vw_in_tbl[idx].sig; if (signal == ESPI_VWIRE_SIGNAL_SLP_S3 || signal == ESPI_VWIRE_SIGNAL_SLP_S4 || signal == ESPI_VWIRE_SIGNAL_SLP_S5 || signal == ESPI_VWIRE_SIGNAL_SLP_A) { espi_vw_notify_system_state(dev, signal); } else if (signal == ESPI_VWIRE_SIGNAL_HOST_RST_WARN || signal == ESPI_VWIRE_SIGNAL_SUS_WARN || signal == ESPI_VWIRE_SIGNAL_OOB_RST_WARN) { espi_vw_notify_host_warning(dev, signal); } else if (signal == ESPI_VWIRE_SIGNAL_PLTRST) { espi_vw_notify_plt_rst(dev); } } static void espi_vw_espi_rst_isr(const struct device *dev, struct npcx_wui *wui) { struct espi_reg *const inst = HAL_INSTANCE(dev); struct espi_npcx_data *const data = dev->data; struct espi_event evt = { ESPI_BUS_RESET, 0, 0 }; data->espi_rst_level = IS_BIT_SET(inst->ESPISTS, NPCX_ESPISTS_ESPIRST_LVL); LOG_DBG("eSPI RST level is %d!", data->espi_rst_level); evt.evt_data = data->espi_rst_level; espi_send_callbacks(&data->callbacks, dev, evt); } /* eSPI api functions */ static int espi_npcx_configure(const struct device *dev, struct espi_cfg *cfg) { 
struct espi_reg *const inst = HAL_INSTANCE(dev); uint8_t max_freq = 0; uint8_t cur_io_mode, io_mode = 0; /* Configure eSPI frequency */ switch (cfg->max_freq) { case 20: max_freq = NPCX_ESPI_MAXFREQ_20; break; case 25: max_freq = NPCX_ESPI_MAXFREQ_25; break; case 33: max_freq = NPCX_ESPI_MAXFREQ_33; break; case 50: max_freq = NPCX_ESPI_MAXFREQ_50; break; #ifdef CONFIG_SOC_SERIES_NPCX4 case 66: max_freq = NPCX_ESPI_MAXFREQ_66; break; #endif default: return -EINVAL; } SET_FIELD(inst->ESPICFG, NPCX_ESPICFG_MAXFREQ_FIELD, max_freq); /* Configure eSPI IO mode */ io_mode = (cfg->io_caps >> 1); if (io_mode > 3) { return -EINVAL; } cur_io_mode = GET_FIELD(inst->ESPICFG, NPCX_ESPICFG_IOMODE_FIELD); if (io_mode != cur_io_mode) { SET_FIELD(inst->ESPICFG, NPCX_ESPICFG_IOMODE_FIELD, io_mode); } /* Configure eSPI supported channels */ if (cfg->channel_caps & ESPI_CHANNEL_PERIPHERAL) { inst->ESPICFG |= BIT(NPCX_ESPICFG_PCCHN_SUPP); } if (cfg->channel_caps & ESPI_CHANNEL_VWIRE) { inst->ESPICFG |= BIT(NPCX_ESPICFG_VWCHN_SUPP); } if (cfg->channel_caps & ESPI_CHANNEL_OOB) { inst->ESPICFG |= BIT(NPCX_ESPICFG_OOBCHN_SUPP); } if (cfg->channel_caps & ESPI_CHANNEL_FLASH) { inst->ESPICFG |= BIT(NPCX_ESPICFG_FLASHCHN_SUPP); } LOG_DBG("%s: %d %d ESPICFG: 0x%08X", __func__, max_freq, io_mode, inst->ESPICFG); return 0; } static bool espi_npcx_channel_ready(const struct device *dev, enum espi_channel ch) { struct espi_reg *const inst = HAL_INSTANCE(dev); bool sts; switch (ch) { case ESPI_CHANNEL_PERIPHERAL: sts = IS_BIT_SET(inst->ESPICFG, NPCX_ESPICFG_PCHANEN); break; case ESPI_CHANNEL_VWIRE: sts = IS_BIT_SET(inst->ESPICFG, NPCX_ESPICFG_VWCHANEN); break; case ESPI_CHANNEL_OOB: sts = IS_BIT_SET(inst->ESPICFG, NPCX_ESPICFG_OOBCHANEN); break; case ESPI_CHANNEL_FLASH: sts = IS_BIT_SET(inst->ESPICFG, NPCX_ESPICFG_FLASHCHANEN); break; default: sts = false; break; } return sts; } static int espi_npcx_send_vwire(const struct device *dev, enum espi_vwire_signal signal, uint8_t level) { uint8_t reg_idx, 
bitmask, sig_idx, val = 0, vw_tbl_size; struct espi_reg *const inst = HAL_INSTANCE(dev); const struct npcx_vw_out_config *vw_tbl; uint32_t reg_val; char *reg_name; if (signal >= ESPI_VWIRE_SIGNAL_COUNT) { LOG_ERR("Invalid VW: %d", signal); return -EINVAL; } if (signal >= ESPI_VWIRE_SIGNAL_TARGET_GPIO_0) { vw_tbl = vw_out_gpio_tbl1; vw_tbl_size = ARRAY_SIZE(vw_out_gpio_tbl1); reg_name = "VWGPSM"; } else { vw_tbl = vw_out_tbl; vw_tbl_size = ARRAY_SIZE(vw_out_tbl); reg_name = "VWEVSM"; } /* Find signal in VW output table */ for (sig_idx = 0; sig_idx < vw_tbl_size; sig_idx++) { if (vw_tbl[sig_idx].sig == signal) { break; } } if (sig_idx == vw_tbl_size) { LOG_ERR("%s signal %d is invalid", __func__, signal); return -EIO; } reg_idx = vw_tbl[sig_idx].reg_idx; bitmask = vw_tbl[sig_idx].bitmask; /* Get wire field and set/clear wire bit */ if (signal >= ESPI_VWIRE_SIGNAL_TARGET_GPIO_0) { val = GET_FIELD(inst->VWGPSM[reg_idx], NPCX_VWEVSM_WIRE); } else { val = GET_FIELD(inst->VWEVSM[reg_idx], NPCX_VWEVSM_WIRE); } if (level) { val |= bitmask; } else { val &= ~bitmask; } if (signal >= ESPI_VWIRE_SIGNAL_TARGET_GPIO_0) { SET_FIELD(inst->VWGPSM[reg_idx], NPCX_VWEVSM_WIRE, val); reg_val = inst->VWGPSM[reg_idx]; } else { SET_FIELD(inst->VWEVSM[reg_idx], NPCX_VWEVSM_WIRE, val); reg_val = inst->VWEVSM[reg_idx]; } LOG_DBG("Send VW: %s%d 0x%08X", reg_name, reg_idx, reg_val); return 0; } static int espi_npcx_receive_vwire(const struct device *dev, enum espi_vwire_signal signal, uint8_t *level) { struct espi_reg *const inst = HAL_INSTANCE(dev); uint8_t reg_idx, bitmask, sig_idx, val; /* Find signal in VW input table */ for (sig_idx = 0; sig_idx < ARRAY_SIZE(vw_in_tbl); sig_idx++) { if (vw_in_tbl[sig_idx].sig == signal) { reg_idx = vw_in_tbl[sig_idx].reg_idx; bitmask = vw_in_tbl[sig_idx].bitmask; val = GET_FIELD(inst->VWEVMS[reg_idx], NPCX_VWEVMS_WIRE); val &= GET_FIELD(inst->VWEVMS[reg_idx], NPCX_VWEVMS_VALID); *level = !!(val & bitmask); return 0; } } /* Find signal in VW output table */ 
for (sig_idx = 0; sig_idx < ARRAY_SIZE(vw_out_tbl); sig_idx++) { if (vw_out_tbl[sig_idx].sig == signal) { reg_idx = vw_out_tbl[sig_idx].reg_idx; bitmask = vw_out_tbl[sig_idx].bitmask; val = GET_FIELD(inst->VWEVSM[reg_idx], NPCX_VWEVSM_WIRE); val &= GET_FIELD(inst->VWEVSM[reg_idx], NPCX_VWEVSM_VALID); *level = !!(val & bitmask); return 0; } } LOG_ERR("%s Out of index %d", __func__, signal); return -EIO; } static int espi_npcx_manage_callback(const struct device *dev, struct espi_callback *callback, bool set) { struct espi_npcx_data *const data = dev->data; return espi_manage_callback(&data->callbacks, callback, set); } static int espi_npcx_read_lpc_request(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { ARG_UNUSED(dev); return npcx_host_periph_read_request(op, data); } static int espi_npcx_write_lpc_request(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { ARG_UNUSED(dev); return npcx_host_periph_write_request(op, data); } #if defined(CONFIG_ESPI_OOB_CHANNEL) static int espi_npcx_send_oob(const struct device *dev, struct espi_oob_packet *pckt) { struct espi_reg *const inst = HAL_INSTANCE(dev); uint8_t *oob_buf = pckt->buf; int sz_oob_tx = pckt->len; int idx_tx_buf; uint32_t oob_data; /* Check out of OOB transmitted buffer size */ if (sz_oob_tx > NPCX_ESPI_OOB_MAX_PAYLOAD) { LOG_ERR("Out of OOB transmitted buffer: %d", sz_oob_tx); return -EINVAL; } /* Check OOB Transmit Queue is empty? */ if (IS_BIT_SET(inst->OOBCTL, NPCX_OOBCTL_OOB_AVAIL)) { LOG_ERR("OOB channel is busy"); return -EBUSY; } /* * GET_OOB header (first 4 bytes) in npcx 32-bits tx buffer * * [24:31] - LEN[0:7] Data length of GET_OOB request package * [20:23] - TAG Tag of GET_OOB * [16:19] - LEN[8:11] Ignore it since max payload is 64 bytes * [8:15] - CYCLE_TYPE Cycle type of GET_OOB * [0:7] - SZ_PACK Package size plus 3 bytes header. 
(Npcx only) */ inst->OOBTXBUF[0] = (sz_oob_tx + 3) | (ESPI_OOB_GET_CYCLE_TYPE << 8) | (ESPI_OOB_TAG << 16) | (sz_oob_tx << 24); /* Write GET_OOB data into 32-bits tx buffer in little endian */ for (idx_tx_buf = 0; idx_tx_buf < sz_oob_tx/4; idx_tx_buf++, oob_buf += 4) inst->OOBTXBUF[idx_tx_buf + 1] = oob_buf[0] | (oob_buf[1] << 8) | (oob_buf[2] << 16) | (oob_buf[3] << 24); /* Write remaining bytes of package */ if (sz_oob_tx % 4) { int i; oob_data = 0; for (i = 0; i < sz_oob_tx % 4; i++) { oob_data |= (oob_buf[i] << (8 * i)); } inst->OOBTXBUF[idx_tx_buf + 1] = oob_data; } /* * Notify host a new OOB packet is ready. Please don't write OOB_FREE * to 1 at the same tiem in case clear it unexpectedly. */ oob_data = inst->OOBCTL & ~(BIT(NPCX_OOBCTL_OOB_FREE)); oob_data |= BIT(NPCX_OOBCTL_OOB_AVAIL); inst->OOBCTL = oob_data; while (IS_BIT_SET(inst->OOBCTL, NPCX_OOBCTL_OOB_AVAIL)) { ; } LOG_DBG("%s issued!!", __func__); return 0; } static int espi_npcx_receive_oob(const struct device *dev, struct espi_oob_packet *pckt) { struct espi_reg *const inst = HAL_INSTANCE(dev); uint8_t *oob_buf = pckt->buf; uint32_t oob_data; int idx_rx_buf, sz_oob_rx; /* Check eSPI bus status first */ if (IS_BIT_SET(inst->ESPISTS, NPCX_ESPISTS_BERR)) { LOG_ERR("%s: eSPI Bus Error: 0x%08X", __func__, inst->ESPIERR); return -EIO; } #if !defined(CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC) struct espi_npcx_data *const data = dev->data; int ret; /* Wait until get oob package or timeout */ ret = k_sem_take(&data->oob_rx_lock, K_MSEC(ESPI_OOB_MAX_TIMEOUT)); if (ret == -EAGAIN) { LOG_ERR("%s: Timeout", __func__); return -ETIMEDOUT; } #endif /* * PUT_OOB header (first 4 bytes) in npcx 32-bits rx buffer * * [24:31] - LEN[0:7] Data length of PUT_OOB request package * [20:23] - TAG Tag of PUT_OOB * [16:19] - LEN[8:11] Data length of PUT_OOB request package * [8:15] - CYCLE_TYPE Cycle type of PUT_OOB * [0:7] - SZ_PACK Reserved. 
(Npcx only) */ oob_data = inst->OOBRXBUF[0]; /* Get received package length first */ sz_oob_rx = NPCX_OOB_RX_PACKAGE_LEN(oob_data); /* Check OOB received buffer size */ if (sz_oob_rx > NPCX_ESPI_OOB_MAX_PAYLOAD) { LOG_ERR("Out of OOB received buffer: %d", sz_oob_rx); return -EINVAL; } /* Set received size to package structure */ pckt->len = sz_oob_rx; /* Read PUT_OOB data into 32-bits rx buffer in little endian */ for (idx_rx_buf = 0; idx_rx_buf < sz_oob_rx/4; idx_rx_buf++) { oob_data = inst->OOBRXBUF[idx_rx_buf + 1]; *(oob_buf++) = oob_data & 0xFF; *(oob_buf++) = (oob_data >> 8) & 0xFF; *(oob_buf++) = (oob_data >> 16) & 0xFF; *(oob_buf++) = (oob_data >> 24) & 0xFF; } /* Read remaining bytes of package */ if (sz_oob_rx % 4) { int i; oob_data = inst->OOBRXBUF[idx_rx_buf + 1]; for (i = 0; i < sz_oob_rx % 4; i++) { *(oob_buf++) = (oob_data >> (8 * i)) & 0xFF; } } /* Notify host that OOB received buffer is free now. */ inst->OOBCTL |= BIT(NPCX_OOBCTL_OOB_FREE); return 0; } #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL static void espi_npcx_flash_prepare_tx_header(const struct device *dev, int cyc_type, int flash_addr, int flash_len, int tx_payload) { struct espi_reg *const inst = HAL_INSTANCE(dev); /* * First 3 bytes of flash cycle command header in tx buffer * * [24:31] - LEN[0:7] = n Data length of flash cycle request * [16:23] - LEN[8:15] = 0 Ignore it since max buffer size is 64 bytes * [12:15] - TAG = 0 Tag of flash cycle command is always 0 here * [8:11] - CYCLE_TYPE = 0 Cycle type of flash command * [0:7] - SZ_PACK = 7 Overall tx package size. (Used internally.) 
*/ inst->FLASHTXBUF[0] = (flash_len << 24) | (cyc_type << 8) | (tx_payload + ESPI_FLASH_HEADER_PCKT_SIZE); /* * Following 4 bytes of tager flash address in tx buffer * * [24:31] - ADDR[0:7] Start address of flash cycle command request * [16:23] - ADDR[15:8] * [8:15] - ADDR[23:16] * [0:7] - ADDR[31:24] */ inst->FLASHTXBUF[1] = sys_cpu_to_be32(flash_addr); } static int espi_npcx_flash_parse_completion(const struct device *dev) { int cycle_type; struct espi_reg *const inst = HAL_INSTANCE(dev); /* * First 3 bytes of flash cycle completion header in rx buffer * * [24:31] - LEN[0:7] Data length of flash cycle completion package * [16:23] - LEN[8:15] Ignore it since rx bufer size is 64 bytes * [12:15] - TAG Tag of flash cycle completion package * [8:11] - CYCLE_TYPE Cycle type of flash completion * [0:7] - Reserved */ cycle_type = (inst->FLASHRXBUF[0] & 0xff00) >> 8; if (cycle_type == ESPI_FLASH_SUCCESS_WITHOUT_DATA_CYCLE_TYPE) { return 0; } return -EIO; } static int espi_npcx_flash_parse_completion_with_data(const struct device *dev, struct espi_flash_packet *pckt) { struct espi_reg *const inst = HAL_INSTANCE(dev); int cycle_type, sz_rx_payload; /* * First 3 bytes of flash cycle completion header in rx buffer * * [24:31] - LEN[0:7] Data length of flash cycle completion package * [16:23] - LEN[8:15] Ignore it since rx bufer size is 64 bytes * [12:15] - TAG Tag of flash cycle completion package * [8:11] - CYCLE_TYPE Cycle type of flash completion * [0:7] - Reserved * * The following is flash data/ */ cycle_type = (inst->FLASHRXBUF[0] & 0xff00) >> 8; sz_rx_payload = inst->FLASHRXBUF[0] >> 24; if (cycle_type == ESPI_FLASH_SUCCESS_WITH_DATA_CYCLE_TYPE) { volatile uint32_t *rx_buf = &inst->FLASHRXBUF[1]; uint8_t *buf = pckt->buf; uint32_t data; /* Get data from flash RX buffer */ for (int i = 0; i < sz_rx_payload / 4; i++, rx_buf++) { data = *rx_buf; for (int j = 0; j < 4; j++, buf++) { *buf = data & 0xff; data = data >> 8; } } /* Get remaining bytes */ if (sz_rx_payload % 4) 
{ data = *rx_buf; for (int j = 0; j < sz_rx_payload % 4; j++, buf++) { *buf = data & 0xff; data = data >> 8; } } return 0; } return -EIO; } static int espi_npcx_flash_read(const struct device *dev, struct espi_flash_packet *pckt) { int ret; struct espi_reg *const inst = HAL_INSTANCE(dev); struct espi_npcx_data *const data = dev->data; /* Check out of FLASH received buffer size */ if (pckt->len > NPCX_ESPI_FLASH_MAX_RX_PAYLOAD) { LOG_ERR("Out of FLASH transmitted buffer: %d", pckt->len); return -EINVAL; } /* Check Flash Transmit Queue is empty? */ if (IS_BIT_SET(inst->FLASHCTL, NPCX_FLASHCTL_FLASH_TX_AVAIL)) { LOG_ERR("flash channel is busy"); return -EBUSY; } /* Prepare FLASH_READ header in tx buffer */ espi_npcx_flash_prepare_tx_header(dev, ESPI_FLASH_READ_CYCLE_TYPE, pckt->flash_addr, pckt->len, 0); /* Set the FLASHCTL.FLASH_TX_AVAIL bit to 1 to enqueue the packet */ inst->FLASHCTL |= BIT(NPCX_FLASHCTL_FLASH_TX_AVAIL); /* Wait until get flash package or timeout */ ret = k_sem_take(&data->flash_rx_lock, K_MSEC(ESPI_FLASH_MAX_TIMEOUT)); if (ret == -EAGAIN) { LOG_ERR("%s: Timeout", __func__); return -ETIMEDOUT; } return espi_npcx_flash_parse_completion_with_data(dev, pckt); } static int espi_npcx_flash_write(const struct device *dev, struct espi_flash_packet *pckt) { int ret; uint32_t tx_data; struct espi_reg *const inst = HAL_INSTANCE(dev); struct espi_npcx_data *const data = dev->data; volatile uint32_t *tx_buf = &inst->FLASHTXBUF[2]; uint8_t *buf = pckt->buf; /* Check out of FLASH transmitted buffer size */ if (pckt->len > NPCX_ESPI_FLASH_MAX_TX_PAYLOAD) { LOG_ERR("Out of FLASH transmitted buffer: %d", pckt->len); return -EINVAL; } /* Check Flash Transmit Queue is empty? 
*/ if (IS_BIT_SET(inst->FLASHCTL, NPCX_FLASHCTL_FLASH_TX_AVAIL)) { LOG_ERR("flash channel is busy"); return -EBUSY; } /* Prepare FLASH_WRITE header in tx buffer */ espi_npcx_flash_prepare_tx_header(dev, ESPI_FLASH_WRITE_CYCLE_TYPE, pckt->flash_addr, pckt->len, pckt->len); /* Put package data to flash TX buffer */ for (int i = 0; i < pckt->len / 4; i++, tx_buf++) { tx_data = 0; for (int j = 0; j < 4; j++, buf++) { tx_data |= (*buf << (j * 8)); } *tx_buf = tx_data; } /* Put remaining bytes to flash TX buffer */ if (pckt->len % 4) { tx_data = 0; for (int j = 0; j < pckt->len % 4; j++, buf++) { tx_data |= (*buf << (j * 8)); } *tx_buf = tx_data; } /* Set the FLASHCTL.FLASH_TX_AVAIL bit to 1 to enqueue the packet */ inst->FLASHCTL |= BIT(NPCX_FLASHCTL_FLASH_TX_AVAIL); /* Wait until get flash package or timeout */ ret = k_sem_take(&data->flash_rx_lock, K_MSEC(ESPI_FLASH_MAX_TIMEOUT)); if (ret == -EAGAIN) { LOG_ERR("%s: Timeout", __func__); return -ETIMEDOUT; } /* Parse completion package in rx buffer */ return espi_npcx_flash_parse_completion(dev); } static int espi_npcx_flash_erase(const struct device *dev, struct espi_flash_packet *pckt) { int ret; struct espi_reg *const inst = HAL_INSTANCE(dev); struct espi_npcx_data *const data = dev->data; /* Check Flash Transmit Queue is empty? 
*/ if (IS_BIT_SET(inst->FLASHCTL, NPCX_FLASHCTL_FLASH_TX_AVAIL)) { LOG_ERR("flash channel is busy"); return -EBUSY; } /* Prepare FLASH_ERASE header in tx buffer */ espi_npcx_flash_prepare_tx_header(dev, ESPI_FLASH_ERASE_CYCLE_TYPE, pckt->flash_addr, pckt->len, 0); /* Set the FLASHCTL.FLASH_TX_AVAIL bit to 1 to enqueue the packet */ inst->FLASHCTL |= BIT(NPCX_FLASHCTL_FLASH_TX_AVAIL); /* Wait until get flash package or timeout */ ret = k_sem_take(&data->flash_rx_lock, K_MSEC(ESPI_FLASH_MAX_TIMEOUT)); if (ret == -EAGAIN) { LOG_ERR("%s: Timeout", __func__); return -ETIMEDOUT; } /* Parse completion package in rx buffer */ return espi_npcx_flash_parse_completion(dev); } #endif /* Platform specific espi module functions */ void npcx_espi_enable_interrupts(const struct device *dev) { const struct espi_npcx_config *const config = dev->config; /* Enable eSPI bus interrupt */ irq_enable(DT_INST_IRQN(0)); /* Turn on all VW inputs' MIWU interrupts */ for (int idx = 0; idx < ARRAY_SIZE(vw_in_tbl); idx++) { npcx_miwu_irq_enable(&(vw_in_tbl[idx].vw_wui)); } npcx_miwu_irq_enable(&config->espi_rst_wui); } void npcx_espi_disable_interrupts(const struct device *dev) { const struct espi_npcx_config *const config = dev->config; /* Disable eSPI bus interrupt */ irq_disable(DT_INST_IRQN(0)); /* Turn off all VW inputs' MIWU interrupts */ for (int idx = 0; idx < ARRAY_SIZE(vw_in_tbl); idx++) { npcx_miwu_irq_disable(&(vw_in_tbl[idx].vw_wui)); } npcx_miwu_irq_disable(&config->espi_rst_wui); } /* eSPI driver registration */ static int espi_npcx_init(const struct device *dev); static const struct espi_driver_api espi_npcx_driver_api = { .config = espi_npcx_configure, .get_channel_status = espi_npcx_channel_ready, .send_vwire = espi_npcx_send_vwire, .receive_vwire = espi_npcx_receive_vwire, .manage_callback = espi_npcx_manage_callback, .read_lpc_request = espi_npcx_read_lpc_request, .write_lpc_request = espi_npcx_write_lpc_request, #if defined(CONFIG_ESPI_OOB_CHANNEL) .send_oob = 
espi_npcx_send_oob, .receive_oob = espi_npcx_receive_oob, #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL .flash_read = espi_npcx_flash_read, .flash_write = espi_npcx_flash_write, .flash_erase = espi_npcx_flash_erase, #endif }; static struct espi_npcx_data espi_npcx_data; PINCTRL_DT_INST_DEFINE(0); BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) == 1, "only one 'nuvoton_npcx_espi' compatible node may be present"); static const struct espi_npcx_config espi_npcx_config = { .base = DT_INST_REG_ADDR(0), .espi_rst_wui = NPCX_DT_WUI_ITEM_BY_NAME(0, espi_rst_wui), .clk_cfg = NPCX_DT_CLK_CFG_ITEM(0), .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), }; DEVICE_DT_INST_DEFINE(0, &espi_npcx_init, NULL, &espi_npcx_data, &espi_npcx_config, PRE_KERNEL_2, CONFIG_ESPI_INIT_PRIORITY, &espi_npcx_driver_api); static int espi_npcx_init(const struct device *dev) { const struct espi_npcx_config *const config = dev->config; struct espi_npcx_data *const data = dev->data; struct espi_reg *const inst = HAL_INSTANCE(dev); const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE); int i, ret; if (!device_is_ready(clk_dev)) { LOG_ERR("clock control device not ready"); return -ENODEV; } /* Turn on eSPI device clock first */ ret = clock_control_on(clk_dev, (clock_control_subsys_t) &config->clk_cfg); if (ret < 0) { LOG_ERR("Turn on eSPI clock fail %d", ret); return ret; } if (IS_ENABLED(CONFIG_ESPI_NPCX_BYPASS_CH_ENABLE_FATAL_ERROR)) { /* Enable the access to the NPCX_ONLY_ESPI_REG2 register */ inst->NPCX_ONLY_ESPI_REG1 = NPCX_ONLY_ESPI_REG1_UNLOCK_REG2; inst->NPCX_ONLY_ESPI_REG2 &= ~BIT(NPCX_ONLY_ESPI_REG2_TRANS_END_CONFIG); /* Disable the access to the NPCX_ONLY_ESPI_REG2 register */ inst->NPCX_ONLY_ESPI_REG1 = NPCX_ONLY_ESPI_REG1_LOCK_REG2; } /* Enable events which share the same espi bus interrupt */ for (i = 0; i < ARRAY_SIZE(espi_bus_isr_tbl); i++) { inst->ESPIIE |= BIT(espi_bus_isr_tbl[i].int_en_bit); inst->ESPIWE |= BIT(espi_bus_isr_tbl[i].wake_en_bit); } #if 
!defined(CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC) k_sem_init(&data->oob_rx_lock, 0, 1); #endif #if defined(CONFIG_ESPI_FLASH_CHANNEL) k_sem_init(&data->flash_rx_lock, 0, 1); #endif /* Configure Virtual Wire input signals */ for (i = 0; i < ARRAY_SIZE(vw_in_tbl); i++) { espi_vw_config_input(dev, &vw_in_tbl[i]); } /* Configure Virtual Wire output signals */ for (i = 0; i < ARRAY_SIZE(vw_out_tbl); i++) { espi_vw_config_output(dev, &vw_out_tbl[i]); } /* Configure Virtual Wire GPIOs that are output high at reset state */ for (i = 0; i < ARRAY_SIZE(vw_out_gpio_tbl1); i++) { espi_vw_gpio_config_output(dev, &vw_out_gpio_tbl1[i], 1); } /* Configure wake-up input and callback for eSPI VW input signal */ for (i = 0; i < ARRAY_SIZE(vw_in_tbl); i++) { espi_init_wui_callback(dev, &vw_in_callback[i], &vw_in_tbl[i].vw_wui, espi_vw_generic_isr); } /* Configure wake-up input and callback for ESPI_RST signal */ espi_init_wui_callback(dev, &espi_rst_callback, &config->espi_rst_wui, espi_vw_espi_rst_isr); /* Configure pin-mux for eSPI bus device */ ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("eSPI pinctrl setup failed (%d)", ret); return ret; } /* Configure host sub-modules which HW blocks belong to core domain */ npcx_host_init_subs_core_domain(dev, &data->callbacks); #if defined(CONFIG_ESPI_FLASH_CHANNEL) && defined(CONFIG_ESPI_TAF) npcx_init_taf(dev, &data->callbacks); #endif /* eSPI Bus interrupt installation */ IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), espi_bus_generic_isr, DEVICE_DT_INST_GET(0), 0); /* Enable eSPI bus interrupt */ irq_enable(DT_INST_IRQN(0)); return 0; } ```
/content/code_sandbox/drivers/espi/espi_npcx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
13,339
```c /* * */ #define DT_DRV_COMPAT microchip_xec_espi #include <zephyr/kernel.h> #include <soc.h> #include <errno.h> #include <zephyr/drivers/espi.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include "espi_utils.h" /* Minimum delay before acknowledging a virtual wire */ #define ESPI_XEC_VWIRE_ACK_DELAY 10ul /* Maximum timeout to transmit a virtual wire packet. * 10 ms expressed in multiples of 100us */ #define ESPI_XEC_VWIRE_SEND_TIMEOUT 100ul #define VW_MAX_GIRQS 2ul /* 200ms */ #define MAX_OOB_TIMEOUT 200ul /* 1s */ #define MAX_FLASH_TIMEOUT 1000ul /* While issuing flash erase command, it should be ensured that the transfer * length specified is non-zero. */ #define ESPI_FLASH_ERASE_DUMMY 0x01ul /* OOB maximum address configuration */ #define ESPI_XEC_OOB_ADDR_MSW 0x1FFFul #define ESPI_XEC_OOB_ADDR_LSW 0xFFFFul /* OOB Rx length */ #define ESPI_XEC_OOB_RX_LEN 0x7F00ul /* BARs as defined in LPC spec chapter 11 */ #define ESPI_XEC_KBC_BAR_ADDRESS 0x00600000 #define ESPI_XEC_UART0_BAR_ADDRESS 0x03F80000 #define ESPI_XEC_MBOX_BAR_ADDRESS 0x03600000 #define ESPI_XEC_PORT80_BAR_ADDRESS 0x00800000 #define ESPI_XEC_PORT81_BAR_ADDRESS 0x00810000 /* Espi peripheral has 3 uart ports */ #define ESPI_PERIPHERAL_UART_PORT0 0 #define ESPI_PERIPHERAL_UART_PORT1 1 #define ESPI_PERIPHERAL_UART_PORT2 2 #define UART_DEFAULT_IRQ_POS 2u #define UART_DEFAULT_IRQ BIT(UART_DEFAULT_IRQ_POS) /* VM index 0x50 for OCB */ #define ESPI_OCB_VW_INDEX 0x50u LOG_MODULE_REGISTER(espi, CONFIG_ESPI_LOG_LEVEL); struct espi_isr { uint32_t girq_bit; void (*the_isr)(const struct device *dev); }; struct espi_xec_config { uint32_t base_addr; uint8_t bus_girq_id; uint8_t vw_girq_ids[VW_MAX_GIRQS]; uint8_t pc_girq_id; const struct pinctrl_dev_config *pcfg; }; struct espi_xec_data { sys_slist_t callbacks; struct k_sem tx_lock; struct k_sem rx_lock; struct k_sem flash_lock; }; struct xec_signal { uint8_t xec_reg_idx; uint8_t bit; uint8_t dir; }; enum 
mchp_msvw_regs { MCHP_MSVW00, MCHP_MSVW01, MCHP_MSVW02, MCHP_MSVW03, MCHP_MSVW04, MCHP_MSVW05, MCHP_MSVW06, MCHP_MSVW07, MCHP_MSVW08, }; enum mchp_smvw_regs { MCHP_SMVW00, MCHP_SMVW01, MCHP_SMVW02, MCHP_SMVW03, MCHP_SMVW04, MCHP_SMVW05, MCHP_SMVW06, MCHP_SMVW07, MCHP_SMVW08, }; /* Microchip canonical virtual wire mapping * your_sha256_hash----------------| * VW Idx | VW reg | SRC_ID3 | SRC_ID2 | SRC_ID1 | SRC_ID0 | * your_sha256_hash----------------| * System Event Virtual Wires * your_sha256_hash----------------| * 2h | MSVW00 | res | SLP_S5# | SLP_S4# | SLP_S3# | * 3h | MSVW01 | res | OOB_RST_WARN | PLTRST# | SUS_STAT# | * 4h | SMVW00 | PME# | WAKE# | res | OOB_RST_ACK | * 5h | SMVW01 | TARGET_BOOT_STS | ERR_NONFATAL | ERR_FATAL | TARGET_BOOT_DONE | * 6h | SMVW02 | HOST_RST_ACK | RCIN# | SMI# | SCI# | * 7h | MSVW02 | res | res | res | HOS_RST_WARN | * your_sha256_hash----------------| * Platform specific virtual wires * your_sha256_hash----------------| * 40h | SMVW03 | res | res | DNX_ACK | SUS_ACK# | * 41h | MSVW03 | SLP_A# | res | SUS_PDNACK| SUS_WARN# | * 42h | MSVW04 | res | res | SLP_WLAN# | SLP_LAN# | * 43h | MSVW05 | generic | generic | generic | generic | * 44h | MSVW06 | generic | generic | generic | generic | * 45h | SMVW04 | generic | generic | generic | generic | * 46h | SMVW05 | generic | generic | generic | generic | * 47h | MSVW07 | res | res | res | HOST_C10 | * 4Ah | MSVW08 | res | res | DNX_WARN | res | * 50h | SMVW06 | ESPI_OCB_3 | ESPI_OCB_2 | ESPI_OCB_1| ESPI_OCB_0 | */ static const struct xec_signal vw_tbl[] = { /* MSVW00 */ [ESPI_VWIRE_SIGNAL_SLP_S3] = {MCHP_MSVW00, ESPI_VWIRE_SRC_ID0, ESPI_CONTROLLER_TO_TARGET}, [ESPI_VWIRE_SIGNAL_SLP_S4] = {MCHP_MSVW00, ESPI_VWIRE_SRC_ID1, ESPI_CONTROLLER_TO_TARGET}, [ESPI_VWIRE_SIGNAL_SLP_S5] = {MCHP_MSVW00, ESPI_VWIRE_SRC_ID2, ESPI_CONTROLLER_TO_TARGET}, /* MSVW01 */ [ESPI_VWIRE_SIGNAL_SUS_STAT] = {MCHP_MSVW01, ESPI_VWIRE_SRC_ID0, ESPI_CONTROLLER_TO_TARGET}, [ESPI_VWIRE_SIGNAL_PLTRST] = {MCHP_MSVW01, 
ESPI_VWIRE_SRC_ID1, ESPI_CONTROLLER_TO_TARGET}, [ESPI_VWIRE_SIGNAL_OOB_RST_WARN] = {MCHP_MSVW01, ESPI_VWIRE_SRC_ID2, ESPI_CONTROLLER_TO_TARGET}, /* SMVW00 */ [ESPI_VWIRE_SIGNAL_OOB_RST_ACK] = {MCHP_SMVW00, ESPI_VWIRE_SRC_ID0, ESPI_TARGET_TO_CONTROLLER}, [ESPI_VWIRE_SIGNAL_WAKE] = {MCHP_SMVW00, ESPI_VWIRE_SRC_ID2, ESPI_TARGET_TO_CONTROLLER}, [ESPI_VWIRE_SIGNAL_PME] = {MCHP_SMVW00, ESPI_VWIRE_SRC_ID3, ESPI_TARGET_TO_CONTROLLER}, /* SMVW01 */ [ESPI_VWIRE_SIGNAL_TARGET_BOOT_DONE] = {MCHP_SMVW01, ESPI_VWIRE_SRC_ID0, ESPI_TARGET_TO_CONTROLLER}, [ESPI_VWIRE_SIGNAL_ERR_FATAL] = {MCHP_SMVW01, ESPI_VWIRE_SRC_ID1, ESPI_TARGET_TO_CONTROLLER}, [ESPI_VWIRE_SIGNAL_ERR_NON_FATAL] = {MCHP_SMVW01, ESPI_VWIRE_SRC_ID2, ESPI_TARGET_TO_CONTROLLER}, [ESPI_VWIRE_SIGNAL_TARGET_BOOT_STS] = {MCHP_SMVW01, ESPI_VWIRE_SRC_ID3, ESPI_TARGET_TO_CONTROLLER}, /* SMVW02 */ [ESPI_VWIRE_SIGNAL_SCI] = {MCHP_SMVW02, ESPI_VWIRE_SRC_ID0, ESPI_TARGET_TO_CONTROLLER}, [ESPI_VWIRE_SIGNAL_SMI] = {MCHP_SMVW02, ESPI_VWIRE_SRC_ID1, ESPI_TARGET_TO_CONTROLLER}, [ESPI_VWIRE_SIGNAL_RST_CPU_INIT] = {MCHP_SMVW02, ESPI_VWIRE_SRC_ID2, ESPI_TARGET_TO_CONTROLLER}, [ESPI_VWIRE_SIGNAL_HOST_RST_ACK] = {MCHP_SMVW02, ESPI_VWIRE_SRC_ID3, ESPI_TARGET_TO_CONTROLLER}, /* MSVW02 */ [ESPI_VWIRE_SIGNAL_HOST_RST_WARN] = {MCHP_MSVW02, ESPI_VWIRE_SRC_ID0, ESPI_CONTROLLER_TO_TARGET}, /* SMVW03 */ [ESPI_VWIRE_SIGNAL_SUS_ACK] = {MCHP_SMVW03, ESPI_VWIRE_SRC_ID0, ESPI_TARGET_TO_CONTROLLER}, [ESPI_VWIRE_SIGNAL_DNX_ACK] = {MCHP_SMVW03, ESPI_VWIRE_SRC_ID1, ESPI_TARGET_TO_CONTROLLER}, /* MSVW03 */ [ESPI_VWIRE_SIGNAL_SUS_WARN] = {MCHP_MSVW03, ESPI_VWIRE_SRC_ID0, ESPI_CONTROLLER_TO_TARGET}, [ESPI_VWIRE_SIGNAL_SUS_PWRDN_ACK] = {MCHP_MSVW03, ESPI_VWIRE_SRC_ID1, ESPI_CONTROLLER_TO_TARGET}, [ESPI_VWIRE_SIGNAL_SLP_A] = {MCHP_MSVW03, ESPI_VWIRE_SRC_ID3, ESPI_CONTROLLER_TO_TARGET}, /* MSVW04 */ [ESPI_VWIRE_SIGNAL_SLP_LAN] = {MCHP_MSVW04, ESPI_VWIRE_SRC_ID0, ESPI_CONTROLLER_TO_TARGET}, [ESPI_VWIRE_SIGNAL_SLP_WLAN] = {MCHP_MSVW04, ESPI_VWIRE_SRC_ID1, 
ESPI_CONTROLLER_TO_TARGET}, /* MSVW07 */ [ESPI_VWIRE_SIGNAL_HOST_C10] = {MCHP_MSVW07, ESPI_VWIRE_SRC_ID0, ESPI_CONTROLLER_TO_TARGET}, /* MSVW08 */ [ESPI_VWIRE_SIGNAL_DNX_WARN] = {MCHP_MSVW08, ESPI_VWIRE_SRC_ID1, ESPI_CONTROLLER_TO_TARGET}, /* SMVW06 */ [ESPI_VWIRE_SIGNAL_OCB_0] = {MCHP_SMVW06, ESPI_VWIRE_SRC_ID0, ESPI_TARGET_TO_CONTROLLER}, [ESPI_VWIRE_SIGNAL_OCB_1] = {MCHP_SMVW06, ESPI_VWIRE_SRC_ID1, ESPI_TARGET_TO_CONTROLLER}, [ESPI_VWIRE_SIGNAL_OCB_2] = {MCHP_SMVW06, ESPI_VWIRE_SRC_ID2, ESPI_TARGET_TO_CONTROLLER}, [ESPI_VWIRE_SIGNAL_OCB_3] = {MCHP_SMVW06, ESPI_VWIRE_SRC_ID3, ESPI_TARGET_TO_CONTROLLER}, }; /* Buffer size are expressed in bytes */ #ifdef CONFIG_ESPI_OOB_CHANNEL static uint32_t target_rx_mem[CONFIG_ESPI_OOB_BUFFER_SIZE >> 2]; static uint32_t target_tx_mem[CONFIG_ESPI_OOB_BUFFER_SIZE >> 2]; #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL static uint32_t target_mem[CONFIG_ESPI_FLASH_BUFFER_SIZE >> 2]; #endif static int espi_xec_configure(const struct device *dev, struct espi_cfg *cfg) { uint8_t iomode = 0; uint8_t cap0 = ESPI_CAP_REGS->GLB_CAP0; uint8_t cap1 = ESPI_CAP_REGS->GLB_CAP1; uint8_t cur_iomode = (cap1 & MCHP_ESPI_GBL_CAP1_IO_MODE_MASK) >> MCHP_ESPI_GBL_CAP1_IO_MODE_POS; /* Set frequency */ cap1 &= ~MCHP_ESPI_GBL_CAP1_MAX_FREQ_MASK; switch (cfg->max_freq) { case 20: cap1 |= MCHP_ESPI_GBL_CAP1_MAX_FREQ_20M; break; case 25: cap1 |= MCHP_ESPI_GBL_CAP1_MAX_FREQ_25M; break; case 33: cap1 |= MCHP_ESPI_GBL_CAP1_MAX_FREQ_33M; break; case 50: cap1 |= MCHP_ESPI_GBL_CAP1_MAX_FREQ_50M; break; case 66: cap1 |= MCHP_ESPI_GBL_CAP1_MAX_FREQ_66M; break; default: return -EINVAL; } /* Set IO mode */ iomode = (cfg->io_caps >> 1); if (iomode > 3) { return -EINVAL; } if (iomode != cur_iomode) { cap1 &= ~(MCHP_ESPI_GBL_CAP1_IO_MODE_MASK0 << MCHP_ESPI_GBL_CAP1_IO_MODE_POS); cap1 |= (iomode << MCHP_ESPI_GBL_CAP1_IO_MODE_POS); } /* Validate and translate eSPI API channels to MEC capabilities */ cap0 &= ~MCHP_ESPI_GBL_CAP0_MASK; if (cfg->channel_caps & 
ESPI_CHANNEL_PERIPHERAL) { if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_CHANNEL)) { cap0 |= MCHP_ESPI_GBL_CAP0_PC_SUPP; } else { return -EINVAL; } } if (cfg->channel_caps & ESPI_CHANNEL_VWIRE) { if (IS_ENABLED(CONFIG_ESPI_VWIRE_CHANNEL)) { cap0 |= MCHP_ESPI_GBL_CAP0_VW_SUPP; } else { return -EINVAL; } } if (cfg->channel_caps & ESPI_CHANNEL_OOB) { if (IS_ENABLED(CONFIG_ESPI_OOB_CHANNEL)) { cap0 |= MCHP_ESPI_GBL_CAP0_OOB_SUPP; } else { return -EINVAL; } } if (cfg->channel_caps & ESPI_CHANNEL_FLASH) { if (IS_ENABLED(CONFIG_ESPI_FLASH_CHANNEL)) { cap0 |= MCHP_ESPI_GBL_CAP0_FC_SUPP; } else { LOG_ERR("Flash channel not supported"); return -EINVAL; } } ESPI_CAP_REGS->GLB_CAP0 = cap0; ESPI_CAP_REGS->GLB_CAP1 = cap1; /* Activate the eSPI block *. * Need to guarantee that this register is configured before RSMRST# * de-assertion and after pinmux */ ESPI_EIO_BAR_REGS->IO_ACTV = 1; LOG_DBG("eSPI block activated successfully"); return 0; } static bool espi_xec_channel_ready(const struct device *dev, enum espi_channel ch) { bool sts; switch (ch) { case ESPI_CHANNEL_PERIPHERAL: sts = ESPI_CAP_REGS->PC_RDY & MCHP_ESPI_PC_READY; break; case ESPI_CHANNEL_VWIRE: sts = ESPI_CAP_REGS->VW_RDY & MCHP_ESPI_VW_READY; break; case ESPI_CHANNEL_OOB: sts = ESPI_CAP_REGS->OOB_RDY & MCHP_ESPI_OOB_READY; break; case ESPI_CHANNEL_FLASH: sts = ESPI_CAP_REGS->FC_RDY & MCHP_ESPI_FC_READY; break; default: sts = false; break; } return sts; } static int espi_xec_read_lpc_request(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { ARG_UNUSED(dev); if (op >= E8042_START_OPCODE && op <= E8042_MAX_OPCODE) { /* Make sure kbc 8042 is on */ if (!(KBC_REGS->KBC_CTRL & MCHP_KBC_CTRL_OBFEN)) { return -ENOTSUP; } switch (op) { case E8042_OBF_HAS_CHAR: /* EC has written data back to host. OBF is * automatically cleared after host reads * the data */ *data = KBC_REGS->EC_KBC_STS & MCHP_KBC_STS_OBF ? 1 : 0; break; case E8042_IBF_HAS_CHAR: *data = KBC_REGS->EC_KBC_STS & MCHP_KBC_STS_IBF ? 
1 : 0; break; case E8042_READ_KB_STS: *data = KBC_REGS->EC_KBC_STS; break; default: return -EINVAL; } } else { return -ENOTSUP; } return 0; } static int espi_xec_write_lpc_request(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { struct espi_xec_config *config = (struct espi_xec_config *) (dev->config); volatile uint32_t __attribute__((unused)) dummy; if (op >= E8042_START_OPCODE && op <= E8042_MAX_OPCODE) { /* Make sure kbc 8042 is on */ if (!(KBC_REGS->KBC_CTRL & MCHP_KBC_CTRL_OBFEN)) { return -ENOTSUP; } switch (op) { case E8042_WRITE_KB_CHAR: KBC_REGS->EC_DATA = *data & 0xff; break; case E8042_WRITE_MB_CHAR: KBC_REGS->EC_AUX_DATA = *data & 0xff; break; case E8042_RESUME_IRQ: MCHP_GIRQ_SRC(config->pc_girq_id) = MCHP_KBC_IBF_GIRQ; MCHP_GIRQ_ENSET(config->pc_girq_id) = MCHP_KBC_IBF_GIRQ; break; case E8042_PAUSE_IRQ: MCHP_GIRQ_ENCLR(config->pc_girq_id) = MCHP_KBC_IBF_GIRQ; break; case E8042_CLEAR_OBF: dummy = KBC_REGS->HOST_AUX_DATA; break; case E8042_SET_FLAG: /* FW shouldn't modify these flags directly */ *data &= ~(MCHP_KBC_STS_OBF | MCHP_KBC_STS_IBF | MCHP_KBC_STS_AUXOBF); KBC_REGS->EC_KBC_STS |= *data; break; case E8042_CLEAR_FLAG: /* FW shouldn't modify these flags directly */ *data |= (MCHP_KBC_STS_OBF | MCHP_KBC_STS_IBF | MCHP_KBC_STS_AUXOBF); KBC_REGS->EC_KBC_STS &= ~(*data); break; default: return -EINVAL; } } else { return -ENOTSUP; } return 0; } static int espi_xec_send_vwire(const struct device *dev, enum espi_vwire_signal signal, uint8_t level) { struct xec_signal signal_info = vw_tbl[signal]; uint8_t xec_id = signal_info.xec_reg_idx; uint8_t src_id = signal_info.bit; if ((src_id >= ESPI_VWIRE_SRC_ID_MAX) || (xec_id >= ESPI_MSVW_IDX_MAX)) { return -EINVAL; } if (signal_info.dir == ESPI_CONTROLLER_TO_TARGET) { ESPI_MSVW_REG *reg = &(ESPI_M2S_VW_REGS->MSVW00) + xec_id; uint8_t *p8 = (uint8_t *)&reg->SRC; *(p8 + (uintptr_t) src_id) = level; } if (signal_info.dir == ESPI_TARGET_TO_CONTROLLER) { ESPI_SMVW_REG *reg = 
&(ESPI_S2M_VW_REGS->SMVW00) + xec_id; uint8_t *p8 = (uint8_t *)&reg->SRC; *(p8 + (uintptr_t) src_id) = level; /* Ensure eSPI virtual wire packet is transmitted * There is no interrupt, so need to poll register */ uint8_t rd_cnt = ESPI_XEC_VWIRE_SEND_TIMEOUT; while (reg->SRC_CHG && rd_cnt--) { k_busy_wait(100); } } return 0; } static int espi_xec_receive_vwire(const struct device *dev, enum espi_vwire_signal signal, uint8_t *level) { struct xec_signal signal_info = vw_tbl[signal]; uint8_t xec_id = signal_info.xec_reg_idx; uint8_t src_id = signal_info.bit; if ((src_id >= ESPI_VWIRE_SRC_ID_MAX) || (xec_id >= ESPI_SMVW_IDX_MAX) || (level == NULL)) { return -EINVAL; } if (signal_info.dir == ESPI_CONTROLLER_TO_TARGET) { ESPI_MSVW_REG *reg = &(ESPI_M2S_VW_REGS->MSVW00) + xec_id; *level = ((reg->SRC >> (src_id << 3)) & 0x01ul); } if (signal_info.dir == ESPI_TARGET_TO_CONTROLLER) { ESPI_SMVW_REG *reg = &(ESPI_S2M_VW_REGS->SMVW00) + xec_id; *level = ((reg->SRC >> (src_id << 3)) & 0x01ul); } return 0; } #ifdef CONFIG_ESPI_OOB_CHANNEL static int espi_xec_send_oob(const struct device *dev, struct espi_oob_packet *pckt) { int ret; struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); uint8_t err_mask = MCHP_ESPI_OOB_TX_STS_IBERR | MCHP_ESPI_OOB_TX_STS_OVRUN | MCHP_ESPI_OOB_TX_STS_BADREQ; LOG_DBG("%s", __func__); if (!(ESPI_OOB_REGS->TX_STS & MCHP_ESPI_OOB_TX_STS_CHEN)) { LOG_ERR("OOB channel is disabled"); return -EIO; } if (ESPI_OOB_REGS->TX_STS & MCHP_ESPI_OOB_TX_STS_BUSY) { LOG_ERR("OOB channel is busy"); return -EBUSY; } if (pckt->len > CONFIG_ESPI_OOB_BUFFER_SIZE) { LOG_ERR("insufficient space"); return -EINVAL; } memcpy(target_tx_mem, pckt->buf, pckt->len); ESPI_OOB_REGS->TX_LEN = pckt->len; ESPI_OOB_REGS->TX_CTRL = MCHP_ESPI_OOB_TX_CTRL_START; LOG_DBG("%s %d", __func__, ESPI_OOB_REGS->TX_LEN); /* Wait until ISR or timeout */ ret = k_sem_take(&data->tx_lock, K_MSEC(MAX_OOB_TIMEOUT)); if (ret == -EAGAIN) { return -ETIMEDOUT; } if (ESPI_OOB_REGS->TX_STS & 
err_mask) { LOG_ERR("Tx failed %x", ESPI_OOB_REGS->TX_STS); ESPI_OOB_REGS->TX_STS = err_mask; return -EIO; } return 0; } static int espi_xec_receive_oob(const struct device *dev, struct espi_oob_packet *pckt) { uint8_t err_mask = MCHP_ESPI_OOB_RX_STS_IBERR | MCHP_ESPI_OOB_RX_STS_OVRUN; if (ESPI_OOB_REGS->TX_STS & err_mask) { return -EIO; } #ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC int ret; struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); /* Wait until ISR or timeout */ ret = k_sem_take(&data->rx_lock, K_MSEC(MAX_OOB_TIMEOUT)); if (ret == -EAGAIN) { return -ETIMEDOUT; } #endif /* Check if buffer passed to driver can fit the received buffer */ uint32_t rcvd_len = ESPI_OOB_REGS->RX_LEN & MCHP_ESPI_OOB_RX_LEN_MASK; if (rcvd_len > pckt->len) { LOG_ERR("space rcvd %d vs %d", rcvd_len, pckt->len); return -EIO; } pckt->len = rcvd_len; memcpy(pckt->buf, target_rx_mem, pckt->len); memset(target_rx_mem, 0, pckt->len); /* Only after data has been copied from SRAM, indicate channel * is available for next packet */ ESPI_OOB_REGS->RX_CTRL |= MCHP_ESPI_OOB_RX_CTRL_AVAIL; return 0; } #endif /* CONFIG_ESPI_OOB_CHANNEL */ #ifdef CONFIG_ESPI_FLASH_CHANNEL static int espi_xec_flash_read(const struct device *dev, struct espi_flash_packet *pckt) { int ret; struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); uint32_t err_mask = MCHP_ESPI_FC_STS_IBERR | MCHP_ESPI_FC_STS_FAIL | MCHP_ESPI_FC_STS_OVFL | MCHP_ESPI_FC_STS_BADREQ; LOG_DBG("%s", __func__); if (!(ESPI_FC_REGS->STS & MCHP_ESPI_FC_STS_CHAN_EN)) { LOG_ERR("Flash channel is disabled"); return -EIO; } if (pckt->len > CONFIG_ESPI_FLASH_BUFFER_SIZE) { LOG_ERR("Invalid size request"); return -EINVAL; } ESPI_FC_REGS->FL_ADDR_MSW = 0; ESPI_FC_REGS->FL_ADDR_LSW = pckt->flash_addr; ESPI_FC_REGS->MEM_ADDR_MSW = 0; ESPI_FC_REGS->MEM_ADDR_LSW = (uint32_t)&target_mem[0]; ESPI_FC_REGS->XFR_LEN = pckt->len; ESPI_FC_REGS->CTRL = MCHP_ESPI_FC_CTRL_FUNC(MCHP_ESPI_FC_CTRL_RD0); ESPI_FC_REGS->CTRL |= 
MCHP_ESPI_FC_CTRL_START; /* Wait until ISR or timeout */ ret = k_sem_take(&data->flash_lock, K_MSEC(MAX_FLASH_TIMEOUT)); if (ret == -EAGAIN) { LOG_ERR("%s timeout", __func__); return -ETIMEDOUT; } if (ESPI_FC_REGS->STS & err_mask) { LOG_ERR("%s error %x", __func__, err_mask); ESPI_FC_REGS->STS = err_mask; return -EIO; } memcpy(pckt->buf, target_mem, pckt->len); return 0; } static int espi_xec_flash_write(const struct device *dev, struct espi_flash_packet *pckt) { int ret; uint32_t err_mask = MCHP_ESPI_FC_STS_IBERR | MCHP_ESPI_FC_STS_OVRUN | MCHP_ESPI_FC_STS_FAIL | MCHP_ESPI_FC_STS_BADREQ; struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); LOG_DBG("%s", __func__); if (sizeof(target_mem) < pckt->len) { LOG_ERR("Packet length is too big"); return -ENOMEM; } if (!(ESPI_FC_REGS->STS & MCHP_ESPI_FC_STS_CHAN_EN)) { LOG_ERR("Flash channel is disabled"); return -EIO; } if ((ESPI_FC_REGS->CFG & MCHP_ESPI_FC_CFG_BUSY)) { LOG_ERR("Flash channel is busy"); return -EBUSY; } memcpy(target_mem, pckt->buf, pckt->len); ESPI_FC_REGS->FL_ADDR_MSW = 0; ESPI_FC_REGS->FL_ADDR_LSW = pckt->flash_addr; ESPI_FC_REGS->MEM_ADDR_MSW = 0; ESPI_FC_REGS->MEM_ADDR_LSW = (uint32_t)&target_mem[0]; ESPI_FC_REGS->XFR_LEN = pckt->len; ESPI_FC_REGS->CTRL = MCHP_ESPI_FC_CTRL_FUNC(MCHP_ESPI_FC_CTRL_WR0); ESPI_FC_REGS->CTRL |= MCHP_ESPI_FC_CTRL_START; /* Wait until ISR or timeout */ ret = k_sem_take(&data->flash_lock, K_MSEC(MAX_FLASH_TIMEOUT)); if (ret == -EAGAIN) { LOG_ERR("%s timeout", __func__); return -ETIMEDOUT; } if (ESPI_FC_REGS->STS & err_mask) { LOG_ERR("%s err: %x", __func__, err_mask); ESPI_FC_REGS->STS = err_mask; return -EIO; } return 0; } static int espi_xec_flash_erase(const struct device *dev, struct espi_flash_packet *pckt) { int ret; uint32_t status; uint32_t err_mask = MCHP_ESPI_FC_STS_IBERR | MCHP_ESPI_FC_STS_OVRUN | MCHP_ESPI_FC_STS_FAIL | MCHP_ESPI_FC_STS_BADREQ; struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); LOG_DBG("%s", __func__); if 
(!(ESPI_FC_REGS->STS & MCHP_ESPI_FC_STS_CHAN_EN)) { LOG_ERR("Flash channel is disabled"); return -EIO; } if ((ESPI_FC_REGS->CFG & MCHP_ESPI_FC_CFG_BUSY)) { LOG_ERR("Flash channel is busy"); return -EBUSY; } /* Clear status register */ status = ESPI_FC_REGS->STS; ESPI_FC_REGS->STS = status; ESPI_FC_REGS->FL_ADDR_MSW = 0; ESPI_FC_REGS->FL_ADDR_LSW = pckt->flash_addr; ESPI_FC_REGS->XFR_LEN = ESPI_FLASH_ERASE_DUMMY; ESPI_FC_REGS->CTRL = MCHP_ESPI_FC_CTRL_FUNC(MCHP_ESPI_FC_CTRL_ERS0); ESPI_FC_REGS->CTRL |= MCHP_ESPI_FC_CTRL_START; /* Wait until ISR or timeout */ ret = k_sem_take(&data->flash_lock, K_MSEC(MAX_FLASH_TIMEOUT)); if (ret == -EAGAIN) { LOG_ERR("%s timeout", __func__); return -ETIMEDOUT; } if (ESPI_FC_REGS->STS & err_mask) { LOG_ERR("%s err: %x", __func__, err_mask); ESPI_FC_REGS->STS = err_mask; return -EIO; } return 0; } #endif /* CONFIG_ESPI_FLASH_CHANNEL */ static int espi_xec_manage_callback(const struct device *dev, struct espi_callback *callback, bool set) { struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); return espi_manage_callback(&data->callbacks, callback, set); } #ifdef CONFIG_ESPI_AUTOMATIC_BOOT_DONE_ACKNOWLEDGE static void send_target_bootdone(const struct device *dev) { int ret; uint8_t boot_done; ret = espi_xec_receive_vwire(dev, ESPI_VWIRE_SIGNAL_TARGET_BOOT_DONE, &boot_done); if (!ret && !boot_done) { /* TARGET_BOOT_DONE & TARGET_LOAD_STS have to be sent together */ espi_xec_send_vwire(dev, ESPI_VWIRE_SIGNAL_TARGET_BOOT_STS, 1); espi_xec_send_vwire(dev, ESPI_VWIRE_SIGNAL_TARGET_BOOT_DONE, 1); } } #endif #ifdef CONFIG_ESPI_OOB_CHANNEL static void espi_init_oob(const struct device *dev) { struct espi_xec_config *config = (struct espi_xec_config *) (dev->config); /* Enable OOB Tx/Rx interrupts */ MCHP_GIRQ_ENSET(config->bus_girq_id) = (MCHP_ESPI_OOB_UP_GIRQ_VAL | MCHP_ESPI_OOB_DN_GIRQ_VAL); ESPI_OOB_REGS->TX_ADDR_MSW = 0; ESPI_OOB_REGS->RX_ADDR_MSW = 0; ESPI_OOB_REGS->TX_ADDR_LSW = (uint32_t)&target_tx_mem[0]; 
ESPI_OOB_REGS->RX_ADDR_LSW = (uint32_t)&target_rx_mem[0]; ESPI_OOB_REGS->RX_LEN = 0x00FF0000; /* Enable OOB Tx channel enable change status interrupt */ ESPI_OOB_REGS->TX_IEN |= MCHP_ESPI_OOB_TX_IEN_CHG_EN | MCHP_ESPI_OOB_TX_IEN_DONE; /* Enable Rx channel to receive data any time * there are case where OOB is not initiated by a previous OOB Tx */ ESPI_OOB_REGS->RX_IEN |= MCHP_ESPI_OOB_RX_IEN; ESPI_OOB_REGS->RX_CTRL |= MCHP_ESPI_OOB_RX_CTRL_AVAIL; } #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL static void espi_init_flash(const struct device *dev) { struct espi_xec_config *config = (struct espi_xec_config *)(dev->config); LOG_DBG("%s", __func__); /* Need to clear status done when ROM boots in MAF */ LOG_DBG("%s ESPI_FC_REGS->CFG %X", __func__, ESPI_FC_REGS->CFG); ESPI_FC_REGS->STS = MCHP_ESPI_FC_STS_DONE; /* Enable interrupts */ MCHP_GIRQ_ENSET(config->bus_girq_id) = BIT(MCHP_ESPI_FC_GIRQ_POS); ESPI_FC_REGS->IEN |= MCHP_ESPI_FC_IEN_CHG_EN; ESPI_FC_REGS->IEN |= MCHP_ESPI_FC_IEN_DONE; } #endif static void espi_bus_init(const struct device *dev) { const struct espi_xec_config *config = dev->config; /* Enable bus interrupts */ MCHP_GIRQ_ENSET(config->bus_girq_id) = MCHP_ESPI_ESPI_RST_GIRQ_VAL | MCHP_ESPI_VW_EN_GIRQ_VAL | MCHP_ESPI_PC_GIRQ_VAL; } void espi_config_vw_ocb(void) { ESPI_SMVW_REG *reg = &(ESPI_S2M_VW_REGS->SMVW06); /* Keep index bits [7:0] in initial 0h value (disabled state) */ mec_espi_smvw_index_set(reg, 0); /* Set 01b (eSPI_RESET# domain) into bits [9:8] which frees the * register from all except chip level resets and set initial state * of VW wires as 1111b in bits [15:12]. */ mec_espi_msvw_stom_set(reg, VW_RST_SRC_ESPI_RESET, 0x1); /* Set 4 SMVW SRC bits in bit positions [0], [8], [16] and [24] to * initial value '1'. */ mec_espi_smvw_set_all_bitmap(reg, 0xF); /* Set 00b (eSPI_RESET# domain) into bits [9:8] while preserving * the values in bits [15:12]. 
*/ mec_espi_msvw_stom_set(reg, VW_RST_SRC_ESPI_RESET, 0x0); /* Set INDEX field with OCB VW index */ mec_espi_smvw_index_set(reg, ESPI_OCB_VW_INDEX); } static void espi_rst_isr(const struct device *dev) { uint8_t rst_sts; struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); struct espi_event evt = { ESPI_BUS_RESET, 0, 0 }; rst_sts = ESPI_CAP_REGS->ERST_STS; /* eSPI reset status register is clear on write register */ ESPI_CAP_REGS->ERST_STS = MCHP_ESPI_RST_ISTS; if (rst_sts & MCHP_ESPI_RST_ISTS) { if (rst_sts & MCHP_ESPI_RST_ISTS_PIN_RO_HI) { evt.evt_data = 1; } else { evt.evt_data = 0; } espi_send_callbacks(&data->callbacks, dev, evt); #ifdef CONFIG_ESPI_OOB_CHANNEL espi_init_oob(dev); #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL espi_init_flash(dev); #endif espi_bus_init(dev); } } /* Configure sub devices BAR address if not using default I/O based address * then make its BAR valid. * Refer to microchip eSPI I/O base addresses for default values */ static void config_sub_devices(const struct device *dev) { #ifdef CONFIG_ESPI_PERIPHERAL_UART /* eSPI logical UART is tied to corresponding physical UART * Not all boards use same UART port for debug, hence needs to set * eSPI host logical UART0 bar address based on configuration. 
*/ switch (CONFIG_ESPI_PERIPHERAL_UART_SOC_MAPPING) { case 0: ESPI_EIO_BAR_REGS->EC_BAR_UART_0 = ESPI_XEC_UART0_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; break; case 1: ESPI_EIO_BAR_REGS->EC_BAR_UART_1 = ESPI_XEC_UART0_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; break; case 2: ESPI_EIO_BAR_REGS->EC_BAR_UART_2 = ESPI_XEC_UART0_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; break; } #endif #ifdef CONFIG_ESPI_PERIPHERAL_8042_KBC KBC_REGS->KBC_CTRL |= MCHP_KBC_CTRL_AUXH; KBC_REGS->KBC_CTRL |= MCHP_KBC_CTRL_OBFEN; /* This is the activate register, but the HAL has a funny name */ KBC_REGS->KBC_PORT92_EN = MCHP_KBC_PORT92_EN; ESPI_EIO_BAR_REGS->EC_BAR_KBC = ESPI_XEC_KBC_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; #endif #ifdef CONFIG_ESPI_PERIPHERAL_HOST_IO ESPI_EIO_BAR_REGS->EC_BAR_ACPI_EC_0 |= MCHP_ESPI_IO_BAR_HOST_VALID; ESPI_EIO_BAR_REGS->EC_BAR_MBOX = ESPI_XEC_MBOX_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; #endif #ifdef CONFIG_ESPI_PERIPHERAL_HOST_IO_PVT ESPI_EIO_BAR_REGS->EC_BAR_ACPI_EC_1 = CONFIG_ESPI_PERIPHERAL_HOST_IO_PVT_PORT_NUM | MCHP_ESPI_IO_BAR_HOST_VALID; ESPI_EIO_BAR_REGS->EC_BAR_MBOX = ESPI_XEC_MBOX_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; #endif #ifdef CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80 ESPI_EIO_BAR_REGS->EC_BAR_P80CAP_0 = ESPI_XEC_PORT80_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; PORT80_CAP0_REGS->ACTV = 1; ESPI_EIO_BAR_REGS->EC_BAR_P80CAP_1 = ESPI_XEC_PORT81_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; PORT80_CAP1_REGS->ACTV = 1; #endif } static void configure_sirq(void) { #ifdef CONFIG_ESPI_PERIPHERAL_UART switch (CONFIG_ESPI_PERIPHERAL_UART_SOC_MAPPING) { case ESPI_PERIPHERAL_UART_PORT0: ESPI_SIRQ_REGS->UART_0_SIRQ = UART_DEFAULT_IRQ; break; case ESPI_PERIPHERAL_UART_PORT1: ESPI_SIRQ_REGS->UART_1_SIRQ = UART_DEFAULT_IRQ; break; case ESPI_PERIPHERAL_UART_PORT2: ESPI_SIRQ_REGS->UART_2_SIRQ = UART_DEFAULT_IRQ; break; } #endif #ifdef CONFIG_ESPI_PERIPHERAL_8042_KBC ESPI_SIRQ_REGS->KBC_SIRQ_0 = 0x01; ESPI_SIRQ_REGS->KBC_SIRQ_1 = 0x0C; #endif } static void 
setup_espi_io_config(const struct device *dev, uint16_t host_address) { ESPI_EIO_BAR_REGS->EC_BAR_IOC = (host_address << 16) | MCHP_ESPI_IO_BAR_HOST_VALID; config_sub_devices(dev); configure_sirq(); ESPI_PC_REGS->PC_STATUS = (MCHP_ESPI_PC_STS_EN_CHG | MCHP_ESPI_PC_STS_BM_EN_CHG_POS); ESPI_PC_REGS->PC_IEN |= MCHP_ESPI_PC_IEN_EN_CHG; ESPI_CAP_REGS->PC_RDY = 1; } static void espi_pc_isr(const struct device *dev) { uint32_t status = ESPI_PC_REGS->PC_STATUS; if (status & MCHP_ESPI_PC_STS_EN_CHG) { if (status & MCHP_ESPI_PC_STS_EN) { setup_espi_io_config(dev, MCHP_ESPI_IOBAR_INIT_DFLT); } ESPI_PC_REGS->PC_STATUS = MCHP_ESPI_PC_STS_EN_CHG; } } static void espi_vwire_chanel_isr(const struct device *dev) { struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); const struct espi_xec_config *config = dev->config; struct espi_event evt = { .evt_type = ESPI_BUS_EVENT_CHANNEL_READY, .evt_details = ESPI_CHANNEL_VWIRE, .evt_data = 0 }; uint32_t status; status = ESPI_IO_VW_REGS->VW_EN_STS; if (status & MCHP_ESPI_VW_EN_STS_RO) { ESPI_IO_VW_REGS->VW_RDY = 1; evt.evt_data = 1; /* VW channel interrupt can disabled at this point */ MCHP_GIRQ_ENCLR(config->bus_girq_id) = MCHP_ESPI_VW_EN_GIRQ_VAL; #ifdef CONFIG_ESPI_AUTOMATIC_BOOT_DONE_ACKNOWLEDGE send_target_bootdone(dev); #endif } espi_send_callbacks(&data->callbacks, dev, evt); } #ifdef CONFIG_ESPI_OOB_CHANNEL static void espi_oob_down_isr(const struct device *dev) { uint32_t status; struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); #ifdef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC struct espi_event evt = { .evt_type = ESPI_BUS_EVENT_OOB_RECEIVED, .evt_details = 0, .evt_data = 0 }; #endif status = ESPI_OOB_REGS->RX_STS; LOG_DBG("%s %x", __func__, status); if (status & MCHP_ESPI_OOB_RX_STS_DONE) { /* Register is write-on-clear, ensure only 1 bit is affected */ ESPI_OOB_REGS->RX_STS = MCHP_ESPI_OOB_RX_STS_DONE; #ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC k_sem_give(&data->rx_lock); #else evt.evt_details = 
ESPI_OOB_REGS->RX_LEN & MCHP_ESPI_OOB_RX_LEN_MASK; espi_send_callbacks(&data->callbacks, dev, evt); #endif } } static void espi_oob_up_isr(const struct device *dev) { uint32_t status; struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); struct espi_event evt = { .evt_type = ESPI_BUS_EVENT_CHANNEL_READY, .evt_details = ESPI_CHANNEL_OOB, .evt_data = 0 }; status = ESPI_OOB_REGS->TX_STS; LOG_DBG("%s sts:%x", __func__, status); if (status & MCHP_ESPI_OOB_TX_STS_DONE) { /* Register is write-on-clear, ensure only 1 bit is affected */ ESPI_OOB_REGS->TX_STS = MCHP_ESPI_OOB_TX_STS_DONE; k_sem_give(&data->tx_lock); } if (status & MCHP_ESPI_OOB_TX_STS_CHG_EN) { if (status & MCHP_ESPI_OOB_TX_STS_CHEN) { espi_init_oob(dev); /* Indicate OOB channel is ready to eSPI host */ ESPI_CAP_REGS->OOB_RDY = 1; evt.evt_data = 1; } ESPI_OOB_REGS->TX_STS = MCHP_ESPI_OOB_TX_STS_CHG_EN; espi_send_callbacks(&data->callbacks, dev, evt); } } #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL static void espi_flash_isr(const struct device *dev) { uint32_t status; struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); struct espi_event evt = { .evt_type = ESPI_BUS_EVENT_CHANNEL_READY, .evt_details = ESPI_CHANNEL_FLASH, .evt_data = 0, }; status = ESPI_FC_REGS->STS; LOG_DBG("%s %x", __func__, status); if (status & MCHP_ESPI_FC_STS_DONE) { /* Ensure to clear only relevant bit */ ESPI_FC_REGS->STS = MCHP_ESPI_FC_STS_DONE; k_sem_give(&data->flash_lock); } if (status & MCHP_ESPI_FC_STS_CHAN_EN_CHG) { /* Ensure to clear only relevant bit */ ESPI_FC_REGS->STS = MCHP_ESPI_FC_STS_CHAN_EN_CHG; if (status & MCHP_ESPI_FC_STS_CHAN_EN) { espi_init_flash(dev); /* Indicate flash channel is ready to eSPI controller */ ESPI_CAP_REGS->FC_RDY = MCHP_ESPI_FC_READY; evt.evt_data = 1; } espi_send_callbacks(&data->callbacks, dev, evt); } } #endif static void vw_pltrst_isr(const struct device *dev) { struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); struct espi_event evt = { 
ESPI_BUS_EVENT_VWIRE_RECEIVED, ESPI_VWIRE_SIGNAL_PLTRST, 0 }; uint8_t status = 0; espi_xec_receive_vwire(dev, ESPI_VWIRE_SIGNAL_PLTRST, &status); if (status) { setup_espi_io_config(dev, MCHP_ESPI_IOBAR_INIT_DFLT); } evt.evt_data = status; espi_send_callbacks(&data->callbacks, dev, evt); } /* Send callbacks if enabled and track eSPI host system state */ static void notify_system_state(const struct device *dev, enum espi_vwire_signal signal) { struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); struct espi_event evt = { ESPI_BUS_EVENT_VWIRE_RECEIVED, 0, 0 }; uint8_t status = 0; espi_xec_receive_vwire(dev, signal, &status); evt.evt_details = signal; evt.evt_data = status; espi_send_callbacks(&data->callbacks, dev, evt); } static void notify_host_warning(const struct device *dev, enum espi_vwire_signal signal) { uint8_t status; espi_xec_receive_vwire(dev, signal, &status); if (!IS_ENABLED(CONFIG_ESPI_AUTOMATIC_WARNING_ACKNOWLEDGE)) { struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); struct espi_event evt = {ESPI_BUS_EVENT_VWIRE_RECEIVED, 0, 0 }; evt.evt_details = signal; evt.evt_data = status; espi_send_callbacks(&data->callbacks, dev, evt); } else { k_busy_wait(ESPI_XEC_VWIRE_ACK_DELAY); /* Some flows are dependent on awareness of client's driver * about these warnings in such cases these automatic response * should not be enabled. 
*/ switch (signal) { case ESPI_VWIRE_SIGNAL_HOST_RST_WARN: espi_xec_send_vwire(dev, ESPI_VWIRE_SIGNAL_HOST_RST_ACK, status); break; case ESPI_VWIRE_SIGNAL_SUS_WARN: espi_xec_send_vwire(dev, ESPI_VWIRE_SIGNAL_SUS_ACK, status); break; case ESPI_VWIRE_SIGNAL_OOB_RST_WARN: espi_xec_send_vwire(dev, ESPI_VWIRE_SIGNAL_OOB_RST_ACK, status); break; case ESPI_VWIRE_SIGNAL_DNX_WARN: espi_xec_send_vwire(dev, ESPI_VWIRE_SIGNAL_DNX_ACK, status); break; default: break; } } } static void vw_slp3_isr(const struct device *dev) { notify_system_state(dev, ESPI_VWIRE_SIGNAL_SLP_S3); } static void vw_slp4_isr(const struct device *dev) { notify_system_state(dev, ESPI_VWIRE_SIGNAL_SLP_S4); } static void vw_slp5_isr(const struct device *dev) { notify_system_state(dev, ESPI_VWIRE_SIGNAL_SLP_S5); } static void vw_host_rst_warn_isr(const struct device *dev) { notify_host_warning(dev, ESPI_VWIRE_SIGNAL_HOST_RST_WARN); } static void vw_sus_warn_isr(const struct device *dev) { notify_host_warning(dev, ESPI_VWIRE_SIGNAL_SUS_WARN); /* Configure spare VW register SMVW06 to VW index 50h. As per * per microchip recommendation, spare VW register should be * configured between TARGET_BOOT_LOAD_DONE = 1 VW event and * point where SUS_ACK=1 VW is sent to SOC. 
*/ espi_config_vw_ocb(); } static void vw_oob_rst_isr(const struct device *dev) { notify_host_warning(dev, ESPI_VWIRE_SIGNAL_OOB_RST_WARN); } static void vw_sus_pwrdn_ack_isr(const struct device *dev) { notify_system_state(dev, ESPI_VWIRE_SIGNAL_SUS_PWRDN_ACK); } static void vw_sus_slp_a_isr(const struct device *dev) { notify_system_state(dev, ESPI_VWIRE_SIGNAL_SLP_A); } static void ibf_isr(const struct device *dev) { struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); struct espi_event evt = { ESPI_BUS_PERIPHERAL_NOTIFICATION, ESPI_PERIPHERAL_HOST_IO, ESPI_PERIPHERAL_NODATA }; espi_send_callbacks(&data->callbacks, dev, evt); } #ifdef CONFIG_ESPI_PERIPHERAL_HOST_IO_PVT static void ibf_pvt_isr(const struct device *dev) { struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); struct espi_event evt = { .evt_type = ESPI_BUS_PERIPHERAL_NOTIFICATION, .evt_details = ESPI_PERIPHERAL_HOST_IO_PVT, .evt_data = ESPI_PERIPHERAL_NODATA }; espi_send_callbacks(&data->callbacks, dev, evt); } #endif static void ibf_kbc_isr(const struct device *dev) { struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); /* The high byte contains information from the host, * and the lower byte specifies if the host sent * a command or data. 1 = Command. 
*/ uint32_t isr_data = ((KBC_REGS->EC_DATA & 0xFF) << E8042_ISR_DATA_POS) | ((KBC_REGS->EC_KBC_STS & MCHP_KBC_STS_CD) << E8042_ISR_CMD_DATA_POS); struct espi_event evt = { .evt_type = ESPI_BUS_PERIPHERAL_NOTIFICATION, .evt_details = ESPI_PERIPHERAL_8042_KBC, .evt_data = isr_data }; espi_send_callbacks(&data->callbacks, dev, evt); } static void port80_isr(const struct device *dev) { struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); struct espi_event evt = { ESPI_BUS_PERIPHERAL_NOTIFICATION, (ESPI_PERIPHERAL_INDEX_0 << 16) | ESPI_PERIPHERAL_DEBUG_PORT80, ESPI_PERIPHERAL_NODATA }; evt.evt_data = PORT80_CAP0_REGS->EC_DATA; espi_send_callbacks(&data->callbacks, dev, evt); } static void port81_isr(const struct device *dev) { struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); struct espi_event evt = { ESPI_BUS_PERIPHERAL_NOTIFICATION, (ESPI_PERIPHERAL_INDEX_1 << 16) | ESPI_PERIPHERAL_DEBUG_PORT80, ESPI_PERIPHERAL_NODATA }; evt.evt_data = PORT80_CAP1_REGS->EC_DATA; espi_send_callbacks(&data->callbacks, dev, evt); } const struct espi_isr espi_bus_isr[] = { {MCHP_ESPI_PC_GIRQ_VAL, espi_pc_isr}, #ifdef CONFIG_ESPI_OOB_CHANNEL {MCHP_ESPI_OOB_UP_GIRQ_VAL, espi_oob_up_isr}, {MCHP_ESPI_OOB_DN_GIRQ_VAL, espi_oob_down_isr}, #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL {MCHP_ESPI_FC_GIRQ_VAL, espi_flash_isr}, #endif {MCHP_ESPI_ESPI_RST_GIRQ_VAL, espi_rst_isr}, {MCHP_ESPI_VW_EN_GIRQ_VAL, espi_vwire_chanel_isr}, }; uint8_t vw_wires_int_en[] = { ESPI_VWIRE_SIGNAL_SLP_S3, ESPI_VWIRE_SIGNAL_SLP_S4, ESPI_VWIRE_SIGNAL_SLP_S5, ESPI_VWIRE_SIGNAL_PLTRST, ESPI_VWIRE_SIGNAL_OOB_RST_WARN, ESPI_VWIRE_SIGNAL_HOST_RST_WARN, ESPI_VWIRE_SIGNAL_SUS_WARN, ESPI_VWIRE_SIGNAL_SUS_PWRDN_ACK, ESPI_VWIRE_SIGNAL_DNX_WARN, }; const struct espi_isr m2s_vwires_isr[] = { {MEC_ESPI_MSVW00_SRC0_VAL, vw_slp3_isr}, {MEC_ESPI_MSVW00_SRC1_VAL, vw_slp4_isr}, {MEC_ESPI_MSVW00_SRC2_VAL, vw_slp5_isr}, {MEC_ESPI_MSVW01_SRC1_VAL, vw_pltrst_isr}, {MEC_ESPI_MSVW01_SRC2_VAL, vw_oob_rst_isr}, 
{MEC_ESPI_MSVW02_SRC0_VAL, vw_host_rst_warn_isr}, {MEC_ESPI_MSVW03_SRC0_VAL, vw_sus_warn_isr}, {MEC_ESPI_MSVW03_SRC1_VAL, vw_sus_pwrdn_ack_isr}, {MEC_ESPI_MSVW03_SRC3_VAL, vw_sus_slp_a_isr}, }; const struct espi_isr peripherals_isr[] = { {MCHP_ACPI_EC_0_IBF_GIRQ, ibf_isr}, #ifdef CONFIG_ESPI_PERIPHERAL_HOST_IO_PVT {MCHP_ACPI_EC_1_IBF_GIRQ, ibf_pvt_isr}, #endif {MCHP_KBC_IBF_GIRQ, ibf_kbc_isr}, {MCHP_PORT80_DEBUG0_GIRQ_VAL, port80_isr}, {MCHP_PORT80_DEBUG1_GIRQ_VAL, port81_isr}, }; static uint8_t bus_isr_cnt = sizeof(espi_bus_isr) / sizeof(struct espi_isr); static uint8_t m2s_vwires_isr_cnt = sizeof(m2s_vwires_isr) / sizeof(struct espi_isr); static uint8_t periph_isr_cnt = sizeof(peripherals_isr) / sizeof(struct espi_isr); static void espi_xec_bus_isr(const struct device *dev) { const struct espi_xec_config *config = dev->config; uint32_t girq_result; girq_result = MCHP_GIRQ_RESULT(config->bus_girq_id); for (int i = 0; i < bus_isr_cnt; i++) { struct espi_isr entry = espi_bus_isr[i]; if (girq_result & entry.girq_bit) { if (entry.the_isr != NULL) { entry.the_isr(dev); } } } REG32(MCHP_GIRQ_SRC_ADDR(config->bus_girq_id)) = girq_result; } static void espi_xec_vw_isr(const struct device *dev) { const struct espi_xec_config *config = dev->config; uint32_t girq_result; girq_result = MCHP_GIRQ_RESULT(config->vw_girq_ids[0]); MCHP_GIRQ_SRC(config->vw_girq_ids[0]) = girq_result; for (int i = 0; i < m2s_vwires_isr_cnt; i++) { struct espi_isr entry = m2s_vwires_isr[i]; if (girq_result & entry.girq_bit) { if (entry.the_isr != NULL) { entry.the_isr(dev); } } } } #if DT_INST_PROP_HAS_IDX(0, vw_girqs, 1) static void vw_sus_dnx_warn_isr(const struct device *dev) { notify_host_warning(dev, ESPI_VWIRE_SIGNAL_DNX_WARN); } const struct espi_isr m2s_vwires_ext_isr[] = { {MEC_ESPI_MSVW08_SRC1_VAL, vw_sus_dnx_warn_isr} }; static void espi_xec_vw_ext_isr(const struct device *dev) { const struct espi_xec_config *config = dev->config; uint32_t girq_result; girq_result = 
MCHP_GIRQ_RESULT(config->vw_girq_ids[1]); MCHP_GIRQ_SRC(config->vw_girq_ids[1]) = girq_result; for (int i = 0; i < ARRAY_SIZE(m2s_vwires_ext_isr); i++) { struct espi_isr entry = m2s_vwires_ext_isr[i]; if (girq_result & entry.girq_bit) { if (entry.the_isr != NULL) { entry.the_isr(dev); } } } } #endif static void espi_xec_periph_isr(const struct device *dev) { const struct espi_xec_config *config = dev->config; uint32_t girq_result; girq_result = MCHP_GIRQ_RESULT(config->pc_girq_id); for (int i = 0; i < periph_isr_cnt; i++) { struct espi_isr entry = peripherals_isr[i]; if (girq_result & entry.girq_bit) { if (entry.the_isr != NULL) { entry.the_isr(dev); } } } REG32(MCHP_GIRQ_SRC_ADDR(config->pc_girq_id)) = girq_result; } static int espi_xec_init(const struct device *dev); static const struct espi_driver_api espi_xec_driver_api = { .config = espi_xec_configure, .get_channel_status = espi_xec_channel_ready, .send_vwire = espi_xec_send_vwire, .receive_vwire = espi_xec_receive_vwire, #ifdef CONFIG_ESPI_OOB_CHANNEL .send_oob = espi_xec_send_oob, .receive_oob = espi_xec_receive_oob, #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL .flash_read = espi_xec_flash_read, .flash_write = espi_xec_flash_write, .flash_erase = espi_xec_flash_erase, #endif .manage_callback = espi_xec_manage_callback, .read_lpc_request = espi_xec_read_lpc_request, .write_lpc_request = espi_xec_write_lpc_request, }; static struct espi_xec_data espi_xec_data; /* pin control structure(s) */ PINCTRL_DT_INST_DEFINE(0); static const struct espi_xec_config espi_xec_config = { .base_addr = DT_INST_REG_ADDR(0), .bus_girq_id = DT_INST_PROP(0, io_girq), .vw_girq_ids[0] = DT_INST_PROP_BY_IDX(0, vw_girqs, 0), .vw_girq_ids[1] = DT_INST_PROP_BY_IDX(0, vw_girqs, 1), .pc_girq_id = DT_INST_PROP(0, pc_girq), .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), }; DEVICE_DT_INST_DEFINE(0, &espi_xec_init, NULL, &espi_xec_data, &espi_xec_config, PRE_KERNEL_2, CONFIG_ESPI_INIT_PRIORITY, &espi_xec_driver_api); static int espi_xec_init(const 
struct device *dev) { const struct espi_xec_config *config = dev->config; struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); int ret; ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret != 0) { LOG_ERR("XEC eSPI pinctrl setup failed (%d)", ret); return ret; } /* Configure eSPI_PLTRST# to cause nSIO_RESET reset */ PCR_REGS->PWR_RST_CTRL = MCHP_PCR_PR_CTRL_USE_ESPI_PLTRST; ESPI_CAP_REGS->PLTRST_SRC = MCHP_ESPI_PLTRST_SRC_IS_VW; /* Configure the channels and its capabilities based on build config */ ESPI_CAP_REGS->GLB_CAP0 |= MCHP_ESPI_GBL_CAP0_VW_SUPP; ESPI_CAP_REGS->GLB_CAP0 |= MCHP_ESPI_GBL_CAP0_PC_SUPP; /* Max VW count is 12 pairs */ ESPI_CAP_REGS->VW_CAP = ESPI_NUM_SMVW; ESPI_CAP_REGS->PC_CAP |= MCHP_ESPI_PC_CAP_MAX_PLD_SZ_64; #ifdef CONFIG_ESPI_OOB_CHANNEL ESPI_CAP_REGS->GLB_CAP0 |= MCHP_ESPI_GBL_CAP0_OOB_SUPP; ESPI_CAP_REGS->OOB_CAP |= MCHP_ESPI_OOB_CAP_MAX_PLD_SZ_73; k_sem_init(&data->tx_lock, 0, 1); #ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC k_sem_init(&data->rx_lock, 0, 1); #endif /* CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC */ #else ESPI_CAP_REGS->GLB_CAP0 &= ~MCHP_ESPI_GBL_CAP0_OOB_SUPP; #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL ESPI_CAP_REGS->GLB_CAP0 |= MCHP_ESPI_GBL_CAP0_FC_SUPP; ESPI_CAP_REGS->GLB_CAP0 |= MCHP_ESPI_FC_CAP_MAX_PLD_SZ_64; ESPI_CAP_REGS->FC_CAP |= MCHP_ESPI_FC_CAP_SHARE_MAF_SAF; ESPI_CAP_REGS->FC_CAP |= MCHP_ESPI_FC_CAP_MAX_RD_SZ_64; k_sem_init(&data->flash_lock, 0, 1); #else ESPI_CAP_REGS->GLB_CAP0 &= ~MCHP_ESPI_GBL_CAP0_FC_SUPP; #endif /* Clear reset interrupt status and enable interrupts */ ESPI_CAP_REGS->ERST_STS = MCHP_ESPI_RST_ISTS; ESPI_CAP_REGS->ERST_IEN |= MCHP_ESPI_RST_IEN; ESPI_PC_REGS->PC_STATUS = MCHP_ESPI_PC_STS_EN_CHG; ESPI_PC_REGS->PC_IEN |= MCHP_ESPI_PC_IEN_EN_CHG; /* Enable VWires interrupts */ for (int i = 0; i < sizeof(vw_wires_int_en); i++) { uint8_t signal = vw_wires_int_en[i]; struct xec_signal signal_info = vw_tbl[signal]; uint8_t xec_id = signal_info.xec_reg_idx; ESPI_MSVW_REG *reg = 
&(ESPI_M2S_VW_REGS->MSVW00) + xec_id; mec_espi_msvw_irq_sel_set(reg, signal_info.bit, MSVW_IRQ_SEL_EDGE_BOTH); } /* Enable interrupts for each logical channel enable assertion */ MCHP_GIRQ_ENSET(config->bus_girq_id) = MCHP_ESPI_ESPI_RST_GIRQ_VAL | MCHP_ESPI_VW_EN_GIRQ_VAL | MCHP_ESPI_PC_GIRQ_VAL; #ifdef CONFIG_ESPI_OOB_CHANNEL espi_init_oob(dev); #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL espi_init_flash(dev); #endif /* Enable aggregated block interrupts for VWires */ MCHP_GIRQ_ENSET(config->vw_girq_ids[0]) = MEC_ESPI_MSVW00_SRC0_VAL | MEC_ESPI_MSVW00_SRC1_VAL | MEC_ESPI_MSVW00_SRC2_VAL | MEC_ESPI_MSVW01_SRC1_VAL | MEC_ESPI_MSVW01_SRC2_VAL | MEC_ESPI_MSVW02_SRC0_VAL | MEC_ESPI_MSVW03_SRC0_VAL; /* Enable aggregated block interrupts for peripherals supported */ #ifdef CONFIG_ESPI_PERIPHERAL_8042_KBC MCHP_GIRQ_ENSET(config->pc_girq_id) = MCHP_KBC_IBF_GIRQ; #endif #ifdef CONFIG_ESPI_PERIPHERAL_HOST_IO MCHP_GIRQ_ENSET(config->pc_girq_id) = MCHP_ACPI_EC_0_IBF_GIRQ; MCHP_GIRQ_ENSET(config->pc_girq_id) = MCHP_ACPI_EC_2_IBF_GIRQ; #endif #ifdef CONFIG_ESPI_PERIPHERAL_HOST_IO_PVT MCHP_GIRQ_ENSET(config->pc_girq_id) = MCHP_ACPI_EC_1_IBF_GIRQ; #endif #ifdef CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80 MCHP_GIRQ_ENSET(config->pc_girq_id) = MCHP_PORT80_DEBUG0_GIRQ_VAL | MCHP_PORT80_DEBUG1_GIRQ_VAL; #endif /* Enable aggregated interrupt block for eSPI bus events */ MCHP_GIRQ_BLK_SETEN(config->bus_girq_id); IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), espi_xec_bus_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); /* Enable aggregated interrupt block for eSPI VWire events */ MCHP_GIRQ_BLK_SETEN(config->vw_girq_ids[0]); IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 1, irq), DT_INST_IRQ_BY_IDX(0, 1, priority), espi_xec_vw_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_IDX(0, 1, irq)); /* Enable aggregated interrupt block for eSPI peripheral channel */ MCHP_GIRQ_BLK_SETEN(config->pc_girq_id); IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 2, irq), DT_INST_IRQ_BY_IDX(0, 2, priority), 
espi_xec_periph_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_IDX(0, 2, irq)); #if DT_INST_PROP_HAS_IDX(0, vw_girqs, 1) MCHP_GIRQ_ENSET(config->vw_girq_ids[1]) = MEC_ESPI_MSVW08_SRC1_VAL; MCHP_GIRQ_BLK_SETEN(config->vw_girq_ids[1]); IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 3, irq), DT_INST_IRQ_BY_IDX(0, 3, priority), espi_xec_vw_ext_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_IDX(0, 3, irq)); #endif return 0; } ```
/content/code_sandbox/drivers/espi/espi_mchp_xec.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
15,425
```c /* * */ #define DT_DRV_COMPAT nuvoton_npcx_espi_taf #include <soc.h> #include <zephyr/drivers/espi.h> #include <zephyr/drivers/espi_saf.h> #include <zephyr/drivers/flash.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(espi_taf, CONFIG_ESPI_LOG_LEVEL); static const struct device *const spi_dev = DEVICE_DT_GET(DT_ALIAS(taf_flash)); enum ESPI_TAF_ERASE_LEN { NPCX_ESPI_TAF_ERASE_LEN_4KB, NPCX_ESPI_TAF_ERASE_LEN_32KB, NPCX_ESPI_TAF_ERASE_LEN_64KB, NPCX_ESPI_TAF_ERASE_LEN_128KB, NPCX_ESPI_TAF_ERASE_LEN_MAX, }; struct espi_taf_npcx_config { uintptr_t base; uintptr_t mapped_addr; uintptr_t rx_plsz; enum NPCX_ESPI_TAF_ERASE_BLOCK_SIZE erase_sz; enum NPCX_ESPI_TAF_MAX_READ_REQ max_rd_sz; }; struct espi_taf_npcx_data { sys_slist_t *callbacks; const struct device *host_dev; uint8_t taf_type; uint8_t taf_tag; uint32_t address; uint16_t length; uint32_t src[16]; struct k_work work; }; static struct espi_taf_npcx_data npcx_espi_taf_data; static struct espi_callback espi_taf_cb; #define HAL_INSTANCE(dev) \ ((struct espi_reg *)((const struct espi_taf_npcx_config *) \ (dev)->config)->base) #define FLBASE_ADDR ( \ GET_FIELD(inst->FLASHBASE, NPCX_FLASHBASE_FLBASE_ADDR) \ << GET_FIELD_POS(NPCX_FLASHBASE_FLBASE_ADDR)) #define PRTR_BADDR(i) ( \ GET_FIELD(inst->FLASH_PRTR_BADDR[i], NPCX_FLASH_PRTR_BADDR) \ << GET_FIELD_POS(NPCX_FLASH_PRTR_BADDR)) #define PRTR_HADDR(i) ( \ GET_FIELD(inst->FLASH_PRTR_HADDR[i], NPCX_FLASH_PRTR_HADDR) \ << GET_FIELD_POS(NPCX_FLASH_PRTR_HADDR)) | 0xFFF; static void espi_taf_get_pckt(const struct device *dev, struct espi_taf_npcx_data *pckt, struct espi_event event) { struct espi_taf_pckt *data_ptr; data_ptr = (struct espi_taf_pckt *)event.evt_data; pckt->taf_type = data_ptr->type; pckt->length = data_ptr->len; pckt->taf_tag = data_ptr->tag; pckt->address = data_ptr->addr; if (data_ptr->type == NPCX_ESPI_TAF_REQ_WRITE) { memcpy(pckt->src, data_ptr->src, sizeof(pckt->src)); } } #if defined(CONFIG_ESPI_TAF_MANUAL_MODE) /* 
Check access region of read request is protected or not */ static bool espi_taf_check_read_protect(const struct device *dev, uint32_t addr, uint32_t len, uint8_t tag) { struct espi_reg *const inst = HAL_INSTANCE(dev); uint32_t flash_addr = addr; uint8_t i; uint16_t override_rd; uint32_t base, high; bool rdpr; flash_addr += FLBASE_ADDR; for (i = 0; i < CONFIG_ESPI_TAF_PR_NUM; i++) { base = PRTR_BADDR(i); high = PRTR_HADDR(i); rdpr = IS_BIT_SET(inst->FLASH_PRTR_BADDR[i], NPCX_FRGN_RPR); override_rd = GET_FIELD(inst->FLASH_RGN_TAG_OVR[i], NPCX_FLASH_TAG_OVR_RPR); if (rdpr && !IS_BIT_SET(override_rd, tag) && (base <= flash_addr + len - 1 && flash_addr <= high)) { return true; } } return false; } #endif /* Check access region of write request is protected or not */ static bool espi_taf_check_write_protect(const struct device *dev, uint32_t addr, uint32_t len, uint8_t tag) { struct espi_reg *const inst = HAL_INSTANCE(dev); uint32_t flash_addr = addr; uint8_t i; uint16_t override_wr; uint32_t base, high; bool wrpr; flash_addr += FLBASE_ADDR; for (i = 0; i < CONFIG_ESPI_TAF_PR_NUM; i++) { base = PRTR_BADDR(i); high = PRTR_HADDR(i); wrpr = IS_BIT_SET(inst->FLASH_PRTR_BADDR[i], NPCX_FRGN_WPR); override_wr = GET_FIELD(inst->FLASH_RGN_TAG_OVR[i], NPCX_FLASH_TAG_OVR_WPR); if (wrpr && !IS_BIT_SET(override_wr, tag) && (base <= flash_addr + len - 1 && flash_addr <= high)) { return true; } } return false; } static int espi_taf_npcx_configure(const struct device *dev, const struct espi_saf_cfg *cfg) { struct espi_reg *const inst = HAL_INSTANCE(dev); if (cfg->nflash_devices == 0U) { return -EINVAL; } #if defined(CONFIG_ESPI_TAF_AUTO_MODE) inst->FLASHCTL |= BIT(NPCX_FLASHCTL_SAF_AUTO_READ); #else inst->FLASHCTL &= ~BIT(NPCX_FLASHCTL_SAF_AUTO_READ); #endif return 0; } static int espi_taf_npcx_set_pr(const struct device *dev, const struct espi_saf_protection *pr) { struct espi_reg *const inst = HAL_INSTANCE(dev); const struct espi_saf_pr *preg = pr->pregions; size_t n = pr->nregions; 
uint8_t regnum; uint16_t bitmask, offset; uint32_t rw_pr, override_rw; if ((dev == NULL) || (pr == NULL)) { return -EINVAL; } if (pr->nregions >= CONFIG_ESPI_TAF_PR_NUM) { return -EINVAL; } while (n--) { regnum = preg->pr_num; if (regnum >= CONFIG_ESPI_TAF_PR_NUM) { return -EINVAL; } rw_pr = preg->master_bm_we << NPCX_FRGN_WPR; rw_pr = rw_pr | (preg->master_bm_rd << NPCX_FRGN_RPR); if (preg->flags) { bitmask = BIT_MASK(GET_FIELD_SZ(NPCX_FLASH_PRTR_BADDR)); offset = GET_FIELD_POS(NPCX_FLASH_PRTR_BADDR); inst->FLASH_PRTR_BADDR[regnum] = ((preg->start & bitmask) << offset) | rw_pr; bitmask = BIT_MASK(GET_FIELD_SZ(NPCX_FLASH_PRTR_HADDR)); offset = GET_FIELD_POS(NPCX_FLASH_PRTR_HADDR); inst->FLASH_PRTR_HADDR[regnum] = (preg->end & bitmask) << offset; } override_rw = (preg->override_r << 16) | preg->override_w; inst->FLASH_RGN_TAG_OVR[regnum] = override_rw; preg++; } return 0; } static int espi_taf_npcx_activate(const struct device *dev) { struct espi_reg *const inst = HAL_INSTANCE(dev); inst->FLASHCTL &= ~BIT(NPCX_FLASHCTL_AUTO_RD_DIS_CTL); inst->FLASHCTL &= ~BIT(NPCX_FLASHCTL_BLK_FLASH_NP_FREE); return 0; } static bool espi_taf_npcx_channel_ready(const struct device *dev) { struct espi_reg *const inst = HAL_INSTANCE(dev); uint8_t ret = GET_FIELD(inst->FLASHCFG, NPCX_FLASHCFG_FLCAPA) & NPCX_FLASH_SHARING_CAP_SUPP_TAF; if (ret != NPCX_FLASH_SHARING_CAP_SUPP_TAF) { return false; } if (!device_is_ready(spi_dev)) { return false; } return true; } /* This routine set FLASH_C_AVAIL for standard request */ static void taf_set_flash_c_avail(const struct device *dev) { struct espi_reg *const inst = HAL_INSTANCE(dev); uint32_t tmp = inst->FLASHCTL; /* * Clear FLASHCTL_FLASH_NP_FREE to avoid host puts a flash * standard request command at here. 
*/ tmp &= NPCX_FLASHCTL_ACCESS_MASK; /* Set FLASHCTL_FLASH_TX_AVAIL */ tmp |= BIT(NPCX_FLASHCTL_FLASH_TX_AVAIL); inst->FLASHCTL = tmp; } /* This routine release FLASH_NP_FREE for standard request */ static void taf_release_flash_np_free(const struct device *dev) { struct espi_reg *const inst = HAL_INSTANCE(dev); uint32_t tmp = inst->FLASHCTL; /* * Clear FLASHCTL_FLASH_TX_AVAIL to avoid host puts a * GET_FLASH_C command at here. */ tmp &= NPCX_FLASHCTL_ACCESS_MASK; /* Release FLASH_NP_FREE */ tmp |= BIT(NPCX_FLASHCTL_FLASH_NP_FREE); inst->FLASHCTL = tmp; } static int taf_npcx_completion_handler(const struct device *dev, uint8_t type, uint8_t tag, uint16_t len, uint32_t *buffer) { struct espi_reg *const inst = HAL_INSTANCE(dev); struct npcx_taf_head taf_head; uint16_t i, size; uint32_t tx_buf[16]; taf_head.pkt_len = NPCX_TAF_CMP_HEADER_LEN + len; taf_head.type = type; taf_head.tag_hlen = (tag << 4) | ((len & 0xF00) >> 8); taf_head.llen = len & 0xFF; memcpy(&tx_buf[0], &taf_head, sizeof(struct npcx_taf_head)); if (type == CYC_SCS_CMP_WITH_DATA_ONLY || type == CYC_SCS_CMP_WITH_DATA_FIRST || type == CYC_SCS_CMP_WITH_DATA_MIDDLE || type == CYC_SCS_CMP_WITH_DATA_LAST) { memcpy(&tx_buf[1], buffer, (uint8_t)(len)); } /* Check the Flash Access TX Queue is empty by polling * FLASH_TX_AVAIL. 
*/ if (WAIT_FOR(!IS_BIT_SET(inst->FLASHCTL, NPCX_FLASHCTL_FLASH_TX_AVAIL), NPCX_FLASH_CHK_TIMEOUT, NULL) == false) { LOG_ERR("Check TX Queue Is Empty Timeout"); return -EBUSY; } /* Write packet to FLASHTXBUF */ size = DIV_ROUND_UP((uint8_t)(tx_buf[0]) + 1, sizeof(uint32_t)); for (i = 0; i < size; i++) { inst->FLASHTXBUF[i] = tx_buf[i]; } /* Set the FLASHCTL.FLASH_TX_AVAIL bit to 1 to enqueue the packet */ taf_set_flash_c_avail(dev); /* Release FLASH_NP_FREE here to ready get next TAF request */ if ((type != CYC_SCS_CMP_WITH_DATA_FIRST) && (type != CYC_SCS_CMP_WITH_DATA_MIDDLE)) { taf_release_flash_np_free(dev); } return 0; } #if defined(CONFIG_ESPI_TAF_MANUAL_MODE) static int espi_taf_npcx_flash_read(const struct device *dev, struct espi_saf_packet *pckt) { struct espi_reg *const inst = HAL_INSTANCE(dev); struct espi_taf_npcx_config *config = ((struct espi_taf_npcx_config *)(dev)->config); struct espi_taf_npcx_pckt *taf_data_ptr = (struct espi_taf_npcx_pckt *)pckt->buf; uint8_t cycle_type = CYC_SCS_CMP_WITH_DATA_ONLY; uint32_t total_len = pckt->len; uint32_t len = total_len; uint32_t addr = pckt->flash_addr; uint8_t flash_req_size = GET_FIELD(inst->FLASHCFG, NPCX_FLASHCFG_FLASHREQSIZE); uint8_t target_max_size = GET_FIELD(inst->FLASHCFG, NPCX_FLASHCFG_FLREQSUP); uint16_t max_read_req = 32 << flash_req_size; uint8_t read_buf[64]; int rc; if (flash_req_size > target_max_size) { LOG_DBG("Exceeded the maximum supported length"); if (target_max_size == 0) { target_max_size = 1; } max_read_req = 32 << target_max_size; } if (total_len > max_read_req) { LOG_ERR("Exceeded the limitation of read length"); return -EINVAL; } if (espi_taf_check_read_protect(dev, addr, len, taf_data_ptr->tag)) { LOG_ERR("Access protect region"); return -EINVAL; } if (total_len <= config->rx_plsz) { cycle_type = CYC_SCS_CMP_WITH_DATA_ONLY; len = total_len; } else { cycle_type = CYC_SCS_CMP_WITH_DATA_FIRST; len = config->rx_plsz; } do { rc = flash_read(spi_dev, addr, &read_buf[0], len); if (rc) { 
LOG_ERR("flash read fail 0x%x", rc); return -EIO; } rc = taf_npcx_completion_handler(dev, cycle_type, taf_data_ptr->tag, len, (uint32_t *)&read_buf[0]); if (rc) { LOG_ERR("espi taf completion handler fail"); return rc; } total_len -= len; addr += len; if (total_len <= config->rx_plsz) { cycle_type = CYC_SCS_CMP_WITH_DATA_LAST; len = total_len; } else { cycle_type = CYC_SCS_CMP_WITH_DATA_MIDDLE; } } while (total_len); return 0; } #endif static int espi_taf_npcx_flash_write(const struct device *dev, struct espi_saf_packet *pckt) { struct espi_taf_npcx_pckt *taf_data_ptr = (struct espi_taf_npcx_pckt *)pckt->buf; uint8_t *data_ptr = (uint8_t *)(taf_data_ptr->data); int rc; if (espi_taf_check_write_protect(dev, pckt->flash_addr, pckt->len, taf_data_ptr->tag)) { LOG_ERR("Access protection region"); return -EINVAL; } rc = flash_write(spi_dev, pckt->flash_addr, data_ptr, pckt->len); if (rc) { LOG_ERR("flash write fail 0x%x", rc); return -EIO; } rc = taf_npcx_completion_handler(dev, CYC_SCS_CMP_WITHOUT_DATA, taf_data_ptr->tag, 0x0, NULL); if (rc) { LOG_ERR("espi taf completion handler fail"); return rc; } return 0; } static int espi_taf_npcx_flash_erase(const struct device *dev, struct espi_saf_packet *pckt) { int erase_blk[] = {KB(4), KB(32), KB(64), KB(128)}; struct espi_taf_npcx_pckt *taf_data_ptr = (struct espi_taf_npcx_pckt *)pckt->buf; uint32_t addr = pckt->flash_addr; uint32_t len; int rc; if ((pckt->len < 0) || (pckt->len >= NPCX_ESPI_TAF_ERASE_LEN_MAX)) { LOG_ERR("Invalid erase block size"); return -EINVAL; } len = erase_blk[pckt->len]; if (espi_taf_check_write_protect(dev, addr, len, taf_data_ptr->tag)) { LOG_ERR("Access protection region"); return -EINVAL; } rc = flash_erase(spi_dev, addr, len); if (rc) { LOG_ERR("flash erase fail"); return -EIO; } rc = taf_npcx_completion_handler(dev, CYC_SCS_CMP_WITHOUT_DATA, taf_data_ptr->tag, 0x0, NULL); if (rc) { LOG_ERR("espi taf completion handler fail"); return rc; } return 0; } static int 
espi_taf_npcx_flash_unsuccess(const struct device *dev, struct espi_saf_packet *pckt) { struct espi_taf_npcx_pckt *taf_data_ptr = (struct espi_taf_npcx_pckt *)pckt->buf; int rc; rc = taf_npcx_completion_handler(dev, CYC_UNSCS_CMP_WITHOUT_DATA_ONLY, taf_data_ptr->tag, 0x0, NULL); if (rc) { LOG_ERR("espi taf completion handler fail"); return rc; } return 0; } static void espi_taf_work(struct k_work *item) { struct espi_taf_npcx_data *info = CONTAINER_OF(item, struct espi_taf_npcx_data, work); int ret = 0; struct espi_taf_npcx_pckt taf_data; struct espi_saf_packet pckt_taf; pckt_taf.flash_addr = info->address; pckt_taf.len = info->length; taf_data.tag = info->taf_tag; if (info->taf_type == NPCX_ESPI_TAF_REQ_WRITE) { taf_data.data = (uint8_t *)info->src; } else { taf_data.data = NULL; } pckt_taf.buf = (uint8_t *)&taf_data; switch (info->taf_type) { #if defined(CONFIG_ESPI_TAF_MANUAL_MODE) case NPCX_ESPI_TAF_REQ_READ: ret = espi_taf_npcx_flash_read(info->host_dev, &pckt_taf); break; #endif case NPCX_ESPI_TAF_REQ_ERASE: ret = espi_taf_npcx_flash_erase(info->host_dev, &pckt_taf); break; case NPCX_ESPI_TAF_REQ_WRITE: ret = espi_taf_npcx_flash_write(info->host_dev, &pckt_taf); break; } if (ret != 0) { ret = espi_taf_npcx_flash_unsuccess(info->host_dev, &pckt_taf); } } static void espi_taf_event_handler(const struct device *dev, struct espi_callback *cb, struct espi_event event) { if ((event.evt_type != ESPI_BUS_TAF_NOTIFICATION) || (event.evt_details != ESPI_CHANNEL_FLASH)) { return; } espi_taf_get_pckt(dev, &npcx_espi_taf_data, event); k_work_submit(&npcx_espi_taf_data.work); } int npcx_init_taf(const struct device *dev, sys_slist_t *callbacks) { espi_init_callback(&espi_taf_cb, espi_taf_event_handler, ESPI_BUS_TAF_NOTIFICATION); espi_add_callback(dev, &espi_taf_cb); npcx_espi_taf_data.host_dev = dev; npcx_espi_taf_data.callbacks = callbacks; k_work_init(&npcx_espi_taf_data.work, espi_taf_work); return 0; } static int espi_taf_npcx_init(const struct device *dev) { struct 
espi_reg *const inst = HAL_INSTANCE(dev); struct espi_taf_npcx_config *config = ((struct espi_taf_npcx_config *)(dev)->config); SET_FIELD(inst->FLASHCFG, NPCX_FLASHCFG_FLCAPA, NPCX_FLASH_SHARING_CAP_SUPP_TAF_AND_CAF); SET_FIELD(inst->FLASHCFG, NPCX_FLASHCFG_TRGFLEBLKSIZE, BIT(config->erase_sz)); SET_FIELD(inst->FLASHCFG, NPCX_FLASHCFG_FLREQSUP, config->max_rd_sz); inst->FLASHBASE = config->mapped_addr; return 0; } static const struct espi_saf_driver_api espi_taf_npcx_driver_api = { .config = espi_taf_npcx_configure, .set_protection_regions = espi_taf_npcx_set_pr, .activate = espi_taf_npcx_activate, .get_channel_status = espi_taf_npcx_channel_ready, }; static const struct espi_taf_npcx_config espi_taf_npcx_config = { .base = DT_INST_REG_ADDR(0), .mapped_addr = DT_INST_PROP(0, mapped_addr), .rx_plsz = DT_PROP(DT_INST_PARENT(0), rx_plsize), .erase_sz = DT_INST_STRING_TOKEN(0, erase_sz), .max_rd_sz = DT_INST_STRING_TOKEN(0, max_read_sz), }; DEVICE_DT_INST_DEFINE(0, &espi_taf_npcx_init, NULL, &npcx_espi_taf_data, &espi_taf_npcx_config, PRE_KERNEL_2, CONFIG_ESPI_INIT_PRIORITY, &espi_taf_npcx_driver_api); ```
/content/code_sandbox/drivers/espi/espi_taf_npcx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,623
```c /* * */ #include <zephyr/drivers/espi.h> #include <zephyr/internal/syscall_handler.h> static inline int z_vrfy_espi_config(const struct device *dev, struct espi_cfg *cfg) { struct espi_cfg cfg_copy; K_OOPS(K_SYSCALL_DRIVER_ESPI(dev, config)); K_OOPS(k_usermode_from_copy(&cfg_copy, cfg, sizeof(struct espi_cfg))); return z_impl_espi_config(dev, &cfg_copy); } #include <zephyr/syscalls/espi_config_mrsh.c> static inline bool z_vrfy_espi_get_channel_status(const struct device *dev, enum espi_channel ch) { K_OOPS(K_SYSCALL_DRIVER_ESPI(dev, get_channel_status)); return z_impl_espi_get_channel_status(dev, ch); } #include <zephyr/syscalls/espi_get_channel_status_mrsh.c> static inline int z_vrfy_espi_read_lpc_request(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { int ret; uint32_t data_copy; K_OOPS(K_SYSCALL_DRIVER_ESPI(dev, read_lpc_request)); ret = z_impl_espi_read_lpc_request(dev, op, &data_copy); K_OOPS(k_usermode_to_copy(data, &data_copy, sizeof(uint8_t))); return ret; } #include <zephyr/syscalls/espi_read_lpc_request_mrsh.c> static inline int z_vrfy_espi_write_lpc_request(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { uint32_t data_copy; K_OOPS(K_SYSCALL_DRIVER_ESPI(dev, write_lpc_request)); K_OOPS(k_usermode_from_copy(&data_copy, data, sizeof(*data))); return z_impl_espi_write_lpc_request(dev, op, &data_copy); } #include <zephyr/syscalls/espi_write_lpc_request_mrsh.c> static inline int z_vrfy_espi_send_vwire(const struct device *dev, enum espi_vwire_signal signal, uint8_t level) { K_OOPS(K_SYSCALL_DRIVER_ESPI(dev, send_vwire)); return z_impl_espi_send_vwire(dev, signal, level); } #include <zephyr/syscalls/espi_send_vwire_mrsh.c> static inline int z_vrfy_espi_receive_vwire(const struct device *dev, enum espi_vwire_signal signal, uint8_t *level) { int ret; uint8_t level_copy; K_OOPS(K_SYSCALL_DRIVER_ESPI(dev, receive_vwire)); ret = z_impl_espi_receive_vwire(dev, signal, &level_copy); 
K_OOPS(k_usermode_to_copy(level, &level_copy, sizeof(uint8_t))); return ret; } #include <zephyr/syscalls/espi_receive_vwire_mrsh.c> static inline int z_vrfy_espi_read_request(const struct device *dev, struct espi_request_packet *req) { int ret; struct espi_request_packet req_copy; K_OOPS(K_SYSCALL_DRIVER_ESPI(dev, read_request)); K_OOPS(k_usermode_from_copy(&req_copy, req, sizeof(struct espi_request_packet))); K_OOPS(K_SYSCALL_MEMORY_WRITE(req_copy.data, req_copy.len)); ret = z_impl_espi_read_request(dev, &req_copy); K_OOPS(k_usermode_to_copy(req, &req_copy, sizeof(struct espi_request_packet))); return ret; } #include <zephyr/syscalls/espi_read_request_mrsh.c> static inline int z_vrfy_espi_write_request(const struct device *dev, struct espi_request_packet *req) { int ret; struct espi_request_packet req_copy; K_OOPS(K_SYSCALL_DRIVER_ESPI(dev, write_request)); K_OOPS(K_SYSCALL_MEMORY_READ(req->data, req->len)); K_OOPS(k_usermode_from_copy(&req_copy, req, sizeof(struct espi_request_packet))); ret = z_impl_espi_write_request(dev, &req_copy); return ret; } #include <zephyr/syscalls/espi_write_request_mrsh.c> static inline int z_vrfy_espi_send_oob(const struct device *dev, struct espi_oob_packet *pckt) { int ret; struct espi_oob_packet pckt_copy; K_OOPS(K_SYSCALL_DRIVER_ESPI(dev, send_oob)); K_OOPS(K_SYSCALL_MEMORY_READ(pckt->buf, pckt->len)); K_OOPS(k_usermode_from_copy(&pckt_copy, pckt, sizeof(struct espi_oob_packet))); ret = z_impl_espi_send_oob(dev, &pckt_copy); return ret; } #include <zephyr/syscalls/espi_send_oob_mrsh.c> static inline int z_vrfy_espi_receive_oob(const struct device *dev, struct espi_oob_packet *pckt) { int ret; struct espi_oob_packet pckt_copy; K_OOPS(K_SYSCALL_DRIVER_ESPI(dev, receive_oob)); K_OOPS(k_usermode_from_copy(&pckt_copy, pckt, sizeof(struct espi_oob_packet))); K_OOPS(K_SYSCALL_MEMORY_WRITE(pckt->buf, pckt->len)); ret = z_impl_espi_receive_oob(dev, &pckt_copy); K_OOPS(k_usermode_to_copy(pckt, &pckt_copy, sizeof(struct espi_oob_packet))); 
return ret; } #include <zephyr/syscalls/espi_receive_oob_mrsh.c> static inline int z_vrfy_espi_read_flash(const struct device *dev, struct espi_flash_packet *pckt) { int ret; struct espi_flash_packet pckt_copy; K_OOPS(K_SYSCALL_DRIVER_ESPI(dev, flash_read)); K_OOPS(k_usermode_from_copy(&pckt_copy, pckt, sizeof(struct espi_flash_packet))); K_OOPS(K_SYSCALL_MEMORY_WRITE(pckt->buf, pckt->len)); ret = z_impl_espi_read_flash(dev, pckt); K_OOPS(k_usermode_to_copy(pckt, &pckt_copy, sizeof(struct espi_flash_packet))); return ret; } #include <zephyr/syscalls/espi_read_flash_mrsh.c> static inline int z_vrfy_espi_write_flash(const struct device *dev, struct espi_flash_packet *pckt) { int ret; struct espi_flash_packet pckt_copy; K_OOPS(K_SYSCALL_DRIVER_ESPI(dev, flash_write)); K_OOPS(k_usermode_from_copy(&pckt_copy, pckt, sizeof(struct espi_flash_packet))); K_OOPS(K_SYSCALL_MEMORY_READ(pckt->buf, pckt->len)); ret = z_impl_espi_write_flash(dev, &pckt_copy); return ret; } #include <zephyr/syscalls/espi_write_flash_mrsh.c> static inline int z_vrfy_espi_flash_erase(const struct device *dev, struct espi_flash_packet *pckt) { int ret; struct espi_flash_packet pckt_copy; K_OOPS(K_SYSCALL_DRIVER_ESPI(dev, flash_write)); K_OOPS(k_usermode_from_copy(&pckt_copy, pckt, sizeof(struct espi_flash_packet))); K_OOPS(K_SYSCALL_MEMORY_READ(pckt->buf, pckt->len)); ret = z_impl_espi_flash_erase(dev, &pckt_copy); return ret; } #include <zephyr/syscalls/espi_flash_erase_mrsh.c> ```
/content/code_sandbox/drivers/espi/espi_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,600
```c /* * */ #define DT_DRV_COMPAT microchip_xec_espi_saf #include <zephyr/kernel.h> #include <soc.h> #include <errno.h> #include <zephyr/drivers/espi.h> #include <zephyr/drivers/espi_saf.h> #include <zephyr/logging/log.h> #include "espi_utils.h" LOG_MODULE_REGISTER(espi_saf, CONFIG_ESPI_LOG_LEVEL); /* SAF EC Portal read/write flash access limited to 1-64 bytes */ #define MAX_SAF_ECP_BUFFER_SIZE 64ul /* 1 second maximum for flash operations */ #define MAX_SAF_FLASH_TIMEOUT 125000ul /* 1000ul */ /* 64 bytes @ 24MHz quad is approx. 6 us */ #define SAF_WAIT_INTERVAL 8 /* After 8 wait intervals yield */ #define SAF_YIELD_THRESHOLD 64 struct espi_isr { uint32_t girq_bit; void (*the_isr)(const struct device *dev); }; /* * SAF configuration from Device Tree * SAF controller register block base address * QMSPI controller register block base address * SAF communications register block base address * Flash STATUS1 poll timeout in 32KHz periods * Flash consecutive read timeout in units of 20 ns * Delay before first Poll-1 command after suspend in 20 ns units * Hold off suspend for this interval if erase or program in 32KHz periods. * Add delay between Poll STATUS1 commands in 20 ns units. 
*/ struct espi_saf_xec_config { uintptr_t saf_base_addr; uintptr_t qmspi_base_addr; uintptr_t saf_comm_base_addr; uint32_t poll_timeout; uint32_t consec_rd_timeout; uint32_t sus_chk_delay; uint16_t sus_rsm_interval; uint16_t poll_interval; }; struct espi_saf_xec_data { sys_slist_t callbacks; struct k_sem ecp_lock; uint32_t hwstatus; }; /* EC portal local flash r/w buffer */ static uint32_t slave_mem[MAX_SAF_ECP_BUFFER_SIZE]; /* * @brief eSPI SAF configuration */ static inline void mchp_saf_cs_descr_wr(MCHP_SAF_HW_REGS *regs, uint8_t cs, uint32_t val) { regs->SAF_CS_OP[cs].OP_DESCR = val; } static inline void mchp_saf_poll2_mask_wr(MCHP_SAF_HW_REGS *regs, uint8_t cs, uint16_t val) { LOG_DBG("%s cs: %d mask %x", __func__, cs, val); if (cs == 0) { regs->SAF_CS0_CFG_P2M = val; } else { regs->SAF_CS1_CFG_P2M = val; } } static inline void mchp_saf_cm_prefix_wr(MCHP_SAF_HW_REGS *regs, uint8_t cs, uint16_t val) { if (cs == 0) { regs->SAF_CS0_CM_PRF = val; } else { regs->SAF_CS1_CM_PRF = val; } } /* busy wait or yield until we have SAF interrupt support */ static int xec_saf_spin_yield(int *counter) { *counter = *counter + 1; if (*counter > MAX_SAF_FLASH_TIMEOUT) { return -ETIMEDOUT; } if (*counter > SAF_YIELD_THRESHOLD) { k_yield(); } else { k_busy_wait(SAF_WAIT_INTERVAL); } return 0; } /* * Initialize SAF flash protection regions. * SAF HW implements 17 protection regions. * At least one protection region must be configured to allow * EC access to the local flash through the EC Portal. * Each protection region is composed of 4 32-bit registers * Start bits[19:0] = bits[31:12] region start address (4KB boundaries) * Limit bits[19:0] = bits[31:12] region limit address (4KB boundaries) * Write protect b[7:0] = masters[7:0] allow write/erase. 1=allowed * Read protetc b[7:0] = masters[7:0] allow read. 1=allowed * * This routine configures protection region 0 for full flash array * address range and read-write-erase for all masters. 
* This routine must be called AFTER the flash configuration size/limit and * threshold registers have been programmed. * * POR default values: * Start = 0x7ffff * Limit = 0 * Write Prot = 0x01 Master 0 always granted write/erase * Read Prot = 0x01 Master 0 always granted read * * Sample code configures PR[0] * Start = 0 * Limit = 0x7ffff * WR = 0xFF * RD = 0xFF */ static void saf_protection_regions_init(MCHP_SAF_HW_REGS *regs) { LOG_DBG("%s", __func__); for (size_t n = 0; n < MCHP_ESPI_SAF_PR_MAX; n++) { if (n == 0) { regs->SAF_PROT_RG[0].START = 0U; regs->SAF_PROT_RG[0].LIMIT = regs->SAF_FL_CFG_SIZE_LIM >> 12; regs->SAF_PROT_RG[0].WEBM = MCHP_SAF_MSTR_ALL; regs->SAF_PROT_RG[0].RDBM = MCHP_SAF_MSTR_ALL; } else { regs->SAF_PROT_RG[n].START = MCHP_SAF_PROT_RG_START_DFLT; regs->SAF_PROT_RG[n].LIMIT = MCHP_SAF_PROT_RG_LIMIT_DFLT; regs->SAF_PROT_RG[n].WEBM = 0U; regs->SAF_PROT_RG[n].RDBM = 0U; } LOG_DBG("PROT[%d] START %x", n, regs->SAF_PROT_RG[n].START); LOG_DBG("PROT[%d] LIMIT %x", n, regs->SAF_PROT_RG[n].LIMIT); LOG_DBG("PROT[%d] WEBM %x", n, regs->SAF_PROT_RG[n].WEBM); LOG_DBG("PROT[%d] RDBM %x", n, regs->SAF_PROT_RG[n].RDBM); } } static uint32_t qmspi_freq_div(uint32_t freqhz) { uint32_t fdiv; if (freqhz < (MCHP_QMSPI_MIN_FREQ_KHZ * 1000U)) { fdiv = 0U; /* freq divider field -> 256 */ } else if (freqhz >= (MCHP_QMSPI_MAX_FREQ_KHZ * 1000U)) { fdiv = 1U; } else { /* truncation produces next higher integer frequency */ fdiv = MCHP_QMSPI_INPUT_CLOCK_FREQ_HZ / freqhz; } fdiv &= MCHP_QMSPI_M_FDIV_MASK0; fdiv <<= MCHP_QMSPI_M_FDIV_POS; return fdiv; } /* * Take over and re-initialize QMSPI for use by SAF HW engine. * When SAF is activated, QMSPI registers are controlled by SAF * HW engine. CPU no longer has access to QMSPI registers. * 1. Save QMSPI driver frequency divider, SPI signalling mode, and * chip select timing. * 2. Put QMSPI controller in a known state by performing a soft reset. * 3. Clear QMSPI GIRQ status * 4. Configure QMSPI interface control for SAF. * 5. 
Load flash device independent (generic) descriptors. * 6. Enable transfer done interrupt in QMSPI * 7. Enable QMSPI SAF mode * 8. If user configuration overrides frequency, signalling mode, * or chip select timing derive user values. * 9. Program QMSPI MODE and CSTIM registers with activate set. */ static int saf_qmspi_init(const struct espi_saf_xec_config *xcfg, const struct espi_saf_cfg *cfg) { uint32_t qmode, cstim, n; QMSPI_Type *regs = (QMSPI_Type *)xcfg->qmspi_base_addr; const struct espi_saf_hw_cfg *hwcfg = &cfg->hwcfg; qmode = regs->MODE; if (!(qmode & MCHP_QMSPI_M_ACTIVATE)) { return -EAGAIN; } qmode = regs->MODE & (MCHP_QMSPI_M_FDIV_MASK | MCHP_QMSPI_M_SIG_MASK); cstim = regs->CSTM; regs->MODE = MCHP_QMSPI_M_SRST; regs->STS = MCHP_QMSPI_STS_RW1C_MASK; MCHP_GIRQ_ENCLR(MCHP_QMSPI_GIRQ_NUM) = MCHP_QMSPI_GIRQ_VAL; MCHP_GIRQ_SRC(MCHP_QMSPI_GIRQ_NUM) = MCHP_QMSPI_GIRQ_VAL; regs->IFCTRL = (MCHP_QMSPI_IFC_WP_OUT_HI | MCHP_QMSPI_IFC_WP_OUT_EN | MCHP_QMSPI_IFC_HOLD_OUT_HI | MCHP_QMSPI_IFC_HOLD_OUT_EN); for (n = 0; n < MCHP_SAF_NUM_GENERIC_DESCR; n++) { regs->DESCR[MCHP_SAF_CM_EXIT_START_DESCR + n] = hwcfg->generic_descr[n]; } regs->IEN = MCHP_QMSPI_IEN_XFR_DONE; qmode |= (MCHP_QMSPI_M_SAF_DMA_MODE_EN | MCHP_QMSPI_M_CS0 | MCHP_QMSPI_M_ACTIVATE); if (hwcfg->flags & MCHP_SAF_HW_CFG_FLAG_CPHA) { qmode = (qmode & ~(MCHP_QMSPI_M_SIG_MASK)) | ((hwcfg->qmspi_cpha << MCHP_QMSPI_M_SIG_POS) & MCHP_QMSPI_M_SIG_MASK); } if (hwcfg->flags & MCHP_SAF_HW_CFG_FLAG_FREQ) { qmode = (qmode & ~(MCHP_QMSPI_M_FDIV_MASK)) | qmspi_freq_div(hwcfg->qmspi_freq_hz); } if (hwcfg->flags & MCHP_SAF_HW_CFG_FLAG_CSTM) { cstim = hwcfg->qmspi_cs_timing; } regs->MODE = qmode; regs->CSTM = cstim; return 0; } /* * Registers at offsets: * SAF Poll timeout @ 0x194. Hard coded to 0x28000. Default value = 0. * recommended value = 0x28000 32KHz clocks (5 seconds). b[17:0] * SAF Poll interval @ 0x198. Hard coded to 0 * Default value = 0. Recommended = 0. b[15:0] * SAF Suspend/Resume Interval @ 0x19c. 
Hard coded to 0x8 * Default value = 0x01. Min time erase/prog in 32KHz units. * SAF Consecutive Read Timeout @ 0x1a0. Hard coded to 0x2. b[15:0] * Units of MCLK. Recommend < 20us. b[19:0] * SAF Suspend Check Delay @ 0x1ac. Not touched. * Default = 0. Recommend = 20us. Units = MCLK. b[19:0] */ static void saf_flash_timing_init(MCHP_SAF_HW_REGS *regs, const struct espi_saf_xec_config *cfg) { LOG_DBG("%s\n", __func__); regs->SAF_POLL_TMOUT = cfg->poll_timeout; regs->SAF_POLL_INTRVL = cfg->poll_interval; regs->SAF_SUS_RSM_INTRVL = cfg->sus_rsm_interval; regs->SAF_CONSEC_RD_TMOUT = cfg->consec_rd_timeout; regs->SAF_SUS_CHK_DLY = cfg->sus_chk_delay; LOG_DBG("SAF_POLL_TMOUT %x\n", regs->SAF_POLL_TMOUT); LOG_DBG("SAF_POLL_INTRVL %x\n", regs->SAF_POLL_INTRVL); LOG_DBG("SAF_SUS_RSM_INTRVL %x\n", regs->SAF_SUS_RSM_INTRVL); LOG_DBG("SAF_CONSEC_RD_TMOUT %x\n", regs->SAF_CONSEC_RD_TMOUT); LOG_DBG("SAF_SUS_CHK_DLY %x\n", regs->SAF_SUS_CHK_DLY); } /* * Disable DnX bypass feature. */ static void saf_dnx_bypass_init(MCHP_SAF_HW_REGS *regs) { regs->SAF_DNX_PROT_BYP = 0; regs->SAF_DNX_PROT_BYP = 0xffffffff; } /* * Bitmap of flash erase size from 1KB up to 128KB. * eSPI SAF specification requires 4KB erase support. * MCHP SAF supports 4KB, 32KB, and 64KB. * Only report 32KB and 64KB to Host if supported by both * flash devices. */ static int saf_init_erase_block_size(const struct espi_saf_cfg *cfg) { struct espi_saf_flash_cfg *fcfg = cfg->flash_cfgs; uint32_t opb = fcfg->opb; uint8_t erase_bitmap = MCHP_ESPI_SERASE_SZ_4K; LOG_DBG("%s\n", __func__); if (cfg->nflash_devices > 1) { fcfg++; opb &= fcfg->opb; } if ((opb & MCHP_SAF_CS_OPB_ER0_MASK) == 0) { /* One or both do not support 4KB erase! 
*/ return -EINVAL; } if (opb & MCHP_SAF_CS_OPB_ER1_MASK) { erase_bitmap |= MCHP_ESPI_SERASE_SZ_32K; } if (opb & MCHP_SAF_CS_OPB_ER2_MASK) { erase_bitmap |= MCHP_ESPI_SERASE_SZ_64K; } ESPI_CAP_REGS->FC_SERBZ = erase_bitmap; return 0; } /* * Set the continuous mode prefix and 4-byte address mode bits * based upon the flash configuration information. * Updates: * SAF Flash Config Poll2 Mask @ 0x1A4 * SAF Flash Config Special Mode @ 0x1B0 * SAF Flash Misc Config @ 0x38 */ static void saf_flash_misc_cfg(MCHP_SAF_HW_REGS *regs, uint8_t cs, const struct espi_saf_flash_cfg *fcfg) { uint32_t d, v; d = regs->SAF_FL_CFG_MISC; v = MCHP_SAF_FL_CFG_MISC_CS0_CPE; if (cs) { v = MCHP_SAF_FL_CFG_MISC_CS1_CPE; } /* Does this flash device require a prefix for continuous mode? */ if (fcfg->cont_prefix != 0) { d |= v; } else { d &= ~v; } v = MCHP_SAF_FL_CFG_MISC_CS0_4BM; if (cs) { v = MCHP_SAF_FL_CFG_MISC_CS1_4BM; } /* Use 32-bit addressing for this flash device? */ if (fcfg->flags & MCHP_FLASH_FLAG_ADDR32) { d |= v; } else { d &= ~v; } regs->SAF_FL_CFG_MISC = d; LOG_DBG("%s SAF_FL_CFG_MISC: %x", __func__, d); } /* * Program flash device specific SAF and QMSPI registers. 
* * CS0 OpA @ 0x4c or CS1 OpA @ 0x5C * CS0 OpB @ 0x50 or CS1 OpB @ 0x60 * CS0 OpC @ 0x54 or CS1 OpC @ 0x64 * Poll 2 Mask @ 0x1a4 * Continuous Prefix @ 0x1b0 * CS0: QMSPI descriptors 0-5 or CS1 QMSPI descriptors 6-11 * CS0 Descrs @ 0x58 or CS1 Descrs @ 0x68 */ static void saf_flash_cfg(const struct device *dev, const struct espi_saf_flash_cfg *fcfg, uint8_t cs) { uint32_t d, did; const struct espi_saf_xec_config *xcfg = dev->config; MCHP_SAF_HW_REGS *regs = (MCHP_SAF_HW_REGS *)xcfg->saf_base_addr; QMSPI_Type *qregs = (QMSPI_Type *)xcfg->qmspi_base_addr; LOG_DBG("%s cs=%u", __func__, cs); regs->SAF_CS_OP[cs].OPA = fcfg->opa; regs->SAF_CS_OP[cs].OPB = fcfg->opb; regs->SAF_CS_OP[cs].OPC = fcfg->opc; regs->SAF_CS_OP[cs].OP_DESCR = (uint32_t)fcfg->cs_cfg_descr_ids; did = MCHP_SAF_QMSPI_CS0_START_DESCR; if (cs != 0) { did = MCHP_SAF_QMSPI_CS1_START_DESCR; } for (size_t i = 0; i < MCHP_SAF_QMSPI_NUM_FLASH_DESCR; i++) { d = fcfg->descr[i] & ~(MCHP_QMSPI_C_NEXT_DESCR_MASK); d |= (((did + 1) << MCHP_QMSPI_C_NEXT_DESCR_POS) & MCHP_QMSPI_C_NEXT_DESCR_MASK); qregs->DESCR[did++] = d; } mchp_saf_poll2_mask_wr(regs, cs, fcfg->poll2_mask); mchp_saf_cm_prefix_wr(regs, cs, fcfg->cont_prefix); saf_flash_misc_cfg(regs, cs, fcfg); } static const uint32_t tag_map_dflt[MCHP_ESPI_SAF_TAGMAP_MAX] = { MCHP_SAF_TAG_MAP0_DFLT, MCHP_SAF_TAG_MAP1_DFLT, MCHP_SAF_TAG_MAP2_DFLT }; static void saf_tagmap_init(MCHP_SAF_HW_REGS *regs, const struct espi_saf_cfg *cfg) { const struct espi_saf_hw_cfg *hwcfg = &cfg->hwcfg; for (int i = 0; i < MCHP_ESPI_SAF_TAGMAP_MAX; i++) { if (hwcfg->tag_map[i] & MCHP_SAF_HW_CFG_TAGMAP_USE) { regs->SAF_TAG_MAP[i] = hwcfg->tag_map[i]; } else { regs->SAF_TAG_MAP[i] = tag_map_dflt[i]; } } LOG_DBG("SAF TAG0 %x", regs->SAF_TAG_MAP[0]); LOG_DBG("SAF TAG1 %x", regs->SAF_TAG_MAP[1]); LOG_DBG("SAF TAG2 %x", regs->SAF_TAG_MAP[2]); } /* * Configure SAF and QMSPI for SAF operation based upon the * number and characteristics of local SPI flash devices. 
* NOTE: SAF is configured but not activated. SAF should be * activated only when eSPI master sends Flash Channel enable * message with MAF/SAF select flag. */ static int espi_saf_xec_configuration(const struct device *dev, const struct espi_saf_cfg *cfg) { int ret = 0; uint32_t totalsz = 0; uint32_t u = 0; LOG_DBG("%s", __func__); if ((dev == NULL) || (cfg == NULL)) { return -EINVAL; } const struct espi_saf_xec_config *xcfg = dev->config; MCHP_SAF_HW_REGS *regs = (MCHP_SAF_HW_REGS *)xcfg->saf_base_addr; const struct espi_saf_flash_cfg *fcfg = cfg->flash_cfgs; if ((fcfg == NULL) || (cfg->nflash_devices == 0U) || (cfg->nflash_devices > MCHP_SAF_MAX_FLASH_DEVICES)) { return -EINVAL; } if (regs->SAF_FL_CFG_MISC & MCHP_SAF_FL_CFG_MISC_SAF_EN) { return -EAGAIN; } saf_qmspi_init(xcfg, cfg); regs->SAF_CS0_CFG_P2M = 0; regs->SAF_CS1_CFG_P2M = 0; regs->SAF_FL_CFG_GEN_DESCR = MCHP_SAF_FL_CFG_GEN_DESCR_STD; /* flash device connected to CS0 required */ totalsz = fcfg->flashsz; regs->SAF_FL_CFG_THRH = totalsz; saf_flash_cfg(dev, fcfg, 0); /* optional second flash device connected to CS1 */ if (cfg->nflash_devices > 1) { fcfg++; totalsz += fcfg->flashsz; } /* Program CS1 configuration (same as CS0 if only one device) */ saf_flash_cfg(dev, fcfg, 1); if (totalsz == 0) { return -EAGAIN; } regs->SAF_FL_CFG_SIZE_LIM = totalsz - 1; LOG_DBG("SAF_FL_CFG_THRH = %x SAF_FL_CFG_SIZE_LIM = %x", regs->SAF_FL_CFG_THRH, regs->SAF_FL_CFG_SIZE_LIM); saf_tagmap_init(regs, cfg); saf_protection_regions_init(regs); saf_dnx_bypass_init(regs); saf_flash_timing_init(regs, xcfg); ret = saf_init_erase_block_size(cfg); if (ret != 0) { LOG_ERR("SAF Config bad flash erase config"); return ret; } /* Default or expedited prefetch? */ u = MCHP_SAF_FL_CFG_MISC_PFOE_DFLT; if (cfg->hwcfg.flags & MCHP_SAF_HW_CFG_FLAG_PFEXP) { u = MCHP_SAF_FL_CFG_MISC_PFOE_EXP; } regs->SAF_FL_CFG_MISC = (regs->SAF_FL_CFG_MISC & ~(MCHP_SAF_FL_CFG_MISC_PFOE_MASK)) | u; /* enable prefetch ? 
*/ if (cfg->hwcfg.flags & MCHP_SAF_HW_CFG_FLAG_PFEN) { MCHP_SAF_COMM_MODE_REG |= MCHP_SAF_COMM_MODE_PF_EN; } else { MCHP_SAF_COMM_MODE_REG &= ~(MCHP_SAF_COMM_MODE_PF_EN); } LOG_DBG("%s SAF_FL_CFG_MISC: %x", __func__, regs->SAF_FL_CFG_MISC); LOG_DBG("%s Aft MCHP_SAF_COMM_MODE_REG: %x", __func__, MCHP_SAF_COMM_MODE_REG); return 0; } static int espi_saf_xec_set_pr(const struct device *dev, const struct espi_saf_protection *pr) { if ((dev == NULL) || (pr == NULL)) { return -EINVAL; } if (pr->nregions >= MCHP_ESPI_SAF_PR_MAX) { return -EINVAL; } const struct espi_saf_xec_config *xcfg = dev->config; MCHP_SAF_HW_REGS *regs = (MCHP_SAF_HW_REGS *)xcfg->saf_base_addr; if (regs->SAF_FL_CFG_MISC & MCHP_SAF_FL_CFG_MISC_SAF_EN) { return -EAGAIN; } const struct espi_saf_pr *preg = pr->pregions; size_t n = pr->nregions; while (n--) { uint8_t regnum = preg->pr_num; if (regnum >= MCHP_ESPI_SAF_PR_MAX) { return -EINVAL; } /* NOTE: If previously locked writes have no effect */ if (preg->flags & MCHP_SAF_PR_FLAG_ENABLE) { regs->SAF_PROT_RG[regnum].START = preg->start >> 12U; regs->SAF_PROT_RG[regnum].LIMIT = (preg->start + preg->size - 1U) >> 12U; regs->SAF_PROT_RG[regnum].WEBM = preg->master_bm_we; regs->SAF_PROT_RG[regnum].RDBM = preg->master_bm_rd; } else { regs->SAF_PROT_RG[regnum].START = 0x7FFFFU; regs->SAF_PROT_RG[regnum].LIMIT = 0U; regs->SAF_PROT_RG[regnum].WEBM = 0U; regs->SAF_PROT_RG[regnum].RDBM = 0U; } if (preg->flags & MCHP_SAF_PR_FLAG_LOCK) { regs->SAF_PROT_LOCK |= (1UL << regnum); } preg++; } return 0; } static bool espi_saf_xec_channel_ready(const struct device *dev) { const struct espi_saf_xec_config *cfg = dev->config; MCHP_SAF_HW_REGS *regs = (MCHP_SAF_HW_REGS *)cfg->saf_base_addr; if (regs->SAF_FL_CFG_MISC & MCHP_SAF_FL_CFG_MISC_SAF_EN) { return true; } return false; } /* * MCHP SAF hardware supports a range of flash block erase * sizes from 1KB to 128KB. The eSPI Host specification requires * 4KB must be supported. 
The MCHP SAF QMSPI HW interface only * supported three erase sizes. Most SPI flash devices chosen for * SAF support 4KB, 32KB, and 64KB. * Get flash erase sizes driver has configured from eSPI capabilities * registers. We assume driver flash tables have opcodes to match * capabilities configuration. * Check requested erase size is supported. */ struct erase_size_encoding { uint8_t hwbitpos; uint8_t encoding; }; static const struct erase_size_encoding ersz_enc[] = { { MCHP_ESPI_SERASE_SZ_4K_BITPOS, 0 }, { MCHP_ESPI_SERASE_SZ_32K_BITPOS, 1 }, { MCHP_ESPI_SERASE_SZ_64K_BITPOS, 2 } }; #define SAF_ERASE_ENCODING_MAX_ENTRY \ (sizeof(ersz_enc) / sizeof(struct erase_size_encoding)) static uint32_t get_erase_size_encoding(uint32_t erase_size) { uint8_t supsz = ESPI_CAP_REGS->FC_SERBZ; LOG_DBG("%s\n", __func__); for (int i = 0; i < SAF_ERASE_ENCODING_MAX_ENTRY; i++) { uint32_t sz = MCHP_ESPI_SERASE_SZ(ersz_enc[i].hwbitpos); if ((sz == erase_size) && (supsz & (1 << ersz_enc[i].hwbitpos))) { return ersz_enc[i].encoding; } } return 0xffffffffU; } static int check_ecp_access_size(uint32_t reqlen) { if ((reqlen < MCHP_SAF_ECP_CMD_RW_LEN_MIN) || (reqlen > MCHP_SAF_ECP_CMD_RW_LEN_MAX)) { return -EAGAIN; } return 0; } /* * EC access (read/erase/write) to SAF attached flash array * cmd 0 = read * 1 = write * 2 = erase */ static int saf_ecp_access(const struct device *dev, struct espi_saf_packet *pckt, uint8_t cmd) { uint32_t err_mask, n; int rc, counter; struct espi_saf_xec_data *xdat = dev->data; const struct espi_saf_xec_config *cfg = dev->config; MCHP_SAF_HW_REGS *regs = (MCHP_SAF_HW_REGS *)cfg->saf_base_addr; counter = 0; err_mask = MCHP_SAF_ECP_STS_ERR_MASK; LOG_DBG("%s", __func__); if (!(regs->SAF_FL_CFG_MISC & MCHP_SAF_FL_CFG_MISC_SAF_EN)) { LOG_ERR("SAF is disabled"); return -EIO; } if (regs->SAF_ECP_BUSY & MCHP_SAF_ECP_BUSY) { LOG_ERR("SAF EC Portal is busy"); return -EBUSY; } if ((cmd == MCHP_SAF_ECP_CMD_CTYPE_READ0) || (cmd == MCHP_SAF_ECP_CMD_CTYPE_WRITE0)) { rc = 
check_ecp_access_size(pckt->len); if (rc) { LOG_ERR("SAF EC Portal size out of bounds"); return rc; } if (cmd == MCHP_SAF_ECP_CMD_CTYPE_WRITE0) { memcpy(slave_mem, pckt->buf, pckt->len); } n = pckt->len; } else if (cmd == MCHP_SAF_ECP_CMD_CTYPE_ERASE0) { n = get_erase_size_encoding(pckt->len); if (n == 0xffffffff) { LOG_ERR("SAF EC Portal unsupported erase size"); return -EAGAIN; } } else { LOG_ERR("SAF EC Portal bad cmd"); return -EAGAIN; } LOG_DBG("%s params val done", __func__); k_sem_take(&xdat->ecp_lock, K_FOREVER); regs->SAF_ECP_INTEN = 0; regs->SAF_ECP_STATUS = 0xffffffff; /* * TODO - Force SAF Done interrupt disabled until we have support * from eSPI driver. */ MCHP_GIRQ_ENCLR(MCHP_SAF_GIRQ) = MCHP_SAF_GIRQ_ECP_DONE_BIT; MCHP_GIRQ_SRC(MCHP_SAF_GIRQ) = MCHP_SAF_GIRQ_ECP_DONE_BIT; regs->SAF_ECP_FLAR = pckt->flash_addr; regs->SAF_ECP_BFAR = (uint32_t)&slave_mem[0]; regs->SAF_ECP_CMD = MCHP_SAF_ECP_CMD_PUT_FLASH_NP | ((uint32_t)cmd << MCHP_SAF_ECP_CMD_CTYPE_POS) | ((n << MCHP_SAF_ECP_CMD_LEN_POS) & MCHP_SAF_ECP_CMD_LEN_MASK); /* TODO when interrupts are available enable here */ regs->SAF_ECP_START = MCHP_SAF_ECP_START; /* TODO * ISR is in eSPI driver. Use polling until eSPI driver has been * modified to provide callback for GIRQ19 SAF ECP Done. 
*/ rc = 0; xdat->hwstatus = regs->SAF_ECP_STATUS; while (!(xdat->hwstatus & MCHP_SAF_ECP_STS_DONE)) { rc = xec_saf_spin_yield(&counter); if (rc < 0) { goto ecp_exit; } xdat->hwstatus = regs->SAF_ECP_STATUS; } /* clear hardware status and check for errors */ regs->SAF_ECP_STATUS = xdat->hwstatus; if (xdat->hwstatus & MCHP_SAF_ECP_STS_ERR_MASK) { rc = -EIO; goto ecp_exit; } if (cmd == MCHP_SAF_ECP_CMD_CTYPE_READ0) { memcpy(pckt->buf, slave_mem, pckt->len); } ecp_exit: k_sem_give(&xdat->ecp_lock); return rc; } /* Flash read using SAF EC Portal */ static int saf_xec_flash_read(const struct device *dev, struct espi_saf_packet *pckt) { LOG_DBG("%s", __func__); return saf_ecp_access(dev, pckt, MCHP_SAF_ECP_CMD_CTYPE_READ0); } /* Flash write using SAF EC Portal */ static int saf_xec_flash_write(const struct device *dev, struct espi_saf_packet *pckt) { return saf_ecp_access(dev, pckt, MCHP_SAF_ECP_CMD_CTYPE_WRITE0); } /* Flash erase using SAF EC Portal */ static int saf_xec_flash_erase(const struct device *dev, struct espi_saf_packet *pckt) { return saf_ecp_access(dev, pckt, MCHP_SAF_ECP_CMD_CTYPE_ERASE0); } static int espi_saf_xec_manage_callback(const struct device *dev, struct espi_callback *callback, bool set) { struct espi_saf_xec_data *data = dev->data; return espi_manage_callback(&data->callbacks, callback, set); } static int espi_saf_xec_activate(const struct device *dev) { const struct espi_saf_xec_config *cfg; MCHP_SAF_HW_REGS *regs; if (dev == NULL) { return -EINVAL; } cfg = dev->config; regs = (MCHP_SAF_HW_REGS *)cfg->saf_base_addr; regs->SAF_FL_CFG_MISC |= MCHP_SAF_FL_CFG_MISC_SAF_EN; return 0; } static int espi_saf_xec_init(const struct device *dev); static const struct espi_saf_driver_api espi_saf_xec_driver_api = { .config = espi_saf_xec_configuration, .set_protection_regions = espi_saf_xec_set_pr, .activate = espi_saf_xec_activate, .get_channel_status = espi_saf_xec_channel_ready, .flash_read = saf_xec_flash_read, .flash_write = saf_xec_flash_write, 
.flash_erase = saf_xec_flash_erase, .manage_callback = espi_saf_xec_manage_callback, }; static struct espi_saf_xec_data espi_saf_xec_data; static const struct espi_saf_xec_config espi_saf_xec_config = { .saf_base_addr = DT_INST_REG_ADDR_BY_IDX(0, 0), .qmspi_base_addr = DT_INST_REG_ADDR_BY_IDX(0, 1), .saf_comm_base_addr = DT_INST_REG_ADDR_BY_IDX(0, 2), .poll_timeout = DT_INST_PROP_OR(inst, poll_timeout, MCHP_SAF_FLASH_POLL_TIMEOUT), .consec_rd_timeout = DT_INST_PROP_OR( inst, consec_rd_timeout, MCHP_SAF_FLASH_CONSEC_READ_TIMEOUT), .sus_chk_delay = DT_INST_PROP_OR(inst, sus_chk_delay, MCHP_SAF_FLASH_SUS_CHK_DELAY), .sus_rsm_interval = DT_INST_PROP_OR(inst, sus_rsm_interval, MCHP_SAF_FLASH_SUS_RSM_INTERVAL), .poll_interval = DT_INST_PROP_OR(inst, poll_interval, MCHP_SAF_FLASH_POLL_INTERVAL), }; DEVICE_DT_INST_DEFINE(0, &espi_saf_xec_init, NULL, &espi_saf_xec_data, &espi_saf_xec_config, POST_KERNEL, CONFIG_ESPI_TAF_INIT_PRIORITY, &espi_saf_xec_driver_api); static int espi_saf_xec_init(const struct device *dev) { struct espi_saf_xec_data *data = dev->data; /* ungate SAF clocks by disabling PCR sleep enable */ mchp_pcr_periph_slp_ctrl(PCR_ESPI_SAF, MCHP_PCR_SLEEP_DIS); /* reset the SAF block */ mchp_pcr_periph_reset(PCR_ESPI_SAF); /* Configure the channels and its capabilities based on build config */ ESPI_CAP_REGS->GLB_CAP0 |= MCHP_ESPI_GBL_CAP0_FC_SUPP; ESPI_CAP_REGS->FC_CAP &= ~(MCHP_ESPI_FC_CAP_SHARE_MASK); ESPI_CAP_REGS->FC_CAP |= MCHP_ESPI_FC_CAP_SHARE_MAF_SAF; k_sem_init(&data->ecp_lock, 1, 1); return 0; } ```
/content/code_sandbox/drivers/espi/espi_saf_mchp_xec.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,051
```unknown config ESPI_IT8XXX2 bool "ITE IT8XXX2 embedded controller ESPI driver" default y depends on DT_HAS_ITE_IT8XXX2_ESPI_ENABLED depends on SOC_IT8XXX2 help Enable ITE IT8XXX2 ESPI driver. if ESPI_IT8XXX2 config ESPI_OOB_CHANNEL default y config ESPI_PERIPHERAL_8042_KBC default y config ESPI_PERIPHERAL_HOST_IO default y config ESPI_PERIPHERAL_DEBUG_PORT_80 default y config ESPI_PERIPHERAL_EC_HOST_CMD default y choice IT8XXX2_H2RAM_HC_SIZE_CHOICE prompt "H2RAM space for ec host command" default IT8XXX2_H2RAM_HC_SIZE_256 depends on ESPI_PERIPHERAL_EC_HOST_CMD config IT8XXX2_H2RAM_HC_SIZE_16 bool "16" config IT8XXX2_H2RAM_HC_SIZE_32 bool "32" config IT8XXX2_H2RAM_HC_SIZE_64 bool "64" config IT8XXX2_H2RAM_HC_SIZE_128 bool "128" config IT8XXX2_H2RAM_HC_SIZE_256 bool "256" config IT8XXX2_H2RAM_HC_SIZE_512 bool "512" config IT8XXX2_H2RAM_HC_SIZE_1024 bool "1024" config IT8XXX2_H2RAM_HC_SIZE_2048 bool "2048" endchoice config ESPI_IT8XXX2_HC_H2RAM_SIZE int depends on ESPI_PERIPHERAL_EC_HOST_CMD default 16 if IT8XXX2_H2RAM_HC_SIZE_16 default 32 if IT8XXX2_H2RAM_HC_SIZE_32 default 64 if IT8XXX2_H2RAM_HC_SIZE_64 default 128 if IT8XXX2_H2RAM_HC_SIZE_128 default 256 if IT8XXX2_H2RAM_HC_SIZE_256 default 512 if IT8XXX2_H2RAM_HC_SIZE_512 default 1024 if IT8XXX2_H2RAM_HC_SIZE_1024 default 2048 if IT8XXX2_H2RAM_HC_SIZE_2048 config ESPI_PERIPHERAL_ACPI_SHM_REGION default y choice IT8XXX2_H2RAM_ACPI_SHM_SIZE_CHOICE prompt "H2RAM space for ACPI shared memory region" default IT8XXX2_H2RAM_ACPI_SHM_SIZE_256 depends on ESPI_PERIPHERAL_ACPI_SHM_REGION config IT8XXX2_H2RAM_ACPI_SHM_SIZE_16 bool "16" config IT8XXX2_H2RAM_ACPI_SHM_SIZE_32 bool "32" config IT8XXX2_H2RAM_ACPI_SHM_SIZE_64 bool "64" config IT8XXX2_H2RAM_ACPI_SHM_SIZE_128 bool "128" config IT8XXX2_H2RAM_ACPI_SHM_SIZE_256 bool "256" config IT8XXX2_H2RAM_ACPI_SHM_SIZE_512 bool "512" config IT8XXX2_H2RAM_ACPI_SHM_SIZE_1024 bool "1024" config IT8XXX2_H2RAM_ACPI_SHM_SIZE_2048 bool "2048" endchoice config 
ESPI_IT8XXX2_ACPI_SHM_H2RAM_SIZE int depends on ESPI_PERIPHERAL_ACPI_SHM_REGION default 16 if IT8XXX2_H2RAM_ACPI_SHM_SIZE_16 default 32 if IT8XXX2_H2RAM_ACPI_SHM_SIZE_32 default 64 if IT8XXX2_H2RAM_ACPI_SHM_SIZE_64 default 128 if IT8XXX2_H2RAM_ACPI_SHM_SIZE_128 default 256 if IT8XXX2_H2RAM_ACPI_SHM_SIZE_256 default 512 if IT8XXX2_H2RAM_ACPI_SHM_SIZE_512 default 1024 if IT8XXX2_H2RAM_ACPI_SHM_SIZE_1024 default 2048 if IT8XXX2_H2RAM_ACPI_SHM_SIZE_2048 config ESPI_PERIPHERAL_CUSTOM_OPCODE default y config ESPI_FLASH_CHANNEL default y config ESPI_IT8XXX2_PNPCFG_DEVICE_KBC_MOUSE bool "ITE IT8XXX2 KBC mouse device" help With this option enabled, EC will send IRQ12 signal to host when the KBC mouse output buffer is full. # On IT8xxx2 series, this configuration option has limitation: # Port 80 and 81 I/O cycles share the same interrupt source and there is no # status bit to indicate which cycle triggered the interrupt and data registers # of these two ports are read only. Hence EC has to read these two data # registers at the same time in the ISR. # It means that the Host must always write 2 bytes of data to port 80 otherwise # port 81 data will not be updated. config ESPI_IT8XXX2_PORT_81_CYCLE bool "EC accepts 0x81 I/O cycle from eSPI transaction" depends on ESPI_PERIPHERAL_DEBUG_PORT_80 help With this option enabled, EC will accept 0x81 I/O cycle from the Host. This allows EC to accept 2 bytes of port 80 data written from the Host. (e.g. using iotools: iotools io_write16 0x80 0x1234) endif #ESPI_IT8XXX2 ```
/content/code_sandbox/drivers/espi/Kconfig.it8xxx2
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,217
```c /* * */ #define DT_DRV_COMPAT microchip_xec_espi_v2 #include <zephyr/kernel.h> #include <soc.h> #include <errno.h> #include <zephyr/drivers/espi.h> #include <zephyr/drivers/clock_control/mchp_xec_clock_control.h> #include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h> #include <zephyr/dt-bindings/interrupt-controller/mchp-xec-ecia.h> #include <zephyr/logging/log.h> #include <zephyr/sys/sys_io.h> #include <zephyr/sys/util.h> #include <zephyr/irq.h> #include "espi_utils.h" #include "espi_mchp_xec_v2.h" /* Minimum delay before acknowledging a virtual wire */ #define ESPI_XEC_VWIRE_ACK_DELAY 10ul /* Maximum timeout to transmit a virtual wire packet. * 10 ms expressed in multiples of 100us */ #define ESPI_XEC_VWIRE_SEND_TIMEOUT 100ul #define VW_MAX_GIRQS 2ul /* 200ms */ #define MAX_OOB_TIMEOUT 200ul /* 1s */ #define MAX_FLASH_TIMEOUT 1000ul /* While issuing flash erase command, it should be ensured that the transfer * length specified is non-zero. */ #define ESPI_FLASH_ERASE_DUMMY 0x01ul /* OOB maximum address configuration */ #define ESPI_XEC_OOB_ADDR_MSW 0x1ffful #define ESPI_XEC_OOB_ADDR_LSW 0xfffful /* OOB Rx length */ #define ESPI_XEC_OOB_RX_LEN 0x7f00ul /* Espi peripheral has 3 uart ports */ #define ESPI_PERIPHERAL_UART_PORT0 0 #define ESPI_PERIPHERAL_UART_PORT1 1 #define UART_DEFAULT_IRQ_POS 2u #define UART_DEFAULT_IRQ BIT(UART_DEFAULT_IRQ_POS) LOG_MODULE_REGISTER(espi, CONFIG_ESPI_LOG_LEVEL); #define ESPI_XEC_REG_BASE(dev) \ ((struct espi_iom_regs *)ESPI_XEC_CONFIG(dev)->base_addr) #define ESPI_XEC_MSVW_REG_BASE(dev) \ ((struct espi_msvw_ar_regs *)(ESPI_XEC_CONFIG(dev)->vw_base_addr)) #define ESPI_XEC_SMVW_REG_OFS 0x200 #define ESPI_XEC_SMVW_REG_BASE(dev) \ ((struct espi_smvw_ar_regs *) \ (ESPI_XEC_CONFIG(dev)->vw_base_addr + ESPI_XEC_SMVW_REG_OFS)) /* PCR */ #define XEC_PCR_REG_BASE \ ((struct pcr_regs *)(DT_REG_ADDR(DT_NODELABEL(pcr)))) /* Microchip canonical virtual wire mapping * your_sha256_hash--------| * VW Idx | VW reg | SRC_ID3 | 
SRC_ID2 | SRC_ID1 | SRC_ID0 | * your_sha256_hash--------| * System Event Virtual Wires * your_sha256_hash--------| * 2h | MSVW00 | res | SLP_S5# | SLP_S4# | SLP_S3# | * 3h | MSVW01 | res | OOB_RST_WARN | PLTRST# | SUS_STAT# | * 4h | SMVW00 | PME# | WAKE# | res | OOB_RST_ACK | * 5h | SMVW01 | SLV_BOOT_STS | ERR_NONFATAL | ERR_FATAL | SLV_BT_DONE | * 6h | SMVW02 | HOST_RST_ACK | RCIN# | SMI# | SCI# | * 7h | MSVW02 | res | NMIOUT# | SMIOUT# | HOS_RST_WARN| * your_sha256_hash--------| * Platform specific virtual wires * your_sha256_hash--------| * 40h | SMVW03 | res | res | DNX_ACK | SUS_ACK# | * 41h | MSVW03 | SLP_A# | res | SUS_PDNACK| SUS_WARN# | * 42h | MSVW04 | res | res | SLP_WLAN# | SLP_LAN# | * 43h | MSVW05 | generic | generic | generic | generic | * 44h | MSVW06 | generic | generic | generic | generic | * 45h | SMVW04 | generic | generic | generic | generic | * 46h | SMVW05 | generic | generic | generic | generic | * 47h | MSVW07 | res | res | res | HOST_C10 | * 4Ah | MSVW08 | res | res | DNX_WARN | res | * These are configurable by overriding device tree vw routing | * 50h | SMVW06 | ocb_3 | ocb_2 | ocb_1 | ocb_0 | * 51h | SMVW07 | gpio_7 | gpio_6 | gpio_5 | gpio_4 | * 52h | SMVW08 | gpio_11 | gpio_10 | gpio_9 | gpio_8 | */ static const struct xec_signal vw_tbl[] = { MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_SLP_S3, vw_slp_s3_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_SLP_S4, vw_slp_s4_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_SLP_S5, vw_slp_s5_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_OOB_RST_WARN, vw_oob_rst_warn), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_PLTRST, vw_pltrst_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_SUS_STAT, vw_sus_stat_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_HOST_RST_WARN, vw_host_rst_warn), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_NMIOUT, vw_nmiout_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_SMIOUT, vw_smiout_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_SLP_A, vw_slp_a_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_SUS_PWRDN_ACK, 
vw_sus_pwrdn_ack), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_SUS_WARN, vw_sus_warn_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_SLP_WLAN, vw_slp_wlan_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_SLP_LAN, vw_slp_lan_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_HOST_C10, vw_host_c10), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_DNX_WARN, vw_dnx_warn), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_PME, vw_pme_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_WAKE, vw_wake_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_OOB_RST_ACK, vw_oob_rst_ack), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_TARGET_BOOT_STS, vw_target_boot_status), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_ERR_NON_FATAL, vw_error_non_fatal), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_ERR_FATAL, vw_error_fatal), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_TARGET_BOOT_DONE, vw_target_boot_done), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_HOST_RST_ACK, vw_host_rst_ack), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_RST_CPU_INIT, vw_rcin_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_SMI, vw_smi_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_SCI, vw_sci_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_DNX_ACK, vw_dnx_ack), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_SUS_ACK, vw_sus_ack_n), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_TARGET_GPIO_0, vw_t2c_gpio_0), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_TARGET_GPIO_1, vw_t2c_gpio_1), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_TARGET_GPIO_2, vw_t2c_gpio_2), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_TARGET_GPIO_3, vw_t2c_gpio_3), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_TARGET_GPIO_4, vw_t2c_gpio_4), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_TARGET_GPIO_5, vw_t2c_gpio_5), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_TARGET_GPIO_6, vw_t2c_gpio_6), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_TARGET_GPIO_7, vw_t2c_gpio_7), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_TARGET_GPIO_8, vw_t2c_gpio_8), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_TARGET_GPIO_9, vw_t2c_gpio_9), MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_TARGET_GPIO_10, vw_t2c_gpio_10), 
MCHP_DT_ESPI_VW_ENTRY(ESPI_VWIRE_SIGNAL_TARGET_GPIO_11, vw_t2c_gpio_11), }; /* Buffer size are expressed in bytes */ #ifdef CONFIG_ESPI_OOB_CHANNEL static uint32_t target_rx_mem[CONFIG_ESPI_OOB_BUFFER_SIZE >> 2]; static uint32_t target_tx_mem[CONFIG_ESPI_OOB_BUFFER_SIZE >> 2]; #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL static uint32_t target_mem[CONFIG_ESPI_FLASH_BUFFER_SIZE >> 2]; #endif static inline uintptr_t xec_msvw_addr(const struct device *dev, uint8_t vw_index) { uintptr_t vwbase = ESPI_XEC_CONFIG(dev)->vw_base_addr; return vwbase + vw_index * sizeof(struct espi_msvw_reg); } static inline uintptr_t xec_smvw_addr(const struct device *dev, uint8_t vw_index) { uintptr_t vwbase = ESPI_XEC_CONFIG(dev)->vw_base_addr; vwbase += ESPI_XEC_SMVW_REG_OFS; return vwbase + vw_index * sizeof(struct espi_smvw_reg); } static int espi_xec_configure(const struct device *dev, struct espi_cfg *cfg) { struct espi_iom_regs *iom_regs = ESPI_XEC_REG_BASE(dev); uint8_t iomode = 0; uint8_t cap0 = iom_regs->CAP0; uint8_t cap1 = iom_regs->CAP1; uint8_t cur_iomode = (cap1 & MCHP_ESPI_GBL_CAP1_IO_MODE_MASK) >> MCHP_ESPI_GBL_CAP1_IO_MODE_POS; /* Set frequency */ cap1 &= ~MCHP_ESPI_GBL_CAP1_MAX_FREQ_MASK; switch (cfg->max_freq) { case 20: cap1 |= MCHP_ESPI_GBL_CAP1_MAX_FREQ_20M; break; case 25: cap1 |= MCHP_ESPI_GBL_CAP1_MAX_FREQ_25M; break; case 33: cap1 |= MCHP_ESPI_GBL_CAP1_MAX_FREQ_33M; break; case 50: cap1 |= MCHP_ESPI_GBL_CAP1_MAX_FREQ_50M; break; case 66: cap1 |= MCHP_ESPI_GBL_CAP1_MAX_FREQ_66M; break; default: return -EINVAL; } /* Set IO mode */ iomode = (cfg->io_caps >> 1); if (iomode > 3) { return -EINVAL; } if (iomode != cur_iomode) { cap1 &= ~(MCHP_ESPI_GBL_CAP1_IO_MODE_MASK0 << MCHP_ESPI_GBL_CAP1_IO_MODE_POS); cap1 |= (iomode << MCHP_ESPI_GBL_CAP1_IO_MODE_POS); } /* Validate and translate eSPI API channels to MEC capabilities */ cap0 &= ~MCHP_ESPI_GBL_CAP0_MASK; if (cfg->channel_caps & ESPI_CHANNEL_PERIPHERAL) { if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_CHANNEL)) { cap0 |= 
MCHP_ESPI_GBL_CAP0_PC_SUPP; } else { return -EINVAL; } } if (cfg->channel_caps & ESPI_CHANNEL_VWIRE) { if (IS_ENABLED(CONFIG_ESPI_VWIRE_CHANNEL)) { cap0 |= MCHP_ESPI_GBL_CAP0_VW_SUPP; } else { return -EINVAL; } } if (cfg->channel_caps & ESPI_CHANNEL_OOB) { if (IS_ENABLED(CONFIG_ESPI_OOB_CHANNEL)) { cap0 |= MCHP_ESPI_GBL_CAP0_OOB_SUPP; } else { return -EINVAL; } } if (cfg->channel_caps & ESPI_CHANNEL_FLASH) { if (IS_ENABLED(CONFIG_ESPI_FLASH_CHANNEL)) { cap0 |= MCHP_ESPI_GBL_CAP0_FC_SUPP; } else { LOG_ERR("Flash channel not supported"); return -EINVAL; } } iom_regs->CAP0 = cap0; iom_regs->CAP1 = cap1; /* Activate the eSPI block *. * Need to guarantee that this register is configured before RSMRST# * de-assertion and after pinmux */ iom_regs->ACTV = 1; LOG_DBG("eSPI block activated successfully"); return 0; } static bool espi_xec_channel_ready(const struct device *dev, enum espi_channel ch) { struct espi_iom_regs *iom_regs = ESPI_XEC_REG_BASE(dev); bool sts; switch (ch) { case ESPI_CHANNEL_PERIPHERAL: sts = iom_regs->PCRDY & MCHP_ESPI_PC_READY; break; case ESPI_CHANNEL_VWIRE: sts = iom_regs->VWRDY & MCHP_ESPI_VW_READY; break; case ESPI_CHANNEL_OOB: sts = iom_regs->OOBRDY & MCHP_ESPI_OOB_READY; break; case ESPI_CHANNEL_FLASH: sts = iom_regs->FCRDY & MCHP_ESPI_FC_READY; break; default: sts = false; break; } return sts; } static int espi_xec_send_vwire(const struct device *dev, enum espi_vwire_signal signal, uint8_t level) { struct xec_signal signal_info = vw_tbl[signal]; uint8_t xec_id = signal_info.xec_reg_idx; uint8_t src_id = signal_info.bit; uint8_t dir; uintptr_t regaddr; if ((src_id >= ESPI_VWIRE_SRC_ID_MAX) || (xec_id >= ESPI_MSVW_IDX_MAX)) { return -EINVAL; } if (!(signal_info.flags & BIT(MCHP_DT_ESPI_VW_FLAG_STATUS_POS))) { return -EIO; /* VW not enabled */ } dir = (signal_info.flags >> MCHP_DT_ESPI_VW_FLAG_DIR_POS) & BIT(0); if (dir == ESPI_CONTROLLER_TO_TARGET) { regaddr = xec_msvw_addr(dev, xec_id); sys_write8(level, regaddr + MSVW_BI_SRC0 + src_id); } if 
(dir == ESPI_TARGET_TO_CONTROLLER) { regaddr = xec_smvw_addr(dev, xec_id); sys_write8(level, regaddr + SMVW_BI_SRC0 + src_id); /* Ensure eSPI virtual wire packet is transmitted * There is no interrupt, so need to poll register */ uint8_t rd_cnt = ESPI_XEC_VWIRE_SEND_TIMEOUT; while (sys_read8(regaddr + SMVW_BI_SRC_CHG) && rd_cnt--) { k_busy_wait(100); } } return 0; } static int espi_xec_receive_vwire(const struct device *dev, enum espi_vwire_signal signal, uint8_t *level) { struct xec_signal signal_info = vw_tbl[signal]; uint8_t xec_id = signal_info.xec_reg_idx; uint8_t src_id = signal_info.bit; uint8_t dir; uintptr_t regaddr; if ((src_id >= ESPI_VWIRE_SRC_ID_MAX) || (xec_id >= ESPI_SMVW_IDX_MAX) || (level == NULL)) { return -EINVAL; } if (!(signal_info.flags & BIT(MCHP_DT_ESPI_VW_FLAG_STATUS_POS))) { return -EIO; /* VW not enabled */ } dir = (signal_info.flags >> MCHP_DT_ESPI_VW_FLAG_DIR_POS) & BIT(0); if (dir == ESPI_CONTROLLER_TO_TARGET) { regaddr = xec_msvw_addr(dev, xec_id); *level = sys_read8(regaddr + MSVW_BI_SRC0 + src_id) & BIT(0); } if (dir == ESPI_TARGET_TO_CONTROLLER) { regaddr = xec_smvw_addr(dev, xec_id); *level = sys_read8(regaddr + SMVW_BI_SRC0 + src_id) & BIT(0); } return 0; } #ifdef CONFIG_ESPI_OOB_CHANNEL static int espi_xec_send_oob(const struct device *dev, struct espi_oob_packet *pckt) { int ret; struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); struct espi_xec_data *const data = ESPI_XEC_DATA(dev); uint8_t err_mask = MCHP_ESPI_OOB_TX_STS_IBERR | MCHP_ESPI_OOB_TX_STS_OVRUN | MCHP_ESPI_OOB_TX_STS_BADREQ; LOG_DBG("%s", __func__); if (!(regs->OOBTXSTS & MCHP_ESPI_OOB_TX_STS_CHEN)) { LOG_ERR("OOB channel is disabled"); return -EIO; } if (regs->OOBTXSTS & MCHP_ESPI_OOB_TX_STS_BUSY) { LOG_ERR("OOB channel is busy"); return -EBUSY; } if (pckt->len > CONFIG_ESPI_OOB_BUFFER_SIZE) { LOG_ERR("insufficient space"); return -EINVAL; } memcpy(target_tx_mem, pckt->buf, pckt->len); regs->OOBTXL = pckt->len; regs->OOBTXC = MCHP_ESPI_OOB_TX_CTRL_START; 
LOG_DBG("%s %d", __func__, regs->OOBTXL); /* Wait until ISR or timeout */ ret = k_sem_take(&data->tx_lock, K_MSEC(MAX_OOB_TIMEOUT)); if (ret == -EAGAIN) { return -ETIMEDOUT; } if (regs->OOBTXSTS & err_mask) { LOG_ERR("Tx failed %x", regs->OOBTXSTS); regs->OOBTXSTS = err_mask; return -EIO; } return 0; } static int espi_xec_receive_oob(const struct device *dev, struct espi_oob_packet *pckt) { struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); uint8_t err_mask = MCHP_ESPI_OOB_RX_STS_IBERR | MCHP_ESPI_OOB_RX_STS_OVRUN; if (regs->OOBRXSTS & err_mask) { return -EIO; } #ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC int ret; struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); /* Wait until ISR or timeout */ ret = k_sem_take(&data->rx_lock, K_MSEC(MAX_OOB_TIMEOUT)); if (ret == -EAGAIN) { return -ETIMEDOUT; } #endif /* Check if buffer passed to driver can fit the received buffer */ uint32_t rcvd_len = regs->OOBRXL & MCHP_ESPI_OOB_RX_LEN_MASK; if (rcvd_len > pckt->len) { LOG_ERR("space rcvd %d vs %d", rcvd_len, pckt->len); return -EIO; } pckt->len = rcvd_len; memcpy(pckt->buf, target_rx_mem, pckt->len); memset(target_rx_mem, 0, pckt->len); /* Only after data has been copied from SRAM, indicate channel * is available for next packet */ regs->OOBRXC |= MCHP_ESPI_OOB_RX_CTRL_AVAIL; return 0; } #endif /* CONFIG_ESPI_OOB_CHANNEL */ #ifdef CONFIG_ESPI_FLASH_CHANNEL static int espi_xec_flash_read(const struct device *dev, struct espi_flash_packet *pckt) { int ret; struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); uint32_t err_mask = MCHP_ESPI_FC_STS_IBERR | MCHP_ESPI_FC_STS_FAIL | MCHP_ESPI_FC_STS_OVFL | MCHP_ESPI_FC_STS_BADREQ; LOG_DBG("%s", __func__); if (!(regs->FCSTS & MCHP_ESPI_FC_STS_CHAN_EN)) { LOG_ERR("Flash channel is disabled"); return -EIO; } if (pckt->len > CONFIG_ESPI_FLASH_BUFFER_SIZE) { LOG_ERR("Invalid size request"); return -EINVAL; } regs->FCFA[1] = 0; regs->FCFA[0] = pckt->flash_addr; 
regs->FCBA[1] = 0; regs->FCBA[0] = (uint32_t)&target_mem[0]; regs->FCLEN = pckt->len; regs->FCCTL = MCHP_ESPI_FC_CTRL_FUNC(MCHP_ESPI_FC_CTRL_RD0); regs->FCCTL |= MCHP_ESPI_FC_CTRL_START; /* Wait until ISR or timeout */ ret = k_sem_take(&data->flash_lock, K_MSEC(MAX_FLASH_TIMEOUT)); if (ret == -EAGAIN) { LOG_ERR("%s timeout", __func__); return -ETIMEDOUT; } if (regs->FCSTS & err_mask) { LOG_ERR("%s error %x", __func__, err_mask); regs->FCSTS = err_mask; return -EIO; } memcpy(pckt->buf, target_mem, pckt->len); return 0; } static int espi_xec_flash_write(const struct device *dev, struct espi_flash_packet *pckt) { int ret; struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); uint32_t err_mask = MCHP_ESPI_FC_STS_IBERR | MCHP_ESPI_FC_STS_OVRUN | MCHP_ESPI_FC_STS_FAIL | MCHP_ESPI_FC_STS_BADREQ; struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); LOG_DBG("%s", __func__); if (sizeof(target_mem) < pckt->len) { LOG_ERR("Packet length is too big"); return -ENOMEM; } if (!(regs->FCSTS & MCHP_ESPI_FC_STS_CHAN_EN)) { LOG_ERR("Flash channel is disabled"); return -EIO; } if ((regs->FCCFG & MCHP_ESPI_FC_CFG_BUSY)) { LOG_ERR("Flash channel is busy"); return -EBUSY; } memcpy(target_mem, pckt->buf, pckt->len); regs->FCFA[1] = 0; regs->FCFA[0] = pckt->flash_addr; regs->FCBA[1] = 0; regs->FCBA[0] = (uint32_t)&target_mem[0]; regs->FCLEN = pckt->len; regs->FCCTL = MCHP_ESPI_FC_CTRL_FUNC(MCHP_ESPI_FC_CTRL_WR0); regs->FCCTL |= MCHP_ESPI_FC_CTRL_START; /* Wait until ISR or timeout */ ret = k_sem_take(&data->flash_lock, K_MSEC(MAX_FLASH_TIMEOUT)); if (ret == -EAGAIN) { LOG_ERR("%s timeout", __func__); return -ETIMEDOUT; } if (regs->FCSTS & err_mask) { LOG_ERR("%s err: %x", __func__, err_mask); regs->FCSTS = err_mask; return -EIO; } return 0; } static int espi_xec_flash_erase(const struct device *dev, struct espi_flash_packet *pckt) { int ret; uint32_t status; uint32_t err_mask = MCHP_ESPI_FC_STS_IBERR | MCHP_ESPI_FC_STS_OVRUN | MCHP_ESPI_FC_STS_FAIL | MCHP_ESPI_FC_STS_BADREQ; 
struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); LOG_DBG("%s", __func__); if (!(regs->FCSTS & MCHP_ESPI_FC_STS_CHAN_EN)) { LOG_ERR("Flash channel is disabled"); return -EIO; } if ((regs->FCCFG & MCHP_ESPI_FC_CFG_BUSY)) { LOG_ERR("Flash channel is busy"); return -EBUSY; } /* Clear status register */ status = regs->FCSTS; regs->FCSTS = status; regs->FCFA[1] = 0; regs->FCFA[0] = pckt->flash_addr; regs->FCLEN = ESPI_FLASH_ERASE_DUMMY; regs->FCCTL = MCHP_ESPI_FC_CTRL_FUNC(MCHP_ESPI_FC_CTRL_ERS0); regs->FCCTL |= MCHP_ESPI_FC_CTRL_START; /* Wait until ISR or timeout */ ret = k_sem_take(&data->flash_lock, K_MSEC(MAX_FLASH_TIMEOUT)); if (ret == -EAGAIN) { LOG_ERR("%s timeout", __func__); return -ETIMEDOUT; } if (regs->FCSTS & err_mask) { LOG_ERR("%s err: %x", __func__, err_mask); regs->FCSTS = err_mask; return -EIO; } return 0; } #endif /* CONFIG_ESPI_FLASH_CHANNEL */ static int espi_xec_manage_callback(const struct device *dev, struct espi_callback *callback, bool set) { struct espi_xec_data *const data = ESPI_XEC_DATA(dev); return espi_manage_callback(&data->callbacks, callback, set); } #ifdef CONFIG_ESPI_AUTOMATIC_BOOT_DONE_ACKNOWLEDGE static void send_slave_bootdone(const struct device *dev) { int ret; uint8_t boot_done; ret = espi_xec_receive_vwire(dev, ESPI_VWIRE_SIGNAL_TARGET_BOOT_DONE, &boot_done); if (!ret && !boot_done) { /* SLAVE_BOOT_DONE & SLAVE_LOAD_STS have to be sent together */ espi_xec_send_vwire(dev, ESPI_VWIRE_SIGNAL_TARGET_BOOT_STS, 1); espi_xec_send_vwire(dev, ESPI_VWIRE_SIGNAL_TARGET_BOOT_DONE, 1); } } #endif #ifdef CONFIG_ESPI_OOB_CHANNEL static void espi_init_oob(const struct device *dev) { struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev); struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); /* Enable OOB Tx/Rx interrupts */ mchp_xec_ecia_girq_src_en(cfg->irq_info_list[oob_up_girq_idx].gid, cfg->irq_info_list[oob_up_girq_idx].gpos); 
mchp_xec_ecia_girq_src_en(cfg->irq_info_list[oob_dn_girq_idx].gid, cfg->irq_info_list[oob_dn_girq_idx].gpos); regs->OOBTXA[1] = 0; regs->OOBRXA[1] = 0; regs->OOBTXA[0] = (uint32_t)&target_tx_mem[0]; regs->OOBRXA[0] = (uint32_t)&target_rx_mem[0]; regs->OOBRXL = 0x00FF0000; /* Enable OOB Tx channel enable change status interrupt */ regs->OOBTXIEN |= MCHP_ESPI_OOB_TX_IEN_CHG_EN | MCHP_ESPI_OOB_TX_IEN_DONE; /* Enable Rx channel to receive data any time * there are case where OOB is not initiated by a previous OOB Tx */ regs->OOBRXIEN |= MCHP_ESPI_OOB_RX_IEN; regs->OOBRXC |= MCHP_ESPI_OOB_RX_CTRL_AVAIL; } #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL static void espi_init_flash(const struct device *dev) { struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev); struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); LOG_DBG("%s", __func__); /* Need to clear status done when ROM boots in MAF */ LOG_DBG("%s ESPI_FC_REGS->CFG %X", __func__, regs->FCCFG); regs->FCSTS = MCHP_ESPI_FC_STS_DONE; /* Enable interrupts */ mchp_xec_ecia_girq_src_en(cfg->irq_info_list[fc_girq_idx].gid, cfg->irq_info_list[fc_girq_idx].gpos); regs->FCIEN |= MCHP_ESPI_FC_IEN_CHG_EN; regs->FCIEN |= MCHP_ESPI_FC_IEN_DONE; } #endif static void espi_bus_init(const struct device *dev) { struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev); /* Enable bus interrupts */ mchp_xec_ecia_girq_src_en(cfg->irq_info_list[pc_girq_idx].gid, cfg->irq_info_list[pc_girq_idx].gpos); mchp_xec_ecia_girq_src_en(cfg->irq_info_list[rst_girq_idx].gid, cfg->irq_info_list[rst_girq_idx].gpos); mchp_xec_ecia_girq_src_en(cfg->irq_info_list[vw_ch_en_girq_idx].gid, cfg->irq_info_list[vw_ch_en_girq_idx].gpos); } /* Clear specified eSPI bus GIRQ status */ static int xec_espi_bus_intr_clr(const struct device *dev, enum xec_espi_girq_idx idx) { struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev); if (idx >= max_girq_idx) { return -EINVAL; } mchp_xec_ecia_girq_src_clr(cfg->irq_info_list[idx].gid, cfg->irq_info_list[idx].gpos); return 0; } 
/* Enable/disable specified eSPI bus GIRQ */ static int xec_espi_bus_intr_ctl(const struct device *dev, enum xec_espi_girq_idx idx, uint8_t enable) { struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev); if (idx >= max_girq_idx) { return -EINVAL; } if (enable) { mchp_xec_ecia_girq_src_en(cfg->irq_info_list[idx].gid, cfg->irq_info_list[idx].gpos); } else { mchp_xec_ecia_girq_src_dis(cfg->irq_info_list[idx].gid, cfg->irq_info_list[idx].gpos); } return 0; } static void espi_rst_isr(const struct device *dev) { uint8_t rst_sts; struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); struct espi_xec_data *const data = ESPI_XEC_DATA(dev); struct espi_event evt = { ESPI_BUS_RESET, 0, 0 }; #ifdef ESPI_XEC_V2_DEBUG data->espi_rst_count++; #endif rst_sts = regs->ERIS; /* eSPI reset status register is clear on write register */ regs->ERIS = MCHP_ESPI_RST_ISTS; /* clear GIRQ latched status */ xec_espi_bus_intr_clr(dev, rst_girq_idx); if (rst_sts & MCHP_ESPI_RST_ISTS) { if (rst_sts & MCHP_ESPI_RST_ISTS_PIN_RO_HI) { evt.evt_data = 1; } else { evt.evt_data = 0; } espi_send_callbacks(&data->callbacks, dev, evt); #ifdef CONFIG_ESPI_OOB_CHANNEL espi_init_oob(dev); #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL espi_init_flash(dev); #endif espi_bus_init(dev); } } /* Configure sub devices BAR address if not using default I/O based address * then make its BAR valid. 
* Refer to microchip eSPI I/O base addresses for default values */ static void config_sub_devices(const struct device *dev) { xec_host_dev_init(dev); } static void configure_sirq(const struct device *dev) { struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); #ifdef CONFIG_ESPI_PERIPHERAL_UART switch (CONFIG_ESPI_PERIPHERAL_UART_SOC_MAPPING) { case ESPI_PERIPHERAL_UART_PORT0: regs->SIRQ[SIRQ_UART0] = UART_DEFAULT_IRQ; break; case ESPI_PERIPHERAL_UART_PORT1: regs->SIRQ[SIRQ_UART1] = UART_DEFAULT_IRQ; break; } #endif #ifdef CONFIG_ESPI_PERIPHERAL_8042_KBC regs->SIRQ[SIRQ_KBC_KIRQ] = 1; regs->SIRQ[SIRQ_KBC_MIRQ] = 12; #endif } static void setup_espi_io_config(const struct device *dev, uint16_t host_address) { struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); regs->IOHBAR[IOB_IOC] = (host_address << 16) | MCHP_ESPI_IO_BAR_HOST_VALID; config_sub_devices(dev); configure_sirq(dev); regs->PCSTS = MCHP_ESPI_PC_STS_EN_CHG | MCHP_ESPI_PC_STS_BM_EN_CHG_POS; regs->PCIEN |= MCHP_ESPI_PC_IEN_EN_CHG; regs->PCRDY = 1; } /* * Write the interrupt select field of the specified MSVW source. * Each MSVW controls 4 virtual wires. 
*/ static int xec_espi_vw_intr_ctrl(const struct device *dev, uint8_t msvw_idx, uint8_t src_id, uint8_t intr_mode) { struct espi_msvw_ar_regs *regs = ESPI_XEC_MSVW_REG_BASE(dev); if ((msvw_idx >= ESPI_NUM_MSVW) || (src_id > 3)) { return -EINVAL; } uintptr_t msvw_addr = (uintptr_t)&regs->MSVW[msvw_idx]; sys_write8(intr_mode, msvw_addr + MSVW_BI_IRQ_SEL0 + src_id); return 0; } static void espi_pc_isr(const struct device *dev) { struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); uint32_t status = regs->PCSTS; struct espi_event evt = { .evt_type = ESPI_BUS_EVENT_CHANNEL_READY, .evt_details = ESPI_CHANNEL_PERIPHERAL, .evt_data = 0 }; struct espi_xec_data *data = (struct espi_xec_data *)(dev->data); LOG_DBG("%s %x", __func__, status); if (status & MCHP_ESPI_PC_STS_BUS_ERR) { LOG_ERR("%s bus error", __func__); regs->PCSTS = MCHP_ESPI_PC_STS_BUS_ERR; } if (status & MCHP_ESPI_PC_STS_EN_CHG) { if (status & MCHP_ESPI_PC_STS_EN) { setup_espi_io_config(dev, MCHP_ESPI_IOBAR_INIT_DFLT); } regs->PCSTS = MCHP_ESPI_PC_STS_EN_CHG; } if (status & MCHP_ESPI_PC_STS_BM_EN_CHG) { if (status & MCHP_ESPI_PC_STS_BM_EN) { evt.evt_data = ESPI_PC_EVT_BUS_MASTER_ENABLE; LOG_WRN("%s BM change %x", __func__, status); espi_send_callbacks(&data->callbacks, dev, evt); } regs->PCSTS = MCHP_ESPI_PC_STS_BM_EN_CHG; } xec_espi_bus_intr_clr(dev, pc_girq_idx); } static void espi_vw_chan_en_isr(const struct device *dev) { struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); struct espi_xec_data *const data = ESPI_XEC_DATA(dev); struct espi_event evt = { .evt_type = ESPI_BUS_EVENT_CHANNEL_READY, .evt_details = ESPI_CHANNEL_VWIRE, .evt_data = 0 }; uint32_t status = regs->VWSTS; if (status & MCHP_ESPI_VW_EN_STS_RO) { regs->VWRDY = 1; evt.evt_data = 1; /* VW channel interrupt can disabled at this point */ xec_espi_bus_intr_ctl(dev, vw_ch_en_girq_idx, 0); #ifdef CONFIG_ESPI_AUTOMATIC_BOOT_DONE_ACKNOWLEDGE send_slave_bootdone(dev); #endif } espi_send_callbacks(&data->callbacks, dev, evt); 
xec_espi_bus_intr_clr(dev, vw_ch_en_girq_idx); } #ifdef CONFIG_ESPI_OOB_CHANNEL static void espi_oob_down_isr(const struct device *dev) { uint32_t status; struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); struct espi_xec_data *const data = ESPI_XEC_DATA(dev); #ifdef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC struct espi_event evt = { .evt_type = ESPI_BUS_EVENT_OOB_RECEIVED, .evt_details = 0, .evt_data = 0 }; #endif status = regs->OOBRXSTS; LOG_DBG("%s %x", __func__, status); if (status & MCHP_ESPI_OOB_RX_STS_DONE) { /* Register is write-on-clear, ensure only 1 bit is affected */ regs->OOBRXSTS = MCHP_ESPI_OOB_RX_STS_DONE; #ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC k_sem_give(&data->rx_lock); #else evt.evt_details = regs->OOBRXL & MCHP_ESPI_OOB_RX_LEN_MASK; espi_send_callbacks(&data->callbacks, dev, evt); #endif } xec_espi_bus_intr_clr(dev, oob_dn_girq_idx); } static void espi_oob_up_isr(const struct device *dev) { uint32_t status; struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); struct espi_xec_data *const data = ESPI_XEC_DATA(dev); struct espi_event evt = { .evt_type = ESPI_BUS_EVENT_CHANNEL_READY, .evt_details = ESPI_CHANNEL_OOB, .evt_data = 0 }; status = regs->OOBTXSTS; LOG_DBG("%s sts:%x", __func__, status); if (status & MCHP_ESPI_OOB_TX_STS_DONE) { /* Register is write-on-clear, ensure only 1 bit is affected */ status = regs->OOBTXSTS = MCHP_ESPI_OOB_TX_STS_DONE; k_sem_give(&data->tx_lock); } if (status & MCHP_ESPI_OOB_TX_STS_CHG_EN) { if (status & MCHP_ESPI_OOB_TX_STS_CHEN) { espi_init_oob(dev); /* Indicate OOB channel is ready to eSPI host */ regs->OOBRDY = 1; evt.evt_data = 1; } status = regs->OOBTXSTS = MCHP_ESPI_OOB_TX_STS_CHG_EN; espi_send_callbacks(&data->callbacks, dev, evt); } xec_espi_bus_intr_clr(dev, oob_up_girq_idx); } #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL static void espi_flash_isr(const struct device *dev) { uint32_t status; struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); struct espi_xec_data *const data = ESPI_XEC_DATA(dev); struct espi_event 
evt = { .evt_type = ESPI_BUS_EVENT_CHANNEL_READY, .evt_details = ESPI_CHANNEL_FLASH, .evt_data = 0, }; status = regs->FCSTS; LOG_DBG("%s %x", __func__, status); if (status & MCHP_ESPI_FC_STS_DONE) { /* Ensure to clear only relevant bit */ regs->FCSTS = MCHP_ESPI_FC_STS_DONE; k_sem_give(&data->flash_lock); } if (status & MCHP_ESPI_FC_STS_CHAN_EN_CHG) { /* Ensure to clear only relevant bit */ regs->FCSTS = MCHP_ESPI_FC_STS_CHAN_EN_CHG; if (status & MCHP_ESPI_FC_STS_CHAN_EN) { espi_init_flash(dev); /* Indicate flash channel is ready to eSPI master */ regs->FCRDY = MCHP_ESPI_FC_READY; evt.evt_data = 1; } espi_send_callbacks(&data->callbacks, dev, evt); } xec_espi_bus_intr_clr(dev, fc_girq_idx); } #endif /* Send callbacks if enabled and track eSPI host system state */ static void notify_system_state(const struct device *dev, enum espi_vwire_signal signal) { struct espi_xec_data *const data = ESPI_XEC_DATA(dev); struct espi_event evt = { ESPI_BUS_EVENT_VWIRE_RECEIVED, 0, 0 }; uint8_t status = 0; espi_xec_receive_vwire(dev, signal, &status); evt.evt_details = signal; evt.evt_data = status; espi_send_callbacks(&data->callbacks, dev, evt); } static void notify_host_warning(const struct device *dev, enum espi_vwire_signal signal) { uint8_t status; espi_xec_receive_vwire(dev, signal, &status); if (!IS_ENABLED(CONFIG_ESPI_AUTOMATIC_WARNING_ACKNOWLEDGE)) { struct espi_xec_data *const data = ESPI_XEC_DATA(dev); struct espi_event evt = {ESPI_BUS_EVENT_VWIRE_RECEIVED, 0, 0 }; evt.evt_details = signal; evt.evt_data = status; espi_send_callbacks(&data->callbacks, dev, evt); } else { k_busy_wait(ESPI_XEC_VWIRE_ACK_DELAY); /* Some flows are dependent on awareness of client's driver * about these warnings in such cases these automatic response * should not be enabled. 
*/ switch (signal) { case ESPI_VWIRE_SIGNAL_HOST_RST_WARN: espi_xec_send_vwire(dev, ESPI_VWIRE_SIGNAL_HOST_RST_ACK, status); break; case ESPI_VWIRE_SIGNAL_SUS_WARN: espi_xec_send_vwire(dev, ESPI_VWIRE_SIGNAL_SUS_ACK, status); break; case ESPI_VWIRE_SIGNAL_OOB_RST_WARN: espi_xec_send_vwire(dev, ESPI_VWIRE_SIGNAL_OOB_RST_ACK, status); break; case ESPI_VWIRE_SIGNAL_DNX_WARN: espi_xec_send_vwire(dev, ESPI_VWIRE_SIGNAL_DNX_ACK, status); break; default: break; } } } static void notify_vw_status(const struct device *dev, enum espi_vwire_signal signal) { struct espi_xec_data *const data = ESPI_XEC_DATA(dev); struct espi_event evt = { ESPI_BUS_EVENT_VWIRE_RECEIVED, 0, 0 }; uint8_t status = 0; espi_xec_receive_vwire(dev, signal, &status); evt.evt_details = signal; evt.evt_data = status; espi_send_callbacks(&data->callbacks, dev, evt); } /* * VW handlers must have signature * typedef void (*mchp_xec_ecia_callback_t) (int girq_id, int src, void *user) * where parameter user is a pointer to const struct device * These handlers are registered to their respective GIRQ child device of the * ECIA driver. 
*/ static void vw_slp3_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_system_state(dev, ESPI_VWIRE_SIGNAL_SLP_S3); } static void vw_slp4_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_system_state(dev, ESPI_VWIRE_SIGNAL_SLP_S4); } static void vw_slp5_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_system_state(dev, ESPI_VWIRE_SIGNAL_SLP_S5); } static void vw_host_rst_warn_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_host_warning(dev, ESPI_VWIRE_SIGNAL_HOST_RST_WARN); } static void vw_sus_warn_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_host_warning(dev, ESPI_VWIRE_SIGNAL_SUS_WARN); } static void vw_oob_rst_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_host_warning(dev, ESPI_VWIRE_SIGNAL_OOB_RST_WARN); } static void vw_sus_pwrdn_ack_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_vw_status(dev, ESPI_VWIRE_SIGNAL_SUS_PWRDN_ACK); } static void vw_sus_slp_a_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_vw_status(dev, ESPI_VWIRE_SIGNAL_SLP_A); } static void vw_sus_dnx_warn_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_host_warning(dev, ESPI_VWIRE_SIGNAL_DNX_WARN); } static void vw_pltrst_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; struct espi_xec_data *const data = ESPI_XEC_DATA(dev); struct espi_event evt = { ESPI_BUS_EVENT_VWIRE_RECEIVED, ESPI_VWIRE_SIGNAL_PLTRST, 0 }; uint8_t status = 0; espi_xec_receive_vwire(dev, ESPI_VWIRE_SIGNAL_PLTRST, &status); if (status) { 
setup_espi_io_config(dev, MCHP_ESPI_IOBAR_INIT_DFLT); } evt.evt_data = status; espi_send_callbacks(&data->callbacks, dev, evt); } static void vw_sus_stat_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_host_warning(dev, ESPI_VWIRE_SIGNAL_SUS_STAT); } static void vw_slp_wlan_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_vw_status(dev, ESPI_VWIRE_SIGNAL_SLP_WLAN); } static void vw_slp_lan_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_vw_status(dev, ESPI_VWIRE_SIGNAL_SLP_LAN); } static void vw_host_c10_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_vw_status(dev, ESPI_VWIRE_SIGNAL_HOST_C10); } static void vw_nmiout_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_vw_status(dev, ESPI_VWIRE_SIGNAL_NMIOUT); } static void vw_smiout_handler(int girq_id, int src, void *user) { const struct device *dev = (const struct device *)user; notify_vw_status(dev, ESPI_VWIRE_SIGNAL_SMIOUT); } const struct espi_vw_isr m2s_vwires_isr[] = { {ESPI_VWIRE_SIGNAL_SLP_S3, MCHP_MSVW00_GIRQ, MCHP_MSVW00_SRC0_GIRQ_POS, vw_slp3_handler}, {ESPI_VWIRE_SIGNAL_SLP_S4, MCHP_MSVW00_GIRQ, MCHP_MSVW00_SRC1_GIRQ_POS, vw_slp4_handler}, {ESPI_VWIRE_SIGNAL_SLP_S5, MCHP_MSVW00_GIRQ, MCHP_MSVW00_SRC2_GIRQ_POS, vw_slp5_handler}, {ESPI_VWIRE_SIGNAL_OOB_RST_WARN, MCHP_MSVW01_GIRQ, MCHP_MSVW01_SRC2_GIRQ_POS, vw_oob_rst_handler}, {ESPI_VWIRE_SIGNAL_PLTRST, MCHP_MSVW01_GIRQ, MCHP_MSVW01_SRC1_GIRQ_POS, vw_pltrst_handler}, {ESPI_VWIRE_SIGNAL_SUS_STAT, MCHP_MSVW01_GIRQ, MCHP_MSVW01_SRC0_GIRQ_POS, vw_sus_stat_handler}, {ESPI_VWIRE_SIGNAL_HOST_RST_WARN, MCHP_MSVW02_GIRQ, MCHP_MSVW02_SRC0_GIRQ_POS, vw_host_rst_warn_handler}, {ESPI_VWIRE_SIGNAL_NMIOUT, MCHP_MSVW02_GIRQ, MCHP_MSVW02_SRC1_GIRQ_POS, vw_nmiout_handler}, 
{ESPI_VWIRE_SIGNAL_SMIOUT, MCHP_MSVW02_GIRQ, MCHP_MSVW02_SRC2_GIRQ_POS, vw_smiout_handler}, {ESPI_VWIRE_SIGNAL_SLP_A, MCHP_MSVW03_GIRQ, MCHP_MSVW03_SRC3_GIRQ_POS, vw_sus_slp_a_handler}, {ESPI_VWIRE_SIGNAL_SUS_PWRDN_ACK, MCHP_MSVW03_GIRQ, MCHP_MSVW03_SRC1_GIRQ_POS, vw_sus_pwrdn_ack_handler}, {ESPI_VWIRE_SIGNAL_SUS_WARN, MCHP_MSVW03_GIRQ, MCHP_MSVW03_SRC0_GIRQ_POS, vw_sus_warn_handler}, {ESPI_VWIRE_SIGNAL_SLP_WLAN, MCHP_MSVW04_GIRQ, MCHP_MSVW04_SRC1_GIRQ_POS, vw_slp_wlan_handler}, {ESPI_VWIRE_SIGNAL_SLP_LAN, MCHP_MSVW04_GIRQ, MCHP_MSVW04_SRC0_GIRQ_POS, vw_slp_lan_handler}, {ESPI_VWIRE_SIGNAL_HOST_C10, MCHP_MSVW07_GIRQ, MCHP_MSVW07_SRC0_GIRQ_POS, vw_host_c10_handler}, {ESPI_VWIRE_SIGNAL_DNX_WARN, MCHP_MSVW08_GIRQ, MCHP_MSVW08_SRC1_GIRQ_POS, vw_sus_dnx_warn_handler}, }; static int espi_xec_init(const struct device *dev); static const struct espi_driver_api espi_xec_driver_api = { .config = espi_xec_configure, .get_channel_status = espi_xec_channel_ready, .send_vwire = espi_xec_send_vwire, .receive_vwire = espi_xec_receive_vwire, #ifdef CONFIG_ESPI_OOB_CHANNEL .send_oob = espi_xec_send_oob, .receive_oob = espi_xec_receive_oob, #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL .flash_read = espi_xec_flash_read, .flash_write = espi_xec_flash_write, .flash_erase = espi_xec_flash_erase, #endif .manage_callback = espi_xec_manage_callback, .read_lpc_request = espi_xec_read_lpc_request, .write_lpc_request = espi_xec_write_lpc_request, }; static struct espi_xec_data espi_xec_data_var; /* n = node-id, p = property, i = index */ #define XEC_IRQ_INFO(n, p, i) \ { \ .gid = MCHP_XEC_ECIA_GIRQ(DT_PROP_BY_IDX(n, p, i)), \ .gpos = MCHP_XEC_ECIA_GIRQ_POS(DT_PROP_BY_IDX(n, p, i)), \ .anid = MCHP_XEC_ECIA_NVIC_AGGR(DT_PROP_BY_IDX(n, p, i)), \ .dnid = MCHP_XEC_ECIA_NVIC_DIRECT(DT_PROP_BY_IDX(n, p, i)), \ }, static const struct espi_xec_irq_info espi_xec_irq_info_0[] = { DT_FOREACH_PROP_ELEM(DT_NODELABEL(espi0), girqs, XEC_IRQ_INFO) }; /* pin control structure(s) */ PINCTRL_DT_INST_DEFINE(0); static 
const struct espi_xec_config espi_xec_config = { .base_addr = DT_INST_REG_ADDR(0), .vw_base_addr = DT_INST_REG_ADDR_BY_NAME(0, vw), .pcr_idx = DT_INST_PROP_BY_IDX(0, pcrs, 0), .pcr_bitpos = DT_INST_PROP_BY_IDX(0, pcrs, 1), .irq_info_size = ARRAY_SIZE(espi_xec_irq_info_0), .irq_info_list = espi_xec_irq_info_0, .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), }; DEVICE_DT_INST_DEFINE(0, &espi_xec_init, NULL, &espi_xec_data_var, &espi_xec_config, PRE_KERNEL_2, CONFIG_ESPI_INIT_PRIORITY, &espi_xec_driver_api); /* * Connect ESPI bus interrupt handlers: ESPI_RESET and channels. * MEC172x hardware fixed SAF interrupt routing bug. SAF driver * will connect its direct mode interrupt handler(s) on this GIRQ. */ static void espi_xec_connect_irqs(const struct device *dev) { ARG_UNUSED(dev); /* eSPI Reset */ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 7, irq), DT_INST_IRQ_BY_IDX(0, 7, priority), espi_rst_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_IDX(0, 7, irq)); /* eSPI Virtual wire channel enable change ISR */ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 8, irq), DT_INST_IRQ_BY_IDX(0, 8, priority), espi_vw_chan_en_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_IDX(0, 8, irq)); /* eSPI Peripheral Channel */ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 0, irq), DT_INST_IRQ_BY_IDX(0, 0, priority), espi_pc_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_IDX(0, 0, irq)); #ifdef CONFIG_ESPI_OOB_CHANNEL /* eSPI OOB Upstream direction */ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 4, irq), DT_INST_IRQ_BY_IDX(0, 4, priority), espi_oob_up_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_IDX(0, 4, irq)); /* eSPI OOB Channel Downstream direction */ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 5, irq), DT_INST_IRQ_BY_IDX(0, 5, priority), espi_oob_down_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_IDX(0, 5, irq)); #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 6, irq), DT_INST_IRQ_BY_IDX(0, 6, priority), espi_flash_isr, DEVICE_DT_INST_GET(0), 0); 
irq_enable(DT_INST_IRQ_BY_IDX(0, 6, irq)); #endif } /* MSVW is a 96-bit register and SMVW is a 64-bit register. * Each MSVW/SMVW controls a group of 4 eSPI virtual wires. * Host index located in b[7:0] * Reset source located in b[9:8] * Reset VW values SRC[3:0] located in b[15:12]. * MSVW current VW state values located in bits[64, 72, 80, 88] * SMVW current VW state values located in bits[32, 40, 48, 56] */ static void xec_vw_cfg_properties(const struct xec_signal *p, uint32_t regaddr, uint8_t dir) { uint32_t src_ofs = 4u; uint8_t src_pos = (8u * p->bit); uint8_t rst_state = (p->flags >> MCHP_DT_ESPI_VW_FLAG_RST_STATE_POS) & MCHP_DT_ESPI_VW_FLAG_RST_STATE_MSK0; uint8_t rst_src = rst_src = (p->flags >> MCHP_DT_ESPI_VW_FLAG_RST_SRC_POS) & MCHP_DT_ESPI_VW_FLAG_RST_SRC_MSK0; if (dir) { src_ofs = 8u; } if (rst_state || rst_src) { /* change reset source or state ? */ sys_write8(0, regaddr); /* disable register */ uint8_t temp = sys_read8(regaddr + 1u); if (rst_state) { /* change reset state and default value of this vwire? */ rst_state--; if (rst_state) { temp |= BIT(p->bit + 4u); sys_set_bit(regaddr + src_ofs, src_pos); } else { temp |= ~BIT(p->bit + 4u); sys_clear_bit(regaddr + src_ofs, src_pos); } } if (rst_src) { /* change reset source of all vwires in this group? */ rst_src--; temp = (temp & ~0x3u) | (rst_src & 0x3u); } sys_write8(temp, regaddr + 1u); } if (sys_read8(regaddr) != p->host_idx) { sys_write8(p->host_idx, regaddr); } } /* Check each VW register set host index is present. * Some VW's power up with the host index and others do not. * NOTE: Virtual wires are in groups of 4. Disabling one wire in a group * will disable all wires in the group. We do not implement disabling. 
*/ static void xec_vw_config(const struct device *dev) { for (int i = ESPI_VWIRE_SIGNAL_TARGET_GPIO_0; i < ARRAY_SIZE(vw_tbl); i++) { const struct xec_signal *p = &vw_tbl[i]; uint32_t regaddr = xec_smvw_addr(dev, p->xec_reg_idx); uint8_t dir = (p->flags >> MCHP_DT_ESPI_VW_FLAG_DIR_POS) & BIT(0); uint8_t en = (p->flags & BIT(MCHP_DT_ESPI_VW_FLAG_STATUS_POS)); if (dir) { regaddr = xec_msvw_addr(dev, p->xec_reg_idx); } if (en) { xec_vw_cfg_properties(p, regaddr, dir); } } } static int xec_register_vw_handlers(const struct device *dev) { for (int i = 0; i < ARRAY_SIZE(m2s_vwires_isr); i++) { const struct espi_vw_isr *vwi = &m2s_vwires_isr[i]; struct xec_signal signal_info = vw_tbl[vwi->signal]; uint8_t xec_id = signal_info.xec_reg_idx; uint8_t en = (signal_info.flags & BIT(MCHP_DT_ESPI_VW_FLAG_STATUS_POS)); if (!en) { LOG_INF("VW %d not enabled, skipping", vwi->signal); continue; } /* enables interrupt in eSPI MSVWn register */ xec_espi_vw_intr_ctrl(dev, xec_id, signal_info.bit, MSVW_IRQ_SEL_EDGE_BOTH); /* register handler */ int ret = mchp_xec_ecia_set_callback(vwi->girq_id, vwi->girq_pos, vwi->the_isr, (void *)dev); if (ret) { return -EIO; } mchp_xec_ecia_girq_src_en(vwi->girq_id, vwi->girq_pos); } return 0; } /* * Initialize eSPI hardware and associated peripherals blocks using eSPI * as their host interface. * We change VW capabilities reported to match the number of VWires the * driver is supporting. * A VW packet on the bus contains VW count followed by the VW groups. * The VW count is a zero based 6-bit value: (0 - 63) specifying the number of * groups in the packet. * A VW group consists of two bytes: VW host index and VW data. Each group * contains the state of 4 virtual wires. * The total supported virtual wires is 64 * 4 = 256. * MEC172x supports 11 MSVW groups and 11 SMVW groups. * NOTE: While ESPI_nRESET is active most of the eSPI hardware is held * in reset state. 
*/ static int espi_xec_init(const struct device *dev) { struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev); struct espi_iom_regs *regs = ESPI_XEC_REG_BASE(dev); struct espi_xec_data *const data = ESPI_XEC_DATA(dev); struct pcr_regs *pcr = XEC_PCR_REG_BASE; int ret; ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret != 0) { LOG_ERR("XEC eSPI V2 pinctrl setup failed (%d)", ret); return ret; } #ifdef ESPI_XEC_V2_DEBUG data->espi_rst_count = 0; #endif /* clear eSPI PCR sleep enable */ z_mchp_xec_pcr_periph_sleep(cfg->pcr_idx, cfg->pcr_bitpos, 0); /* Configure eSPI_PLTRST# to cause nSIO_RESET reset * NOTE: this is also clearing bit 0(PWR_INV) causing the internal * RESET_VCC to de-assert. Host facing peripherals will no longer * be held in reset. */ pcr->PWR_RST_CTRL = MCHP_PCR_PR_CTRL_USE_ESPI_PLTRST; regs->PLTSRC = MCHP_ESPI_PLTRST_SRC_IS_VW; /* Configure the channels and its capabilities based on build config */ regs->CAP0 |= MCHP_ESPI_GBL_CAP0_VW_SUPP | MCHP_ESPI_GBL_CAP0_PC_SUPP; regs->CAPVW = MAX(ESPI_NUM_MSVW, ESPI_NUM_SMVW); regs->CAPPC |= MCHP_ESPI_PC_CAP_MAX_PLD_SZ_64; #ifdef CONFIG_ESPI_OOB_CHANNEL regs->CAP0 |= MCHP_ESPI_GBL_CAP0_OOB_SUPP; regs->CAPOOB |= MCHP_ESPI_OOB_CAP_MAX_PLD_SZ_73; k_sem_init(&data->tx_lock, 0, 1); #ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC k_sem_init(&data->rx_lock, 0, 1); #endif /* CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC */ #else regs->CAP0 &= ~MCHP_ESPI_GBL_CAP0_OOB_SUPP; #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL regs->CAP0 |= MCHP_ESPI_GBL_CAP0_FC_SUPP | MCHP_ESPI_FC_CAP_MAX_PLD_SZ_64; regs->CAPFC |= MCHP_ESPI_FC_CAP_SHARE_MAF_SAF | MCHP_ESPI_FC_CAP_MAX_RD_SZ_64; k_sem_init(&data->flash_lock, 0, 1); #else regs->CAP0 &= ~MCHP_ESPI_GBL_CAP0_FC_SUPP; #endif /* Clear reset interrupt status and enable interrupts */ regs->ERIS = MCHP_ESPI_RST_ISTS; regs->ERIE |= MCHP_ESPI_RST_IEN; regs->PCSTS = MCHP_ESPI_PC_STS_EN_CHG; regs->PCIEN |= MCHP_ESPI_PC_IEN_EN_CHG; xec_vw_config(dev); /* register VWire handlers with their 
aggregated GIRQs * in the ECIA driver */ ret = xec_register_vw_handlers(dev); if (ret) { LOG_ERR("XEX eSPI V2 register VW handlers error %d", ret); return ret; } /* Enable interrupts for each logical channel enable assertion */ xec_espi_bus_intr_ctl(dev, pc_girq_idx, 1); xec_espi_bus_intr_ctl(dev, vw_ch_en_girq_idx, 1); xec_espi_bus_intr_ctl(dev, rst_girq_idx, 1); #ifdef CONFIG_ESPI_OOB_CHANNEL espi_init_oob(dev); #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL espi_init_flash(dev); #endif espi_xec_connect_irqs(dev); ret = xec_host_dev_connect_irqs(dev); return ret; } ```
/content/code_sandbox/drivers/espi/espi_mchp_xec_v2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
15,145
```c /* * */ #define DT_DRV_COMPAT nuvoton_npcx_host_sub /** * @file * @brief Nuvoton NPCX host sub modules driver * * This file contains the drivers of NPCX Host Sub-Modules that serve as an * interface between the Host and Core domains. Please refer the block diagram. * * +------------+ * | Serial |---> TXD * +<--->| Port |<--- RXD * | | |<--> ... * | +------------+ * | +------------+ | * +------------+ |<--->| KBC & PM |<--->| * eSPI_CLK --->| eSPI Bus | | | Channels | | * eSPI_RST --->| Controller | | +------------+ | * eSPI_IO3-0 <-->| |<-->| +------------+ | * eSPI_CS --->| (eSPI mode)| | | Shared | | * eSPI_ALERT <-->| | |<--->| Memory |<--->| * +------------+ | +------------+ | * | +------------+ | * |<--->| MSWC |<--->| * | +------------+ | * | +------------+ | * | | Core | | * |<--->| to Host |<--->| * | | Access | | * | +------------+ | * HMIB | Core Bus * (Host Modules Internal Bus) +------------ * * * For most of them, the Host can configure these modules via eSPI(Peripheral * Channel)/LPC by accessing 'Configuration and Control register Set' which IO * base address is 0x4E as default. (The table below illustrates structure of * 'Configuration and Control Register Set') And the interrupts in core domain * help handling any events from host side. 
* * Index | Configuration and Control Register Set * --------|--------------------------------------------------+ Bank Select * 07h | Logical Device Number Register (LDN) |---------+ * --------|--------------------------------------------------- | * 20-2Fh | SuperI/O Configuration Registers | | * ------------------------------------------------------------ | * --------|---------------------------------------------------_ | * 30h | Logical Device Control Register | |_ | * --------|--------------------------------------------------- | |_ | * 60-63h | I/O Space Configuration Registers | | | | | * --------|--------------------------------------------------- | | | | * 70-71h | Interrupt Configuration Registers | | | | | * --------|--------------------------------------------------- | | | | * 73-74h | DMA Configuration Registers (No support in NPCX) | | | | | * --------|--------------------------------------------------- | | |<--+ * F0-FFh | Special Logical Device Configuration Registers | | | | * --------|--------------------------------------------------- | | | * |--------------------------------------------------- | | * |--------------------------------------------------- | * |--------------------------------------------------- * * * This driver introduces six host sub-modules. It includes: * * 1. Keyboard and Mouse Controller (KBC) interface. * Intel 8051SL-compatible Host interface * 8042 KBD standard interface (ports 60h, 64h) * Legacy IRQ: IRQ1 (KBD) and IRQ12 (mouse) support * Configured by two logical devices: Keyboard and Mouse (LDN 0x06/0x05) * * 2. Power Management (PM) channels. * PM channel registers * Command/Status register * Data register * channel 1: legacy 62h, 66h; channel 2: legacy 68h, 6Ch; * channel 3: legacy 6Ah, 6Eh; channel 4: legacy 6Bh, 6Fh; * PM interrupt using: * Serial IRQ * SMI * EC_SCI * Configured by four logical devices: PM1/2/3/4 (LDN 0x11/0x12/0x17/0x1E) * * 3. Shared Memory mechanism (SHM). 
* This module allows sharing of the on-chip RAM by both Core and the Host. * It also supports the following features: * Four Core/Host communication windows for direct RAM access * Eight Protection regions for each access window * Host IRQ and SMI generation * Port 80 debug support * Configured by one logical device: SHM (LDN 0x0F) * * 4. Core Access to Host Modules (C2H). * A interface to access module registers in host domain. * It enables the Core to access the registers in host domain (i.e., Host * Configuration, Serial Port, SHM, and MSWC), through HMIB. * * 5. Mobile System Wake-Up functions (MSWC). * It detects and handles wake-up events from various sources in the Host * modules and alerts the Core for better power consumption. * * 6. Serial Port (Legacy UART) * It provides UART functionality by supporting serial data communication * with a remote peripheral device or a modem. * * INCLUDE FILES: soc_host.h * */ #include <assert.h> #include <zephyr/drivers/espi.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/kernel.h> #include <zephyr/sys/ring_buffer.h> #include <soc.h> #include "espi_utils.h" #include "soc_host.h" #include "soc_espi.h" #include "soc_miwu.h" #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(host_sub_npcx, LOG_LEVEL_ERR); struct host_sub_npcx_config { /* host module instances */ struct mswc_reg *const inst_mswc; struct shm_reg *const inst_shm; struct c2h_reg *const inst_c2h; struct kbc_reg *const inst_kbc; struct pmch_reg *const inst_pm_acpi; struct pmch_reg *const inst_pm_hcmd; /* clock configuration */ const uint8_t clks_size; const struct npcx_clk_cfg *clks_list; /* mapping table between host access signals and wake-up input */ struct npcx_wui host_acc_wui; }; struct host_sub_npcx_data { sys_slist_t *callbacks; /* pointer on the espi callback list */ uint8_t plt_rst_asserted; /* current PLT_RST# status */ uint8_t espi_rst_level; /* 
current ESPI_RST# status */ const struct device *host_bus_dev; /* device for eSPI/LPC bus */ #ifdef CONFIG_ESPI_NPCX_PERIPHERAL_DEBUG_PORT_80_MULTI_BYTE struct ring_buf port80_ring_buf; uint8_t port80_data[CONFIG_ESPI_NPCX_PERIPHERAL_DEBUG_PORT_80_RING_BUF_SIZE]; struct k_work work; #endif }; struct npcx_dp80_buf { union { uint16_t offset_code_16; uint8_t offset_code[2]; }; }; static const struct npcx_clk_cfg host_dev_clk_cfg[] = NPCX_DT_CLK_CFG_ITEMS_LIST(0); struct host_sub_npcx_config host_sub_cfg = { .inst_mswc = (struct mswc_reg *)DT_INST_REG_ADDR_BY_NAME(0, mswc), .inst_shm = (struct shm_reg *)DT_INST_REG_ADDR_BY_NAME(0, shm), .inst_c2h = (struct c2h_reg *)DT_INST_REG_ADDR_BY_NAME(0, c2h), .inst_kbc = (struct kbc_reg *)DT_INST_REG_ADDR_BY_NAME(0, kbc), .inst_pm_acpi = (struct pmch_reg *)DT_INST_REG_ADDR_BY_NAME(0, pm_acpi), .inst_pm_hcmd = (struct pmch_reg *)DT_INST_REG_ADDR_BY_NAME(0, pm_hcmd), .host_acc_wui = NPCX_DT_WUI_ITEM_BY_NAME(0, host_acc_wui), .clks_size = ARRAY_SIZE(host_dev_clk_cfg), .clks_list = host_dev_clk_cfg, }; struct host_sub_npcx_data host_sub_data; /* Application shouldn't touch these flags in KBC status register directly */ #define NPCX_KBC_STS_MASK (BIT(NPCX_HIKMST_IBF) | BIT(NPCX_HIKMST_OBF) \ | BIT(NPCX_HIKMST_A2)) /* IO base address of EC Logical Device Configuration */ #define NPCX_EC_CFG_IO_ADDR 0x4E /* Timeout to wait for Core-to-Host transaction to be completed. 
 */
#define NPCX_C2H_TRANSACTION_TIMEOUT_US 200

/* Logical Device Number Assignments (index written to EC_CFG_IDX_LDN) */
#define EC_CFG_LDN_SP 0x03
#define EC_CFG_LDN_MOUSE 0x05
#define EC_CFG_LDN_KBC 0x06
#define EC_CFG_LDN_SHM 0x0F
#define EC_CFG_LDN_ACPI 0x11 /* PM Channel 1 */
#define EC_CFG_LDN_HCMD 0x12 /* PM Channel 2 */

/* Index of EC (4E/4F) Configuration Register */
#define EC_CFG_IDX_LDN 0x07
#define EC_CFG_IDX_CTRL 0x30
#define EC_CFG_IDX_DATA_IO_ADDR_H 0x60
#define EC_CFG_IDX_DATA_IO_ADDR_L 0x61
#define EC_CFG_IDX_CMD_IO_ADDR_H 0x62
#define EC_CFG_IDX_CMD_IO_ADDR_L 0x63

/* LDN Activation Enable (value written to EC_CFG_IDX_CTRL) */
#define EC_CFG_IDX_CTRL_LDN_ENABLE 0x01

/* Index of SuperI/O Control and Configuration Registers */
#define EC_CFG_IDX_SUPERIO_SIOCF9 0x29
#define EC_CFG_IDX_SUPERIO_SIOCF9_CKEN 2

/* Index of Special Logical Device Configuration (Shared Memory Module) */
#define EC_CFG_IDX_SHM_CFG 0xF1
#define EC_CFG_IDX_SHM_WND1_ADDR_0 0xF4
#define EC_CFG_IDX_SHM_WND1_ADDR_1 0xF5
#define EC_CFG_IDX_SHM_WND1_ADDR_2 0xF6
#define EC_CFG_IDX_SHM_WND1_ADDR_3 0xF7
#define EC_CFG_IDX_SHM_WND2_ADDR_0 0xF8
#define EC_CFG_IDX_SHM_WND2_ADDR_1 0xF9
#define EC_CFG_IDX_SHM_WND2_ADDR_2 0xFA
#define EC_CFG_IDX_SHM_WND2_ADDR_3 0xFB
#define EC_CFG_IDX_SHM_DP80_ADDR_RANGE 0xFD

/* Index of Special Logical Device Configuration (Serial Port/Host UART) */
#define EC_CFG_IDX_SP_CFG 0xF0
/* Enable selection of bank 2 and 3 for the Serial Port */
#define EC_CFG_IDX_SP_CFG_BK_SL_ENABLE 7

/* Host sub-device local inline functions */

/*
 * Convert a shared-memory window byte size into the value expected by the
 * RWINx_SIZE field, i.e. the exponent N such that 2^N >= size.
 *
 * The requested size is first clamped to the supported range [8, 4096]
 * bytes; a non-power-of-two size is effectively rounded up to the next
 * power of two because ceil(log2(size)) is returned.
 */
static inline uint8_t host_shd_mem_wnd_size_sl(uint32_t size)
{
	/* The minimum supported shared memory region size is 8 bytes */
	if (size <= 8U) {
		size = 8U;
	}

	/* The maximum supported shared memory region size is 4K bytes */
	if (size >= 4096U) {
		size = 4096U;
	}

	/*
	 * If window size is not a power-of-two, it is rounded-up to the next
	 * power-of-two value, and return value corresponds to RWINx_SIZE field.
	 */
	/* 32 - clz(size - 1) == ceil(log2(size)) for the clamped range */
	return (32 - __builtin_clz(size - 1U)) & 0xff;
}

/* Host KBC sub-device local functions */
#if defined(CONFIG_ESPI_PERIPHERAL_8042_KBC)
/*
 * ISR for the KBC Input Buffer Full event: reads the byte the host wrote,
 * tags it as command or data via the A2 status bit, and forwards it to the
 * registered eSPI callbacks as an ESPI_PERIPHERAL_8042_KBC notification.
 */
static void host_kbc_ibf_isr(const void *arg)
{
	ARG_UNUSED(arg);

	struct kbc_reg *const inst_kbc = host_sub_cfg.inst_kbc;
	struct espi_event evt = { ESPI_BUS_PERIPHERAL_NOTIFICATION,
		ESPI_PERIPHERAL_8042_KBC, ESPI_PERIPHERAL_NODATA
	};
	struct espi_evt_data_kbc *kbc_evt =
				(struct espi_evt_data_kbc *)&evt.evt_data;

	/* KBC Input Buffer Full event */
	kbc_evt->evt = HOST_KBC_EVT_IBF;
	/* The data in KBC Input Buffer (reading HIKMDI also clears IBF) */
	kbc_evt->data = inst_kbc->HIKMDI;
	/*
	 * Indicates if the host sent a command or data.
	 * 0 = data
	 * 1 = Command.
	 */
	kbc_evt->type = IS_BIT_SET(inst_kbc->HIKMST, NPCX_HIKMST_A2);

	LOG_DBG("%s: kbc data 0x%02x", __func__, evt.evt_data);
	espi_send_callbacks(host_sub_data.callbacks, host_sub_data.host_bus_dev,
			evt);
}

/*
 * ISR for the KBC Output Buffer Empty event: the host has consumed the byte
 * the EC placed in the output buffer. Disables further OBE interrupts and
 * notifies the application via an HOST_KBC_EVT_OBE event.
 */
static void host_kbc_obe_isr(const void *arg)
{
	ARG_UNUSED(arg);

	struct kbc_reg *const inst_kbc = host_sub_cfg.inst_kbc;
	struct espi_event evt = { ESPI_BUS_PERIPHERAL_NOTIFICATION,
		ESPI_PERIPHERAL_8042_KBC, ESPI_PERIPHERAL_NODATA
	};
	struct espi_evt_data_kbc *kbc_evt =
				(struct espi_evt_data_kbc *)&evt.evt_data;

	/* Disable KBC OBE interrupt first; re-armed when new output is queued */
	inst_kbc->HICTRL &= ~BIT(NPCX_HICTRL_OBECIE);

	LOG_DBG("%s: kbc status 0x%02x", __func__, inst_kbc->HIKMST);

	/*
	 * Notify application that host already read out data. The application
	 * might need to clear status register via espi_api_lpc_write_request()
	 * with E8042_CLEAR_FLAG opcode in callback.
	 */
	kbc_evt->evt = HOST_KBC_EVT_OBE;
	kbc_evt->data = 0;
	kbc_evt->type = 0;

	espi_send_callbacks(host_sub_data.callbacks, host_sub_data.host_bus_dev,
			evt);
}

/*
 * One-time KBC (8042 keyboard controller) initialization: clears any stale
 * OBF state, then enables IBF core interrupts and the keyboard/mouse OBF
 * serial IRQs.
 */
static void host_kbc_init(void)
{
	struct kbc_reg *const inst_kbc = host_sub_cfg.inst_kbc;

	/* Make sure the previous OBF and IRQ has been sent out. */
	k_busy_wait(4);
	/* Set FW_OBF to clear OBF flag in both STATUS and HIKMST to 0 */
	inst_kbc->HICTRL |= BIT(NPCX_HICTRL_FW_OBF);
	/* Ensure there is no OBF set in this period.
*/ k_busy_wait(4); /* * Init KBC with: * 1. Enable Input Buffer Full (IBF) core interrupt for Keyboard/mouse. * 2. Enable Output Buffer Full Mouse(OBFM) SIRQ 12. * 3. Enable Output Buffer Full Keyboard (OBFK) SIRQ 1. */ inst_kbc->HICTRL = BIT(NPCX_HICTRL_IBFCIE) | BIT(NPCX_HICTRL_OBFMIE) | BIT(NPCX_HICTRL_OBFKIE); /* Configure SIRQ 1/12 type (level + high) */ inst_kbc->HIIRQC = 0x00; } #endif /* Host ACPI sub-device local functions */ #if defined(CONFIG_ESPI_PERIPHERAL_HOST_IO) static void host_acpi_process_input_data(uint8_t data) { struct pmch_reg *const inst_acpi = host_sub_cfg.inst_pm_acpi; struct espi_event evt = { .evt_type = ESPI_BUS_PERIPHERAL_NOTIFICATION, .evt_details = ESPI_PERIPHERAL_HOST_IO, .evt_data = ESPI_PERIPHERAL_NODATA }; struct espi_evt_data_acpi *acpi_evt = (struct espi_evt_data_acpi *)&evt.evt_data; LOG_DBG("%s: acpi data 0x%02x", __func__, data); /* * Indicates if the host sent a command or data. * 0 = data * 1 = Command. */ acpi_evt->type = IS_BIT_SET(inst_acpi->HIPMST, NPCX_HIPMST_CMD); acpi_evt->data = data; espi_send_callbacks(host_sub_data.callbacks, host_sub_data.host_bus_dev, evt); } static void host_acpi_init(void) { struct pmch_reg *const inst_acpi = host_sub_cfg.inst_pm_acpi; /* Use SMI/SCI postive polarity by default */ inst_acpi->HIPMCTL &= ~BIT(NPCX_HIPMCTL_SCIPOL); inst_acpi->HIPMIC &= ~BIT(NPCX_HIPMIC_SMIPOL); /* Set SMIB/SCIB bits to make sure SMI#/SCI# are driven to high */ inst_acpi->HIPMIC |= BIT(NPCX_HIPMIC_SMIB) | BIT(NPCX_HIPMIC_SCIB); /* * Allow SMI#/SCI# generated from PM module. * On eSPI bus, we suggest set VW value of SCI#/SMI# directly. */ inst_acpi->HIPMIE |= BIT(NPCX_HIPMIE_SCIE); inst_acpi->HIPMIE |= BIT(NPCX_HIPMIE_SMIE); /* * Init ACPI PM channel (Host IO) with: * 1. Enable Input-Buffer Full (IBF) core interrupt. * 2. BIT 7 must be 1. 
*/ inst_acpi->HIPMCTL |= BIT(7) | BIT(NPCX_HIPMCTL_IBFIE); } #endif #if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) /* Host command argument and memory map buffers */ static uint8_t shm_host_cmd[CONFIG_ESPI_NPCX_PERIPHERAL_HOST_CMD_PARAM_SIZE] __aligned(8); /* Host command sub-device local functions */ static void host_hcmd_process_input_data(uint8_t data) { struct espi_event evt = { ESPI_BUS_PERIPHERAL_NOTIFICATION, ESPI_PERIPHERAL_EC_HOST_CMD, ESPI_PERIPHERAL_NODATA }; evt.evt_data = data; LOG_DBG("%s: host cmd data 0x%02x", __func__, evt.evt_data); espi_send_callbacks(host_sub_data.callbacks, host_sub_data.host_bus_dev, evt); } static void host_hcmd_init(void) { struct pmch_reg *const inst_hcmd = host_sub_cfg.inst_pm_hcmd; struct shm_reg *const inst_shm = host_sub_cfg.inst_shm; uint32_t win_size = CONFIG_ESPI_NPCX_PERIPHERAL_HOST_CMD_PARAM_SIZE; /* Don't stall SHM transactions */ inst_shm->SHM_CTL &= ~0x40; /* Disable Window 1 protection */ inst_shm->WIN1_WR_PROT = 0; inst_shm->WIN1_RD_PROT = 0; /* Configure Win1 size for ec host command. */ SET_FIELD(inst_shm->WIN_SIZE, NPCX_WIN_SIZE_RWIN1_SIZE_FIELD, host_shd_mem_wnd_size_sl(win_size)); inst_shm->WIN_BASE1 = (uint32_t)shm_host_cmd; /* * Clear processing flag before enabling host's interrupts in case * it's set by the other command during sysjump. */ inst_hcmd->HIPMST &= ~BIT(NPCX_HIPMST_F0); /* * Init Host Command PM channel (Host IO) with: * 1. Enable Input-Buffer Full (IBF) core interrupt. * 2. BIT 7 must be 1. 
*/ inst_hcmd->HIPMCTL |= BIT(7) | BIT(NPCX_HIPMCTL_IBFIE); } #endif #if defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION) /* Host command argument and memory map buffers */ static uint8_t shm_acpi_mmap[CONFIG_ESPI_NPCX_PERIPHERAL_ACPI_SHD_MEM_SIZE] __aligned(8); static void host_shared_mem_region_init(void) { struct shm_reg *const inst_shm = host_sub_cfg.inst_shm; uint32_t win_size = CONFIG_ESPI_NPCX_PERIPHERAL_ACPI_SHD_MEM_SIZE; /* Don't stall SHM transactions */ inst_shm->SHM_CTL &= ~0x40; /* Disable Window 2 protection */ inst_shm->WIN2_WR_PROT = 0; inst_shm->WIN2_RD_PROT = 0; /* Configure Win2 size for ACPI shared mem region. */ SET_FIELD(inst_shm->WIN_SIZE, NPCX_WIN_SIZE_RWIN2_SIZE_FIELD, host_shd_mem_wnd_size_sl(win_size)); inst_shm->WIN_BASE2 = (uint32_t)shm_acpi_mmap; /* Enable write protect of Share memory window 2 */ inst_shm->WIN2_WR_PROT = 0xFF; /* * TODO: Initialize shm_acpi_mmap buffer for host command flags. We * might use EACPI_GET_SHARED_MEMORY in espi_api_lpc_read_request() * instead of setting host command flags here directly. */ } #endif #if defined(CONFIG_ESPI_PERIPHERAL_HOST_IO) || \ defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) /* Host pm (host io) sub-module isr function for all channels such as ACPI. 
*/ static void host_pmch_ibf_isr(const void *arg) { ARG_UNUSED(arg); struct pmch_reg *const inst_acpi = host_sub_cfg.inst_pm_acpi; struct pmch_reg *const inst_hcmd = host_sub_cfg.inst_pm_hcmd; uint8_t in_data; /* Host put data on input buffer of ACPI channel */ if (IS_BIT_SET(inst_acpi->HIPMST, NPCX_HIPMST_IBF)) { /* Set processing flag before reading command byte */ inst_acpi->HIPMST |= BIT(NPCX_HIPMST_F0); /* Read out input data and clear IBF pending bit */ in_data = inst_acpi->HIPMDI; #if defined(CONFIG_ESPI_PERIPHERAL_HOST_IO) host_acpi_process_input_data(in_data); #endif } /* Host put data on input buffer of HOSTCMD channel */ if (IS_BIT_SET(inst_hcmd->HIPMST, NPCX_HIPMST_IBF)) { /* Set processing flag before reading command byte */ inst_hcmd->HIPMST |= BIT(NPCX_HIPMST_F0); /* Read out input data and clear IBF pending bit */ in_data = inst_hcmd->HIPMDI; #if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) host_hcmd_process_input_data(in_data); #endif } } #endif /* Host port80 sub-device local functions */ #if defined(CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80) #if defined(CONFIG_ESPI_NPCX_PERIPHERAL_DEBUG_PORT_80_MULTI_BYTE) static void host_port80_work_handler(struct k_work *item) { uint32_t code = 0; struct host_sub_npcx_data *data = CONTAINER_OF(item, struct host_sub_npcx_data, work); struct ring_buf *rbuf = &data->port80_ring_buf; struct espi_event evt = {ESPI_BUS_PERIPHERAL_NOTIFICATION, (ESPI_PERIPHERAL_INDEX_0 << 16) | ESPI_PERIPHERAL_DEBUG_PORT80, ESPI_PERIPHERAL_NODATA}; while (!ring_buf_is_empty(rbuf)) { struct npcx_dp80_buf dp80_buf; uint8_t offset; ring_buf_get(rbuf, &dp80_buf.offset_code[0], sizeof(dp80_buf.offset_code)); offset = dp80_buf.offset_code[1]; code |= dp80_buf.offset_code[0] << (8 * offset); if (ring_buf_is_empty(rbuf)) { evt.evt_data = code; espi_send_callbacks(host_sub_data.callbacks, host_sub_data.host_bus_dev, evt); break; } /* peek the offset of the next byte */ ring_buf_peek(rbuf, &dp80_buf.offset_code[0], sizeof(dp80_buf.offset_code)); 
offset = dp80_buf.offset_code[1]; /* * If the peeked next byte's offset is 0, it is the start of the new code. * Pass the current code to the application layer to handle the Port80 code. */ if (offset == 0) { evt.evt_data = code; espi_send_callbacks(host_sub_data.callbacks, host_sub_data.host_bus_dev, evt); code = 0; } } } #endif static void host_port80_isr(const void *arg) { ARG_UNUSED(arg); struct shm_reg *const inst_shm = host_sub_cfg.inst_shm; uint8_t status = inst_shm->DP80STS; #ifdef CONFIG_ESPI_NPCX_PERIPHERAL_DEBUG_PORT_80_MULTI_BYTE struct ring_buf *rbuf = &host_sub_data.port80_ring_buf; while (IS_BIT_SET(inst_shm->DP80STS, NPCX_DP80STS_FNE)) { struct npcx_dp80_buf dp80_buf; dp80_buf.offset_code_16 = inst_shm->DP80BUF; ring_buf_put(rbuf, &dp80_buf.offset_code[0], sizeof(dp80_buf.offset_code)); } k_work_submit(&host_sub_data.work); #else struct espi_event evt = {ESPI_BUS_PERIPHERAL_NOTIFICATION, (ESPI_PERIPHERAL_INDEX_0 << 16) | ESPI_PERIPHERAL_DEBUG_PORT80, ESPI_PERIPHERAL_NODATA}; /* Read out port80 data continuously if FIFO is not empty */ while (IS_BIT_SET(inst_shm->DP80STS, NPCX_DP80STS_FNE)) { LOG_DBG("p80: %04x", inst_shm->DP80BUF); evt.evt_data = inst_shm->DP80BUF; espi_send_callbacks(host_sub_data.callbacks, host_sub_data.host_bus_dev, evt); } #endif LOG_DBG("%s: p80 status 0x%02X", __func__, status); /* If FIFO is overflow, show error msg */ if (IS_BIT_SET(status, NPCX_DP80STS_FOR)) { inst_shm->DP80STS |= BIT(NPCX_DP80STS_FOR); LOG_DBG("Port80 FIFO Overflow!"); } /* If there are pending post codes remains in FIFO after processing and sending previous * post codes, do not clear the FNE bit. This allows this handler to be called again * immediately after it exists. 
*/ if (!IS_BIT_SET(inst_shm->DP80STS, NPCX_DP80STS_FNE)) { /* Clear all pending bit indicates that FIFO was written by host */ inst_shm->DP80STS |= BIT(NPCX_DP80STS_FWR); } } static void host_port80_init(void) { struct shm_reg *const inst_shm = host_sub_cfg.inst_shm; /* * Init PORT80 which includes: * Enables a Core interrupt on every Host write to the FIFO, * SYNC mode (It must be 1 in eSPI mode), Read Auto Advance mode, and * Port80 module itself. */ inst_shm->DP80CTL = BIT(NPCX_DP80CTL_CIEN) | BIT(NPCX_DP80CTL_RAA) | BIT(NPCX_DP80CTL_DP80EN) | BIT(NPCX_DP80CTL_SYNCEN); } #endif #if defined(CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE) static void host_cus_opcode_enable_interrupts(void) { /* Enable host KBC sub-device interrupt */ if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_8042_KBC)) { irq_enable(DT_INST_IRQ_BY_NAME(0, kbc_ibf, irq)); irq_enable(DT_INST_IRQ_BY_NAME(0, kbc_obe, irq)); } /* Enable host PM channel (Host IO) sub-device interrupt */ if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_HOST_IO) || IS_ENABLED(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD)) { irq_enable(DT_INST_IRQ_BY_NAME(0, pmch_ibf, irq)); } /* Enable host Port80 sub-device interrupt installation */ if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80)) { irq_enable(DT_INST_IRQ_BY_NAME(0, p80_fifo, irq)); } /* Enable host interface interrupts if its interface is eSPI */ if (IS_ENABLED(CONFIG_ESPI)) { npcx_espi_enable_interrupts(host_sub_data.host_bus_dev); } } static void host_cus_opcode_disable_interrupts(void) { /* Disable host KBC sub-device interrupt */ if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_8042_KBC)) { irq_disable(DT_INST_IRQ_BY_NAME(0, kbc_ibf, irq)); irq_disable(DT_INST_IRQ_BY_NAME(0, kbc_obe, irq)); } /* Disable host PM channel (Host IO) sub-device interrupt */ if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_HOST_IO) || IS_ENABLED(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD)) { irq_disable(DT_INST_IRQ_BY_NAME(0, pmch_ibf, irq)); } /* Disable host Port80 sub-device interrupt installation */ if 
(IS_ENABLED(CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80)) {
		irq_disable(DT_INST_IRQ_BY_NAME(0, p80_fifo, irq));
	}

	/* Disable host interface interrupts if its interface is eSPI */
	if (IS_ENABLED(CONFIG_ESPI)) {
		npcx_espi_disable_interrupts(host_sub_data.host_bus_dev);
	}
}
#endif /* CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE */

#if defined(CONFIG_ESPI_PERIPHERAL_UART)
/* host uart pinmux configuration */
PINCTRL_DT_DEFINE(DT_INST(0, nuvoton_npcx_host_uart));
BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(nuvoton_npcx_host_uart) == 1,
	"only one 'nuvoton_npcx_host_uart' compatible node may be present");
const struct pinctrl_dev_config *huart_cfg =
			PINCTRL_DT_DEV_CONFIG_GET(DT_INST(0, nuvoton_npcx_host_uart));

/* Host UART sub-device local functions */

/*
 * Initialize the legacy Host UART (Serial Port): applies the pin-mux state
 * and unlocks/clears the C2H host-access lock state for the serial port.
 */
void host_uart_init(void)
{
	struct c2h_reg *const inst_c2h = host_sub_cfg.inst_c2h;

	/* Configure pin-mux for serial port device */
	pinctrl_apply_state(huart_cfg, PINCTRL_STATE_DEFAULT);
	/* Make sure unlock host access of serial port */
	inst_c2h->LKSIOHA &= ~BIT(NPCX_LKSIOHA_LKSPHA);
	/* Clear 'Host lock violation occurred' bit of serial port initially */
	inst_c2h->SIOLV |= BIT(NPCX_SIOLV_SPLV);
}
#endif

/* host core-to-host interface local functions */

/*
 * Busy-wait until the current Core-to-Host WRITE transaction completes
 * (SIBCTRL.CSWR deasserts). Gives up with an error log after
 * NPCX_C2H_TRANSACTION_TIMEOUT_US so a hung bus cannot stall the EC forever.
 */
static void host_c2h_wait_write_done(void)
{
	struct c2h_reg *const inst_c2h = host_sub_cfg.inst_c2h;
	uint32_t elapsed_cycles;
	uint32_t start_cycles = k_cycle_get_32();
	uint32_t max_wait_cycles =
			k_us_to_cyc_ceil32(NPCX_C2H_TRANSACTION_TIMEOUT_US);

	while (IS_BIT_SET(inst_c2h->SIBCTRL, NPCX_SIBCTRL_CSWR)) {
		elapsed_cycles = k_cycle_get_32() - start_cycles;
		if (elapsed_cycles > max_wait_cycles) {
			LOG_ERR("c2h write transaction expired!");
			break;
		}
	}
}

/*
 * Busy-wait until the current Core-to-Host READ transaction completes
 * (SIBCTRL.CSRD deasserts), with the same timeout policy as the write path.
 */
static void host_c2h_wait_read_done(void)
{
	struct c2h_reg *const inst_c2h = host_sub_cfg.inst_c2h;
	uint32_t elapsed_cycles;
	uint32_t start_cycles = k_cycle_get_32();
	uint32_t max_wait_cycles =
			k_us_to_cyc_ceil32(NPCX_C2H_TRANSACTION_TIMEOUT_US);

	while (IS_BIT_SET(inst_c2h->SIBCTRL, NPCX_SIBCTRL_CSRD)) {
		elapsed_cycles = k_cycle_get_32() - start_cycles;
if (elapsed_cycles > max_wait_cycles) { LOG_ERR("c2h read transaction expired!"); break; } } } void host_c2h_write_io_cfg_reg(uint8_t reg_index, uint8_t reg_data) { struct c2h_reg *const inst_c2h = host_sub_cfg.inst_c2h; /* Disable interrupts */ unsigned int key = irq_lock(); /* Lock host access EC configuration registers (0x4E/0x4F) */ inst_c2h->LKSIOHA |= BIT(NPCX_LKSIOHA_LKCFG); /* Enable Core-to-Host access CFG module */ inst_c2h->CRSMAE |= BIT(NPCX_CRSMAE_CFGAE); /* Verify core-to-host modules is not in progress */ host_c2h_wait_read_done(); host_c2h_wait_write_done(); /* * Specifying the in-direct IO address which A0 = 0 indicates the index * register is accessed. Then write index address directly and it starts * a write transaction to host sub-module on LPC/eSPI bus. */ inst_c2h->IHIOA = NPCX_EC_CFG_IO_ADDR; inst_c2h->IHD = reg_index; host_c2h_wait_write_done(); /* * Specifying the in-direct IO address which A0 = 1 indicates the data * register is accessed. Then write data directly and it starts a write * transaction to host sub-module on LPC/eSPI bus. 
*/ inst_c2h->IHIOA = NPCX_EC_CFG_IO_ADDR + 1; inst_c2h->IHD = reg_data; host_c2h_wait_write_done(); /* Disable Core-to-Host access CFG module */ inst_c2h->CRSMAE &= ~BIT(NPCX_CRSMAE_CFGAE); /* Unlock host access EC configuration registers (0x4E/0x4F) */ inst_c2h->LKSIOHA &= ~BIT(NPCX_LKSIOHA_LKCFG); /* Enable interrupts */ irq_unlock(key); } uint8_t host_c2h_read_io_cfg_reg(uint8_t reg_index) { struct c2h_reg *const inst_c2h = host_sub_cfg.inst_c2h; uint8_t data_val; /* Disable interrupts */ unsigned int key = irq_lock(); /* Lock host access EC configuration registers (0x4E/0x4F) */ inst_c2h->LKSIOHA |= BIT(NPCX_LKSIOHA_LKCFG); /* Enable Core-to-Host access CFG module */ inst_c2h->CRSMAE |= BIT(NPCX_CRSMAE_CFGAE); /* Verify core-to-host modules is not in progress */ host_c2h_wait_read_done(); host_c2h_wait_write_done(); /* * Specifying the in-direct IO address which A0 = 0 indicates the index * register is accessed. Then write index address directly and it starts * a write transaction to host sub-module on LPC/eSPI bus. */ inst_c2h->IHIOA = NPCX_EC_CFG_IO_ADDR; inst_c2h->IHD = reg_index; host_c2h_wait_write_done(); /* * Specifying the in-direct IO address which A0 = 1 indicates the data * register is accessed. Then write CSRD bit in SIBCTRL to issue a read * transaction to host sub-module on LPC/eSPI bus. Once it was done, * read data out from IHD. 
*/ inst_c2h->IHIOA = NPCX_EC_CFG_IO_ADDR + 1; inst_c2h->SIBCTRL |= BIT(NPCX_SIBCTRL_CSRD); host_c2h_wait_read_done(); data_val = inst_c2h->IHD; /* Disable Core-to-Host access CFG module */ inst_c2h->CRSMAE &= ~BIT(NPCX_CRSMAE_CFGAE); /* Unlock host access EC configuration registers (0x4E/0x4F) */ inst_c2h->LKSIOHA &= ~BIT(NPCX_LKSIOHA_LKCFG); /* Enable interrupts */ irq_unlock(key); return data_val; } /* Platform specific host sub modules functions */ int npcx_host_periph_read_request(enum lpc_peripheral_opcode op, uint32_t *data) { if (op >= E8042_START_OPCODE && op <= E8042_MAX_OPCODE) { struct kbc_reg *const inst_kbc = host_sub_cfg.inst_kbc; /* Make sure kbc 8042 is on */ if (!IS_BIT_SET(inst_kbc->HICTRL, NPCX_HICTRL_OBFKIE) || !IS_BIT_SET(inst_kbc->HICTRL, NPCX_HICTRL_OBFMIE)) { return -ENOTSUP; } switch (op) { case E8042_OBF_HAS_CHAR: /* EC has written data back to host. OBF is * automatically cleared after host reads * the data */ *data = IS_BIT_SET(inst_kbc->HIKMST, NPCX_HIKMST_OBF); break; case E8042_IBF_HAS_CHAR: *data = IS_BIT_SET(inst_kbc->HIKMST, NPCX_HIKMST_IBF); break; case E8042_READ_KB_STS: *data = inst_kbc->HIKMST; break; default: return -EINVAL; } } else if (op >= EACPI_START_OPCODE && op <= EACPI_MAX_OPCODE) { struct pmch_reg *const inst_acpi = host_sub_cfg.inst_pm_acpi; /* Make sure pm channel for apci is on */ if (!IS_BIT_SET(inst_acpi->HIPMCTL, NPCX_HIPMCTL_IBFIE)) { return -ENOTSUP; } switch (op) { case EACPI_OBF_HAS_CHAR: /* EC has written data back to host. 
OBF is * automatically cleared after host reads * the data */ *data = IS_BIT_SET(inst_acpi->HIPMST, NPCX_HIPMST_OBF); break; case EACPI_IBF_HAS_CHAR: *data = IS_BIT_SET(inst_acpi->HIPMST, NPCX_HIPMST_IBF); break; case EACPI_READ_STS: *data = inst_acpi->HIPMST; break; #if defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION) case EACPI_GET_SHARED_MEMORY: *data = (uint32_t)shm_acpi_mmap; break; #endif /* CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION */ default: return -EINVAL; } } #if defined(CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE) else if (op >= ECUSTOM_START_OPCODE && op <= ECUSTOM_MAX_OPCODE) { /* Other customized op codes */ switch (op) { case ECUSTOM_HOST_CMD_GET_PARAM_MEMORY: *data = (uint32_t)shm_host_cmd; break; case ECUSTOM_HOST_CMD_GET_PARAM_MEMORY_SIZE: *data = CONFIG_ESPI_NPCX_PERIPHERAL_HOST_CMD_PARAM_SIZE; break; default: return -EINVAL; } } #endif /* CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE */ else { return -ENOTSUP; } return 0; } int npcx_host_periph_write_request(enum lpc_peripheral_opcode op, const uint32_t *data) { volatile uint32_t __attribute__((unused)) dummy; struct kbc_reg *const inst_kbc = host_sub_cfg.inst_kbc; if (op >= E8042_START_OPCODE && op <= E8042_MAX_OPCODE) { /* Make sure kbc 8042 is on */ if (!IS_BIT_SET(inst_kbc->HICTRL, NPCX_HICTRL_OBFKIE) || !IS_BIT_SET(inst_kbc->HICTRL, NPCX_HICTRL_OBFMIE)) { return -ENOTSUP; } if (data) { LOG_INF("%s: op 0x%x data %x", __func__, op, *data); } else { LOG_INF("%s: op 0x%x only", __func__, op); } switch (op) { case E8042_WRITE_KB_CHAR: inst_kbc->HIKDO = *data & 0xff; /* * Enable KBC OBE interrupt after putting data in * keyboard data register. */ inst_kbc->HICTRL |= BIT(NPCX_HICTRL_OBECIE); break; case E8042_WRITE_MB_CHAR: inst_kbc->HIMDO = *data & 0xff; /* * Enable KBC OBE interrupt after putting data in * mouse data register. 
*/ inst_kbc->HICTRL |= BIT(NPCX_HICTRL_OBECIE); break; case E8042_RESUME_IRQ: /* Enable KBC IBF interrupt */ inst_kbc->HICTRL |= BIT(NPCX_HICTRL_IBFCIE); break; case E8042_PAUSE_IRQ: /* Disable KBC IBF interrupt */ inst_kbc->HICTRL &= ~BIT(NPCX_HICTRL_IBFCIE); break; case E8042_CLEAR_OBF: /* Clear OBF flag in both STATUS and HIKMST to 0 */ inst_kbc->HICTRL |= BIT(NPCX_HICTRL_FW_OBF); break; case E8042_SET_FLAG: /* FW shouldn't modify these flags directly */ inst_kbc->HIKMST |= *data & ~NPCX_KBC_STS_MASK; break; case E8042_CLEAR_FLAG: /* FW shouldn't modify these flags directly */ inst_kbc->HIKMST &= ~(*data | NPCX_KBC_STS_MASK); break; default: return -EINVAL; } } else if (op >= EACPI_START_OPCODE && op <= EACPI_MAX_OPCODE) { struct pmch_reg *const inst_acpi = host_sub_cfg.inst_pm_acpi; /* Make sure pm channel for apci is on */ if (!IS_BIT_SET(inst_acpi->HIPMCTL, NPCX_HIPMCTL_IBFIE)) { return -ENOTSUP; } switch (op) { case EACPI_WRITE_CHAR: inst_acpi->HIPMDO = (*data & 0xff); break; case EACPI_WRITE_STS: inst_acpi->HIPMST = (*data & 0xff); break; default: return -EINVAL; } } #if defined(CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE) else if (op >= ECUSTOM_START_OPCODE && op <= ECUSTOM_MAX_OPCODE) { /* Other customized op codes */ struct pmch_reg *const inst_hcmd = host_sub_cfg.inst_pm_hcmd; switch (op) { case ECUSTOM_HOST_SUBS_INTERRUPT_EN: if (*data != 0) { host_cus_opcode_enable_interrupts(); } else { host_cus_opcode_disable_interrupts(); } break; case ECUSTOM_HOST_CMD_SEND_RESULT: /* * Write result to the data byte. This sets the TOH * status bit. 
*/ inst_hcmd->HIPMDO = (*data & 0xff); /* Clear processing flag */ inst_hcmd->HIPMST &= ~BIT(NPCX_HIPMST_F0); break; default: return -EINVAL; } } #endif /* CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE */ else { return -ENOTSUP; } return 0; } void npcx_host_init_subs_host_domain(void) { struct c2h_reg *const inst_c2h = host_sub_cfg.inst_c2h; /* Enable Core-to-Host access module */ inst_c2h->SIBCTRL |= BIT(NPCX_SIBCTRL_CSAE); if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_8042_KBC)) { /* * Select Keyboard/Mouse banks which LDN are 0x06/05 and enable * modules by setting bit 0 in its Control (index is 0x30) reg. */ host_c2h_write_io_cfg_reg(EC_CFG_IDX_LDN, EC_CFG_LDN_KBC); host_c2h_write_io_cfg_reg(EC_CFG_IDX_CTRL, EC_CFG_IDX_CTRL_LDN_ENABLE); host_c2h_write_io_cfg_reg(EC_CFG_IDX_LDN, EC_CFG_LDN_MOUSE); host_c2h_write_io_cfg_reg(EC_CFG_IDX_CTRL, EC_CFG_IDX_CTRL_LDN_ENABLE); } if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_HOST_IO)) { /* * Select ACPI bank which LDN are 0x11 (PM Channel 1) and enable * module by setting bit 0 in its Control (index is 0x30) reg. 
	 */
		host_c2h_write_io_cfg_reg(EC_CFG_IDX_LDN, EC_CFG_LDN_ACPI);
		host_c2h_write_io_cfg_reg(EC_CFG_IDX_CTRL,
						EC_CFG_IDX_CTRL_LDN_ENABLE);
	}

	if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) ||
		IS_ENABLED(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION)) {
		/* Select 'Host Command' bank which LDN are 0x12 (PM chan 2) */
		host_c2h_write_io_cfg_reg(EC_CFG_IDX_LDN, EC_CFG_LDN_HCMD);
#if defined(CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM)
		/* Configure IO address of the Data port (default: 0x200) */
		host_c2h_write_io_cfg_reg(EC_CFG_IDX_DATA_IO_ADDR_H,
			(CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM >> 8) & 0xff);
		host_c2h_write_io_cfg_reg(EC_CFG_IDX_DATA_IO_ADDR_L,
			CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM & 0xff);
		/* Configure IO address of the CMD port (Data port + 4, default: 0x204) */
		host_c2h_write_io_cfg_reg(EC_CFG_IDX_CMD_IO_ADDR_H,
			((CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM + 4) >> 8) & 0xff);
		host_c2h_write_io_cfg_reg(EC_CFG_IDX_CMD_IO_ADDR_L,
			(CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM + 4) & 0xff);
#endif
		/* Enable 'Host Command' io port (PM Channel 2) */
		host_c2h_write_io_cfg_reg(EC_CFG_IDX_CTRL,
						EC_CFG_IDX_CTRL_LDN_ENABLE);

		/* Select 'Shared Memory' bank which LDN are 0x0F */
		host_c2h_write_io_cfg_reg(EC_CFG_IDX_LDN, EC_CFG_LDN_SHM);
		/* WIN 1 & 2 mapping to IO space (0xF1 == EC_CFG_IDX_SHM_CFG) */
		host_c2h_write_io_cfg_reg(0xF1,
				host_c2h_read_io_cfg_reg(0xF1) | 0x30);
		/* WIN1 as Host Command on the IO address (default: 0x0800) */
#if defined(CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM)
		host_c2h_write_io_cfg_reg(EC_CFG_IDX_SHM_WND1_ADDR_1,
			(CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM >> 8) & 0xff);
		host_c2h_write_io_cfg_reg(EC_CFG_IDX_SHM_WND1_ADDR_0,
			CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM & 0xff);
#endif
		/* Set WIN2 as MEMMAP on the configured IO address */
#if defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION_PORT_NUM)
		host_c2h_write_io_cfg_reg(EC_CFG_IDX_SHM_WND2_ADDR_1,
			(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION_PORT_NUM >> 8) & 0xff);
		host_c2h_write_io_cfg_reg(EC_CFG_IDX_SHM_WND2_ADDR_0,
CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION_PORT_NUM & 0xff); #endif if (IS_ENABLED(CONFIG_ESPI_NPCX_PERIPHERAL_DEBUG_PORT_80_MULTI_BYTE)) { host_c2h_write_io_cfg_reg(EC_CFG_IDX_SHM_DP80_ADDR_RANGE, 0x0f); } /* Enable SHM direct memory access */ host_c2h_write_io_cfg_reg(EC_CFG_IDX_CTRL, EC_CFG_IDX_CTRL_LDN_ENABLE); } if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_UART)) { /* Select Serial Port banks which LDN are 0x03. */ host_c2h_write_io_cfg_reg(EC_CFG_IDX_LDN, EC_CFG_LDN_SP); /* Enable SIO_CLK */ host_c2h_write_io_cfg_reg(EC_CFG_IDX_SUPERIO_SIOCF9, host_c2h_read_io_cfg_reg(EC_CFG_IDX_SUPERIO_SIOCF9) | BIT(EC_CFG_IDX_SUPERIO_SIOCF9_CKEN)); /* Enable Bank Select */ host_c2h_write_io_cfg_reg(EC_CFG_IDX_SP_CFG, host_c2h_read_io_cfg_reg(EC_CFG_IDX_SP_CFG) | BIT(EC_CFG_IDX_SP_CFG_BK_SL_ENABLE)); host_c2h_write_io_cfg_reg(EC_CFG_IDX_CTRL, EC_CFG_IDX_CTRL_LDN_ENABLE); } LOG_DBG("Hos sub-modules configurations are done!"); } void npcx_host_enable_access_interrupt(void) { npcx_miwu_irq_get_and_clear_pending(&host_sub_cfg.host_acc_wui); npcx_miwu_irq_enable(&host_sub_cfg.host_acc_wui); } void npcx_host_disable_access_interrupt(void) { npcx_miwu_irq_disable(&host_sub_cfg.host_acc_wui); } int npcx_host_init_subs_core_domain(const struct device *host_bus_dev, sys_slist_t *callbacks) { struct mswc_reg *const inst_mswc = host_sub_cfg.inst_mswc; struct shm_reg *const inst_shm = host_sub_cfg.inst_shm; const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE); int i; uint8_t shm_sts; host_sub_data.callbacks = callbacks; host_sub_data.host_bus_dev = host_bus_dev; /* Turn on all host necessary sub-module clocks first */ for (i = 0; i < host_sub_cfg.clks_size; i++) { int ret; if (!device_is_ready(clk_dev)) { return -ENODEV; } ret = clock_control_on(clk_dev, (clock_control_subsys_t) &host_sub_cfg.clks_list[i]); if (ret < 0) { return ret; } } /* Configure EC legacy configuration IO base address to 0x4E. 
*/ if (!IS_BIT_SET(inst_mswc->MSWCTL1, NPCX_MSWCTL1_VHCFGA)) { inst_mswc->HCBAL = NPCX_EC_CFG_IO_ADDR; inst_mswc->HCBAH = 0x0; } /* * Set HOSTWAIT bit and avoid the other settings, then host can freely * communicate with slave (EC). */ inst_shm->SMC_CTL &= BIT(NPCX_SMC_CTL_HOSTWAIT); /* Clear shared memory status */ shm_sts = inst_shm->SMC_STS; inst_shm->SMC_STS = shm_sts; /* host sub-module initialization in core domain */ #if defined(CONFIG_ESPI_PERIPHERAL_8042_KBC) host_kbc_init(); #endif #if defined(CONFIG_ESPI_PERIPHERAL_HOST_IO) host_acpi_init(); #endif #if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) host_hcmd_init(); #endif #if defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION) host_shared_mem_region_init(); #endif #if defined(CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80) host_port80_init(); #if defined(CONFIG_ESPI_NPCX_PERIPHERAL_DEBUG_PORT_80_MULTI_BYTE) ring_buf_init(&host_sub_data.port80_ring_buf, sizeof(host_sub_data.port80_data), host_sub_data.port80_data); k_work_init(&host_sub_data.work, host_port80_work_handler); #endif #endif #if defined(CONFIG_ESPI_PERIPHERAL_UART) host_uart_init(); #endif /* Host KBC sub-device interrupt installation */ #if defined(CONFIG_ESPI_PERIPHERAL_8042_KBC) IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, kbc_ibf, irq), DT_INST_IRQ_BY_NAME(0, kbc_ibf, priority), host_kbc_ibf_isr, NULL, 0); IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, kbc_obe, irq), DT_INST_IRQ_BY_NAME(0, kbc_obe, priority), host_kbc_obe_isr, NULL, 0); #endif /* Host PM channel (Host IO) sub-device interrupt installation */ #if defined(CONFIG_ESPI_PERIPHERAL_HOST_IO) || \ defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, pmch_ibf, irq), DT_INST_IRQ_BY_NAME(0, pmch_ibf, priority), host_pmch_ibf_isr, NULL, 0); #endif /* Host Port80 sub-device interrupt installation */ #if defined(CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80) IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, p80_fifo, irq), DT_INST_IRQ_BY_NAME(0, p80_fifo, priority), host_port80_isr, NULL, 0); #endif if 
(IS_ENABLED(CONFIG_PM)) { /* * Configure the host access wake-up event triggered from a host * transaction on eSPI/LPC bus. Do not enable it here. Or plenty * of interrupts will jam the system in S0. */ npcx_miwu_interrupt_configure(&host_sub_cfg.host_acc_wui, NPCX_MIWU_MODE_EDGE, NPCX_MIWU_TRIG_HIGH); } return 0; } ```
/content/code_sandbox/drivers/espi/host_subs_npcx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
11,950
```unknown # Microchip XEC ESPI configuration options config ESPI_XEC bool "XEC Microchip ESPI driver" default y depends on DT_HAS_MICROCHIP_XEC_ESPI_ENABLED help Enable the Microchip XEC ESPI driver for MEC15xx family. config ESPI_XEC_V2 bool "XEC Microchip ESPI V2 driver" default y depends on DT_HAS_MICROCHIP_XEC_ESPI_V2_ENABLED help Enable the Microchip XEC ESPI driver for MEC172x series. if ESPI_XEC || ESPI_XEC_V2 config ESPI_OOB_CHANNEL default y config ESPI_FLASH_CHANNEL default y config ESPI_PERIPHERAL_HOST_IO default y config ESPI_PERIPHERAL_HOST_IO_PVT default y config ESPI_PERIPHERAL_DEBUG_PORT_80 default y config ESPI_PERIPHERAL_UART default y config ESPI_PERIPHERAL_UART_SOC_MAPPING int "SoC port exposed as logical eSPI UART" default 2 if SOC_SERIES_MEC15XX default 1 if SOC_SERIES_MEC172X depends on ESPI_PERIPHERAL_UART help This tells the driver to which SoC UART to direct the UART traffic send over eSPI from host. config ESPI_OOB_BUFFER_SIZE int "eSPI OOB channel buffer size in bytes" default 128 depends on ESPI_OOB_CHANNEL help Use minimum RAM buffer size by default but allow applications to override the value. Maximum OOB payload is 73 bytes. config ESPI_FLASH_BUFFER_SIZE int "eSPI Flash channel buffer size in bytes" default 256 depends on ESPI_FLASH_CHANNEL help Use maximum RAM buffer size defined by spec but allow applications to override if eSPI host doesn't support it. config ESPI_TAF_XEC bool "XEC Microchip ESPI TAF driver" default y depends on SOC_SERIES_MEC15XX depends on DT_HAS_MICROCHIP_XEC_ESPI_SAF_ENABLED help Enable the Microchip XEC TAF ESPI driver for MEC15xx family. config ESPI_TAF_XEC_V2 bool "XEC Microchip ESPI TAF V2 driver" default y depends on SOC_SERIES_MEC172X depends on DT_HAS_MICROCHIP_XEC_ESPI_SAF_V2_ENABLED help Enable the Microchip XEC TAF ESPI driver for MEC172x series. 
endif #ESPI_XEC if ESPI_XEC_V2 config ESPI_XEC_PERIPHERAL_ACPI_SHD_MEM_SIZE int "Host I/O peripheral port size for shared memory in MEC172X series" depends on ESPI_XEC_V2 || ESPI_PERIPHERAL_ACPI_SHM_REGION default 256 help This is the port size used by the Host and EC to communicate over the shared memory region to return the ACPI response data. config ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE int "Host I/O peripheral port size for ec host command in MEC172X series" depends on ESPI_XEC_V2 || ESPI_PERIPHERAL_EC_HOST_CMD default 256 help This is the port size used by the Host and EC to communicate over the shared memory region to return the host command parameter data. config ESPI_PERIPHERAL_8042_KBC default y if ESPI_PERIPHERAL_CHANNEL config ESPI_PERIPHERAL_XEC_MAILBOX bool "SoC Mailbox over eSPI" help Enable a 32 byte mailbox interface accessible via Host I/O over the ESPI Peripheral Channel. config ESPI_PERIPHERAL_XEC_ACPI_EC2 bool "SoC ACPI EC 2 over eSPI" help Enable ACPI EC2 interface accessible via Host I/O over the ESPI Peripheral Channel. config ESPI_PERIPHERAL_XEC_ACPI_EC3 bool "SoC ACPI EC 3 over eSPI" help Enable ACPI EC3 interface accessible via Host I/O over the ESPI Peripheral Channel. config ESPI_PERIPHERAL_XEC_ACPI_EC4 bool "SoC ACPI EC 4 over eSPI" help Enable ACPI EC4 interface accessible via Host I/O over the ESPI Peripheral Channel. config ESPI_PERIPHERAL_XEC_ACPI_PM1 bool "SoC ACPI PM1 over eSPI" help Enable ACPI PM1 interface accessible via Host I/O over the ESPI Peripheral Channel. config ESPI_PERIPHERAL_XEC_EMI0 bool "SoC EMI 0 over eSPI" help Enable EMI 0 interface accessible via Host I/O over the ESPI Peripheral Channel. config ESPI_PERIPHERAL_XEC_EMI1 bool "SoC EMI 1 over eSPI" help Enable EMI 1 interface accessible via Host I/O over the ESPI Peripheral Channel. config ESPI_PERIPHERAL_XEC_EMI2 bool "SoC EMI 2 over eSPI" help Enable EMI 2 interface accessible via Host I/O over the ESPI Peripheral Channel. 
endif #ESPI_PERIPHERAL_CHANNEL config ESPI_TAF bool "XEC Microchip ESPI TAF driver" depends on ESPI_FLASH_CHANNEL help Enable Target Attached Flash eSPI driver. TAF depends upon ESPI XEC driver and flash channel. config ESPI_TAF_INIT_PRIORITY int "ESPI TAF driver initialization priority" depends on ESPI_TAF default 4 help Driver initialization priority for ESPI TAF driver. config ESPI_PERIPHERAL_ACPI_EC_IBF_EVT_DATA bool "Read ACPI EC Event Data in IBF ISR" depends on ESPI_PERIPHERAL_CHANNEL help Enable reading event data in ACPI EC IBF ISR. This is used in OS environment where application expects IBF ISR to read data and pass to callback. endif #ESPI_XEC_V2 if ESPI_XEC_V2 && ESPI_PERIPHERAL_8042_KBC config ESPI_PERIPHERAL_KBC_IBF_EVT_DATA bool "KBC event data format in IBF" help Enable espi_evt_data_kbc format for encoding event in KBC IBF ISR config ESPI_PERIPHERAL_KBC_OBE_CBK bool "KBC OBE Callback" help Enable KBC OBE callback from OBE ISR endif #ESPI_XEC_V2 && ESPI_PERIPHERAL_8042_KBC ```
/content/code_sandbox/drivers/espi/Kconfig.xec
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,447
```objective-c
/*
 *
 */

/**
 * @file Header with definitions for eSPI drivers callback functions
 */

#ifndef ZEPHYR_DRIVERS_ESPI_UTILS_H_
#define ZEPHYR_DRIVERS_ESPI_UTILS_H_

/**
 * @brief Generic function to insert or remove a callback from a callback list.
 *
 * @param callbacks A pointer to the original list of callbacks (can be NULL).
 * @param callback A pointer of the callback to insert or remove from the list.
 * @param set A boolean indicating insertion (true) or removal (false) of the
 *            callback.
 *
 * @return 0 on success, negative errno otherwise.
 */
static inline int espi_manage_callback(sys_slist_t *callbacks,
				       struct espi_callback *callback,
				       bool set)
{
	__ASSERT(callback, "No callback!");
	__ASSERT(callback->handler, "No callback handler!");

	if (!sys_slist_is_empty(callbacks)) {
		/* On insertion, first remove any existing registration of
		 * this callback so the list never holds a duplicate node.
		 */
		if (!sys_slist_find_and_remove(callbacks, &callback->node)) {
			/* Removal was requested but the node is not present */
			if (!set) {
				return -EINVAL;
			}
		}
	} else if (!set) {
		/* Removal requested from an empty list */
		return -EINVAL;
	}

	if (set) {
		sys_slist_prepend(callbacks, &callback->node);
	}

	return 0;
}

/**
 * @brief Generic function to go through and fire callback from a callback list.
 *
 * @param list A pointer on the espi callback list.
 * @param dev A pointer on the espi driver instance.
 * @param evt The details on the event that triggered the callback.
 */
static inline void espi_send_callbacks(sys_slist_t *list,
				       const struct device *dev,
				       struct espi_event evt)
{
	struct espi_callback *cb, *tmp;

	/* Safe iteration: a handler may unregister its own callback
	 * (removing the node) while this loop is running.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(list, cb, tmp, node) {
		/* Deliver only the event types this callback subscribed to */
		if (cb->evt_type & evt.evt_type) {
			__ASSERT(cb->handler, "No callback handler!");
			cb->handler(dev, cb, evt);
		}
	}
}

#endif /* ZEPHYR_DRIVERS_ESPI_UTILS_H_ */
```
/content/code_sandbox/drivers/espi/espi_utils.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
430
```objective-c
/*
 *
 */

#ifndef ZEPHYR_DRIVERS_ESPI_MCHP_XEC_ESPI_V2_H_
#define ZEPHYR_DRIVERS_ESPI_MCHP_XEC_ESPI_V2_H_

#include <stdint.h>
#include <zephyr/device.h>
#include <zephyr/drivers/espi.h>
#include <zephyr/drivers/pinctrl.h>

/* #define ESPI_XEC_V2_DEBUG 1 */

/* Binds a GIRQ source (id + bit position) to its interrupt service routine. */
struct espi_isr {
	uint8_t girq_id;
	uint8_t girq_pos;
	void (*the_isr)(const struct device *dev);
};

/* Binds a virtual-wire signal and its GIRQ source to a VW ISR. */
struct espi_vw_isr {
	uint8_t signal;
	uint8_t girq_id;
	uint8_t girq_pos;
	void (*the_isr)(int girq, int bpos, void *dev);
};

/* Interrupt routing information for one eSPI sub-component. */
struct espi_xec_irq_info {
	uint8_t gid;  /* GIRQ id [8, 26] */
	uint8_t gpos; /* bit position in GIRQ [0, 31] */
	uint8_t anid; /* Aggregated GIRQ NVIC number */
	uint8_t dnid; /* Direct GIRQ NVIC number */
};

/* Immutable per-instance configuration (populated from devicetree). */
struct espi_xec_config {
	uint32_t base_addr;    /* eSPI I/O component register block base */
	uint32_t vw_base_addr; /* eSPI virtual-wire register block base */
	uint8_t pcr_idx;       /* PCR register index for this block */
	uint8_t pcr_bitpos;    /* bit position within that PCR register */
	uint8_t irq_info_size; /* number of entries in irq_info_list */
	uint8_t rsvd[1];
	const struct espi_xec_irq_info *irq_info_list;
	const struct pinctrl_dev_config *pcfg;
};

/* Accessor for the read-only config attached to a device instance. */
#define ESPI_XEC_CONFIG(dev) \
	((struct espi_xec_config * const)(dev)->config)

/* Mutable per-instance driver state. */
struct espi_xec_data {
	sys_slist_t callbacks; /* registered eSPI event callbacks */
	/* NOTE(review): semaphores appear to signal OOB TX/RX and flash
	 * channel completion — confirm against espi_mchp_xec_v2.c usage.
	 */
	struct k_sem tx_lock;
	struct k_sem rx_lock;
	struct k_sem flash_lock;
#ifdef ESPI_XEC_V2_DEBUG
	uint32_t espi_rst_count; /* count of eSPI reset events (debug only) */
#endif
};

/* Accessor for the mutable data attached to a device instance. */
#define ESPI_XEC_DATA(dev) \
	((struct espi_xec_data * const)(dev)->data)

/* Locates a virtual-wire signal within the MSVW/SMVW register arrays. */
struct xec_signal {
	uint8_t host_idx;    /* eSPI host index of the VW group */
	uint8_t bit;         /* bit position within the VW group */
	uint8_t xec_reg_idx; /* MSVW/SMVW register array index */
	uint8_t flags;
};

/* Master-to-Slave virtual wire register indices. */
enum mchp_msvw_regs {
	MCHP_MSVW00,
	MCHP_MSVW01,
	MCHP_MSVW02,
	MCHP_MSVW03,
	MCHP_MSVW04,
	MCHP_MSVW05,
	MCHP_MSVW06,
	MCHP_MSVW07,
	MCHP_MSVW08,
};

/* Slave-to-Master virtual wire register indices. */
enum mchp_smvw_regs {
	MCHP_SMVW00,
	MCHP_SMVW01,
	MCHP_SMVW02,
	MCHP_SMVW03,
	MCHP_SMVW04,
	MCHP_SMVW05,
	MCHP_SMVW06,
	MCHP_SMVW07,
	MCHP_SMVW08,
};

/* Indices into the per-instance irq_info_list for each eSPI interrupt
 * source (peripheral channel, bus masters, LTR, OOB up/down, flash
 * channel, reset, VW channel enable).
 */
enum xec_espi_girq_idx {
	pc_girq_idx = 0,
	bm1_girq_idx,
	bm2_girq_idx,
	ltr_girq_idx,
	oob_up_girq_idx,
	oob_dn_girq_idx,
	fc_girq_idx,
	rst_girq_idx,
	vw_ch_en_girq_idx,
	max_girq_idx,
};

int xec_host_dev_init(const struct device *dev);
int xec_host_dev_connect_irqs(const struct device *dev);

int espi_xec_read_lpc_request(const struct device *dev,
			      enum lpc_peripheral_opcode op,
			      uint32_t *data);
int espi_xec_write_lpc_request(const struct device *dev,
			       enum lpc_peripheral_opcode op,
			       uint32_t *data);

#endif /* ZEPHYR_DRIVERS_ESPI_MCHP_XEC_ESPI_V2_H_ */
```
/content/code_sandbox/drivers/espi/espi_mchp_xec_v2.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
802
```c /* * */ #define DT_DRV_COMPAT microchip_xec_espi_saf_v2 #include <zephyr/kernel.h> #include <soc.h> #include <errno.h> #include <zephyr/drivers/clock_control/mchp_xec_clock_control.h> #include <zephyr/drivers/espi.h> #include <zephyr/drivers/espi_saf.h> #include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h> #include <zephyr/dt-bindings/interrupt-controller/mchp-xec-ecia.h> #include <zephyr/logging/log.h> #include "espi_mchp_xec_v2.h" #include "espi_utils.h" LOG_MODULE_REGISTER(espi_saf, CONFIG_ESPI_LOG_LEVEL); /* common clock control device node for all Microchip XEC chips */ #define MCHP_XEC_CLOCK_CONTROL_NODE DT_NODELABEL(pcr) /* SAF EC Portal read/write flash access limited to 1-64 bytes */ #define MAX_SAF_ECP_BUFFER_SIZE 64ul /* 1 second maximum for flash operations */ #define MAX_SAF_FLASH_TIMEOUT 125000ul /* 1000ul */ #define MAX_SAF_FLASH_TIMEOUT_MS 1000ul /* 64 bytes @ 24MHz quad is approx. 6 us */ #define SAF_WAIT_INTERVAL 8 /* After 8 wait intervals yield */ #define SAF_YIELD_THRESHOLD 64 /* Get QMSPI 0 encoded GIRQ information */ #define XEC_QMSPI_ENC_GIRQ \ DT_PROP_BY_IDX(DT_INST(0, microchip_xec_qmspi_ldma), girqs, 0) #define XEC_QMSPI_GIRQ MCHP_XEC_ECIA_GIRQ(XEC_QMSPI_ENC_GIRQ) #define XEC_QMSPI_GIRQ_POS MCHP_XEC_ECIA_GIRQ_POS(XEC_QMSPI_ENC_GIRQ) #define XEC_SAF_DONE_ENC_GIRQ DT_INST_PROP_BY_IDX(0, girqs, 0) #define XEC_SAF_ERR_ENC_GIRQ DT_INST_PROP_BY_IDX(0, girqs, 1) #define XEC_SAF_DONE_GIRQ MCHP_XEC_ECIA_GIRQ(XEC_SAF_DONE_ENC_GIRQ) #define XEC_SAF_DONE_GIRQ_POS MCHP_XEC_ECIA_GIRQ_POS(XEC_SAF_ERR_ENC_GIRQ) /* * SAF configuration from Device Tree * SAF controller register block base address * QMSPI controller register block base address * SAF communications register block base address * Flash STATUS1 poll timeout in 32KHz periods * Flash consecutive read timeout in units of 20 ns * Delay before first Poll-1 command after suspend in 20 ns units * Hold off suspend for this interval if erase or program in 32KHz periods. 
* Add delay between Poll STATUS1 commands in 20 ns units. */ struct espi_saf_xec_config { struct mchp_espi_saf * const saf_base; struct qmspi_regs * const qmspi_base; struct mchp_espi_saf_comm * const saf_comm_base; struct espi_iom_regs * const iom_base; void (*irq_config_func)(void); uint32_t poll_timeout; uint32_t consec_rd_timeout; uint32_t sus_chk_delay; uint16_t sus_rsm_interval; uint16_t poll_interval; uint8_t pcr_idx; uint8_t pcr_pos; uint8_t irq_info_size; uint8_t rsvd1; const struct espi_xec_irq_info *irq_info_list; }; struct espi_saf_xec_data { struct k_sem ecp_lock; uint32_t hwstatus; sys_slist_t callbacks; }; /* EC portal local flash r/w buffer */ static uint32_t slave_mem[MAX_SAF_ECP_BUFFER_SIZE]; /* * @brief eSPI SAF configuration */ static inline void mchp_saf_cs_descr_wr(struct mchp_espi_saf *regs, uint8_t cs, uint32_t val) { regs->SAF_CS_OP[cs].OP_DESCR = val; } static inline void mchp_saf_poll2_mask_wr(struct mchp_espi_saf *regs, uint8_t cs, uint16_t val) { LOG_DBG("%s cs: %d mask %x", __func__, cs, val); if (cs == 0) { regs->SAF_CS0_CFG_P2M = val; } else { regs->SAF_CS1_CFG_P2M = val; } } static inline void mchp_saf_cm_prefix_wr(struct mchp_espi_saf *regs, uint8_t cs, uint16_t val) { if (cs == 0) { regs->SAF_CS0_CM_PRF = val; } else { regs->SAF_CS1_CM_PRF = val; } } /* * Initialize SAF flash protection regions. * SAF HW implements 17 protection regions. * At least one protection region must be configured to allow * EC access to the local flash through the EC Portal. * Each protection region is composed of 4 32-bit registers * Start bits[19:0] = bits[31:12] region start address (4KB boundaries) * Limit bits[19:0] = bits[31:12] region limit address (4KB boundaries) * Write protect b[7:0] = masters[7:0] allow write/erase. 1=allowed * Read protetc b[7:0] = masters[7:0] allow read. 1=allowed * * This routine configures protection region 0 for full flash array * address range and read-write-erase for all masters. 
* This routine must be called AFTER the flash configuration size/limit and * threshold registers have been programmed. * * POR default values: * Start = 0x7ffff * Limit = 0 * Write Prot = 0x01 Master 0 always granted write/erase * Read Prot = 0x01 Master 0 always granted read * * Sample code configures PR[0] * Start = 0 * Limit = 0x7ffff * WR = 0xFF * RD = 0xFF */ static void saf_protection_regions_init(struct mchp_espi_saf *regs) { LOG_DBG("%s", __func__); for (size_t n = 0; n < MCHP_ESPI_SAF_PR_MAX; n++) { if (n == 0) { regs->SAF_PROT_RG[0].START = 0U; regs->SAF_PROT_RG[0].LIMIT = regs->SAF_FL_CFG_SIZE_LIM >> 12; regs->SAF_PROT_RG[0].WEBM = MCHP_SAF_MSTR_ALL; regs->SAF_PROT_RG[0].RDBM = MCHP_SAF_MSTR_ALL; } else { regs->SAF_PROT_RG[n].START = MCHP_SAF_PROT_RG_START_DFLT; regs->SAF_PROT_RG[n].LIMIT = MCHP_SAF_PROT_RG_LIMIT_DFLT; regs->SAF_PROT_RG[n].WEBM = 0U; regs->SAF_PROT_RG[n].RDBM = 0U; } LOG_DBG("PROT[%d] START %x", n, regs->SAF_PROT_RG[n].START); LOG_DBG("PROT[%d] LIMIT %x", n, regs->SAF_PROT_RG[n].LIMIT); LOG_DBG("PROT[%d] WEBM %x", n, regs->SAF_PROT_RG[n].WEBM); LOG_DBG("PROT[%d] RDBM %x", n, regs->SAF_PROT_RG[n].RDBM); } } static int qmspi_freq_div(uint32_t freqhz, uint32_t *fdiv) { clock_control_subsys_t clkss = (clock_control_subsys_t)(MCHP_XEC_PCR_CLK_PERIPH_FAST); uint32_t clk = 0u; if (!fdiv) { return -EINVAL; } if (clock_control_get_rate(DEVICE_DT_GET(MCHP_XEC_CLOCK_CONTROL_NODE), (clock_control_subsys_t)clkss, &clk)) { return -EIO; } *fdiv = 0u; /* maximum divider = 0x10000 */ if (freqhz) { *fdiv = clk / freqhz; } return 0u; } static int qmspi_freq_div_from_mhz(uint32_t freqmhz, uint32_t *fdiv) { uint32_t freqhz = freqmhz * 1000000u; return qmspi_freq_div(freqhz, fdiv); } /* * Take over and re-initialize QMSPI for use by SAF HW engine. * When SAF is activated, QMSPI registers are controlled by SAF * HW engine. CPU no longer has access to QMSPI registers. * 1. Save QMSPI driver frequency divider, SPI signalling mode, and * chip select timing. * 2. 
Put QMSPI controller in a known state by performing a soft reset. * 3. Clear QMSPI GIRQ status * 4. Configure QMSPI interface control for SAF. * 5. Load flash device independent (generic) descriptors. * 6. Enable transfer done interrupt in QMSPI * 7. Enable QMSPI SAF mode * 8. If user configuration overrides frequency, signalling mode, * or chip select timing derive user values. * 9. Program QMSPI MODE and CSTIM registers with activate set. */ static int saf_qmspi_init(const struct espi_saf_xec_config *xcfg, const struct espi_saf_cfg *cfg) { uint32_t qmode, qfdiv, cstim, n; struct qmspi_regs * const qregs = xcfg->qmspi_base; struct mchp_espi_saf * const regs = xcfg->saf_base; const struct espi_saf_hw_cfg *hwcfg = &cfg->hwcfg; qmode = qregs->MODE; if (!(qmode & MCHP_QMSPI_M_ACTIVATE)) { return -EAGAIN; } qmode = qregs->MODE & (MCHP_QMSPI_M_FDIV_MASK | MCHP_QMSPI_M_SIG_MASK); cstim = qregs->CSTM; qregs->MODE = MCHP_QMSPI_M_SRST; qregs->STS = MCHP_QMSPI_STS_RW1C_MASK; mchp_soc_ecia_girq_src_dis(XEC_QMSPI_GIRQ, XEC_QMSPI_GIRQ_POS); mchp_soc_ecia_girq_src_clr(XEC_QMSPI_GIRQ, XEC_QMSPI_GIRQ_POS); qregs->IFCTRL = (MCHP_QMSPI_IFC_WP_OUT_HI | MCHP_QMSPI_IFC_WP_OUT_EN | MCHP_QMSPI_IFC_HOLD_OUT_HI | MCHP_QMSPI_IFC_HOLD_OUT_EN); for (n = 0; n < MCHP_SAF_NUM_GENERIC_DESCR; n++) { qregs->DESCR[MCHP_SAF_CM_EXIT_START_DESCR + n] = hwcfg->generic_descr[n]; } /* SAF HW uses QMSPI interrupt signal */ qregs->IEN = MCHP_QMSPI_IEN_XFR_DONE; qmode |= (MCHP_QMSPI_M_SAF_DMA_MODE_EN | MCHP_QMSPI_M_CS0 | MCHP_QMSPI_M_ACTIVATE); if (hwcfg->flags & MCHP_SAF_HW_CFG_FLAG_CPHA) { qmode = (qmode & ~(MCHP_QMSPI_M_SIG_MASK)) | ((hwcfg->qmspi_cpha << MCHP_QMSPI_M_SIG_POS) & MCHP_QMSPI_M_SIG_MASK); } /* Copy QMSPI frequency divider into SAF CS0 and CS1 QMSPI frequency * dividers. SAF HW uses CS0/CS1 divider register fields to overwrite * QMSPI frequency divider in QMSPI.Mode register. Later we will update * SAF CS0/CS1 SPI frequency dividers based on flash configuration. 
*/ qfdiv = (qmode & MCHP_QMSPI_M_FDIV_MASK) >> MCHP_QMSPI_M_FDIV_POS; qfdiv = qfdiv | (qfdiv << 16); /* read and rest clock dividers */ regs->SAF_CLKDIV_CS0 = qfdiv; regs->SAF_CLKDIV_CS1 = qfdiv; if (hwcfg->flags & MCHP_SAF_HW_CFG_FLAG_CSTM) { cstim = hwcfg->qmspi_cs_timing; } /* MEC172x SAF uses TX LDMA channel 0 in non-descriptor mode. * SAF HW writes QMSPI.Control and TX LDMA channel 0 registers * to transmit opcode, address, and data. We configure must * configure TX LDMA channel 0 control register. We believe SAF * HW will set bit[6] to 1. */ qregs->LDTX[0].CTRL = MCHP_QMSPI_LDC_EN | MCHP_QMSPI_LDC_RS_EN | MCHP_QMSPI_LDC_ASZ_4; qmode |= MCHP_QMSPI_M_LDMA_RX_EN | MCHP_QMSPI_M_LDMA_TX_EN; qregs->MODE = qmode; qregs->CSTM = cstim; return 0; } /* * Registers at offsets: * SAF Poll timeout @ 0x194. Hard coded to 0x28000. Default value = 0. * recommended value = 0x28000 32KHz clocks (5 seconds). b[17:0] * SAF Poll interval @ 0x198. Hard coded to 0 * Default value = 0. Recommended = 0. b[15:0] * SAF Suspend/Resume Interval @ 0x19c. Hard coded to 0x8 * Default value = 0x01. Min time erase/prog in 32KHz units. * SAF Consecutive Read Timeout @ 0x1a0. Hard coded to 0x2. b[15:0] * Units of MCLK. Recommend < 20us. b[19:0] * SAF Suspend Check Delay @ 0x1ac. Not touched. * Default = 0. Recommend = 20us. Units = MCLK. 
b[19:0] */ static void saf_flash_timing_init(struct mchp_espi_saf * const regs, const struct espi_saf_xec_config *cfg) { LOG_DBG("%s\n", __func__); regs->SAF_POLL_TMOUT = cfg->poll_timeout; regs->SAF_POLL_INTRVL = cfg->poll_interval; regs->SAF_SUS_RSM_INTRVL = cfg->sus_rsm_interval; regs->SAF_CONSEC_RD_TMOUT = cfg->consec_rd_timeout; regs->SAF_SUS_CHK_DLY = cfg->sus_chk_delay; LOG_DBG("SAF_POLL_TMOUT %x\n", regs->SAF_POLL_TMOUT); LOG_DBG("SAF_POLL_INTRVL %x\n", regs->SAF_POLL_INTRVL); LOG_DBG("SAF_SUS_RSM_INTRVL %x\n", regs->SAF_SUS_RSM_INTRVL); LOG_DBG("SAF_CONSEC_RD_TMOUT %x\n", regs->SAF_CONSEC_RD_TMOUT); LOG_DBG("SAF_SUS_CHK_DLY %x\n", regs->SAF_SUS_CHK_DLY); } /* * Disable DnX bypass feature. */ static void saf_dnx_bypass_init(struct mchp_espi_saf * const regs) { regs->SAF_DNX_PROT_BYP = 0; regs->SAF_DNX_PROT_BYP = 0xffffffff; } /* * Bitmap of flash erase size from 1KB up to 128KB. * eSPI SAF specification requires 4KB erase support. * MCHP SAF supports 4KB, 32KB, and 64KB. * Only report 32KB and 64KB to Host if supported by both * flash devices. */ static int saf_init_erase_block_size(const struct device *dev, const struct espi_saf_cfg *cfg) { const struct espi_saf_xec_config * const xcfg = dev->config; struct espi_iom_regs * const espi_iom = xcfg->iom_base; struct espi_saf_flash_cfg *fcfg = cfg->flash_cfgs; uint32_t opb = fcfg->opb; uint8_t erase_bitmap = MCHP_ESPI_SERASE_SZ_4K; LOG_DBG("%s\n", __func__); if (cfg->nflash_devices > 1) { fcfg++; opb &= fcfg->opb; } if ((opb & MCHP_SAF_CS_OPB_ER0_MASK) == 0) { /* One or both do not support 4KB erase! */ return -EINVAL; } if (opb & MCHP_SAF_CS_OPB_ER1_MASK) { erase_bitmap |= MCHP_ESPI_SERASE_SZ_32K; } if (opb & MCHP_SAF_CS_OPB_ER2_MASK) { erase_bitmap |= MCHP_ESPI_SERASE_SZ_64K; } espi_iom->SAFEBS = erase_bitmap; return 0; } /* * Set the continuous mode prefix and 4-byte address mode bits * based upon the flash configuration information. 
* Updates: * SAF Flash Config Poll2 Mask @ 0x1A4 * SAF Flash Config Special Mode @ 0x1B0 * SAF Flash Misc Config @ 0x38 */ static void saf_flash_misc_cfg(struct mchp_espi_saf * const regs, uint8_t cs, const struct espi_saf_flash_cfg *fcfg) { uint32_t d, v; d = regs->SAF_FL_CFG_MISC; v = MCHP_SAF_FL_CFG_MISC_CS0_CPE; if (cs) { v = MCHP_SAF_FL_CFG_MISC_CS1_CPE; } /* Does this flash device require a prefix for continuous mode? */ if (fcfg->cont_prefix != 0) { d |= v; } else { d &= ~v; } v = MCHP_SAF_FL_CFG_MISC_CS0_4BM; if (cs) { v = MCHP_SAF_FL_CFG_MISC_CS1_4BM; } /* Use 32-bit addressing for this flash device? */ if (fcfg->flags & MCHP_FLASH_FLAG_ADDR32) { d |= v; } else { d &= ~v; } regs->SAF_FL_CFG_MISC = d; LOG_DBG("%s SAF_FL_CFG_MISC: %x", __func__, d); } static void saf_flash_pd_cfg(struct mchp_espi_saf * const regs, uint8_t cs, const struct espi_saf_flash_cfg *fcfg) { uint32_t pdval = 0u; uint32_t msk = 0u; if (cs == 0) { msk = BIT(SAF_PWRDN_CTRL_CS0_PD_EN_POS) | BIT(SAF_PWRDN_CTRL_CS0_PD_EN_POS); if (fcfg->flags & MCHP_FLASH_FLAG_V2_PD_CS0_EN) { pdval |= BIT(SAF_PWRDN_CTRL_CS0_PD_EN_POS); } if (fcfg->flags & MCHP_FLASH_FLAG_V2_PD_CS0_EC_WK_EN) { pdval |= BIT(SAF_PWRDN_CTRL_CS0_WPA_EN_POS); } } else { msk = BIT(SAF_PWRDN_CTRL_CS1_PD_EN_POS) | BIT(SAF_PWRDN_CTRL_CS1_PD_EN_POS); if (fcfg->flags & MCHP_FLASH_FLAG_V2_PD_CS1_EN) { pdval |= BIT(SAF_PWRDN_CTRL_CS1_PD_EN_POS); } if (fcfg->flags & MCHP_FLASH_FLAG_V2_PD_CS1_EC_WK_EN) { pdval |= BIT(SAF_PWRDN_CTRL_CS1_PD_EN_POS); } } regs->SAF_PWRDN_CTRL = (regs->SAF_PWRDN_CTRL & ~msk) | pdval; } /* Configure SAF per chip select QMSPI clock dividers. 
* SAF HW implements two QMSP clock divider registers per chip select: * Each divider register is composed of two 16-bit fields: * b[15:0] = QMSPI clock divider for SPI read * b[31:16] = QMSPI clock divider for all other SPI commands */ static int saf_flash_freq_cfg(struct mchp_espi_saf * const regs, uint8_t cs, const struct espi_saf_flash_cfg *fcfg) { uint32_t fmhz, fdiv, saf_qclk; if (cs == 0) { saf_qclk = regs->SAF_CLKDIV_CS0; } else { saf_qclk = regs->SAF_CLKDIV_CS1; } fmhz = fcfg->rd_freq_mhz; if (fmhz) { fdiv = 0u; if (qmspi_freq_div_from_mhz(fmhz, &fdiv)) { LOG_ERR("%s SAF CLKDIV CS0 bad freq MHz %u", __func__, fmhz); return -EIO; } if (fdiv) { saf_qclk = (saf_qclk & ~SAF_CLKDIV_CS_MSK0) | (fdiv & SAF_CLKDIV_CS_MSK0); } } fmhz = fcfg->freq_mhz; if (fmhz) { fdiv = 0u; if (qmspi_freq_div_from_mhz(fmhz, &fdiv)) { LOG_ERR("%s SAF CLKDIV CS1 bad freq MHz %u", __func__, fmhz); return -EIO; } if (fdiv) { saf_qclk &= ~(SAF_CLKDIV_CS_MSK0 << 16); saf_qclk |= (fdiv & SAF_CLKDIV_CS_MSK0) << 16; } } if (cs == 0) { regs->SAF_CLKDIV_CS0 = saf_qclk; } else { regs->SAF_CLKDIV_CS1 = saf_qclk; } return 0; } /* * Program flash device specific SAF and QMSPI registers. 
* * CS0 OpA @ 0x4c or CS1 OpA @ 0x5C * CS0 OpB @ 0x50 or CS1 OpB @ 0x60 * CS0 OpC @ 0x54 or CS1 OpC @ 0x64 * Poll 2 Mask @ 0x1a4 * Continuous Prefix @ 0x1b0 * CS0: QMSPI descriptors 0-5 or CS1 QMSPI descriptors 6-11 * CS0 Descrs @ 0x58 or CS1 Descrs @ 0x68 * SAF CS0 QMSPI frequency dividers (read/all other) commands * SAF CS1 QMSPI frequency dividers (read/all other) commands */ static int saf_flash_cfg(const struct device *dev, const struct espi_saf_flash_cfg *fcfg, uint8_t cs) { uint32_t d, did; const struct espi_saf_xec_config * const xcfg = dev->config; struct mchp_espi_saf * const regs = xcfg->saf_base; struct qmspi_regs * const qregs = xcfg->qmspi_base; LOG_DBG("%s cs=%u", __func__, cs); regs->SAF_CS_OP[cs].OPA = fcfg->opa; regs->SAF_CS_OP[cs].OPB = fcfg->opb; regs->SAF_CS_OP[cs].OPC = fcfg->opc; regs->SAF_CS_OP[cs].OP_DESCR = (uint32_t)fcfg->cs_cfg_descr_ids; did = MCHP_SAF_QMSPI_CS0_START_DESCR; if (cs != 0) { did = MCHP_SAF_QMSPI_CS1_START_DESCR; } for (size_t i = 0; i < MCHP_SAF_QMSPI_NUM_FLASH_DESCR; i++) { d = fcfg->descr[i] & ~(MCHP_QMSPI_C_NEXT_DESCR_MASK); d |= (((did + 1) << MCHP_QMSPI_C_NEXT_DESCR_POS) & MCHP_QMSPI_C_NEXT_DESCR_MASK); qregs->DESCR[did++] = d; } mchp_saf_poll2_mask_wr(regs, cs, fcfg->poll2_mask); mchp_saf_cm_prefix_wr(regs, cs, fcfg->cont_prefix); saf_flash_misc_cfg(regs, cs, fcfg); saf_flash_pd_cfg(regs, cs, fcfg); return saf_flash_freq_cfg(regs, cs, fcfg); } static const uint32_t tag_map_dflt[MCHP_ESPI_SAF_TAGMAP_MAX] = { MCHP_SAF_TAG_MAP0_DFLT, MCHP_SAF_TAG_MAP1_DFLT, MCHP_SAF_TAG_MAP2_DFLT }; static void saf_tagmap_init(struct mchp_espi_saf * const regs, const struct espi_saf_cfg *cfg) { const struct espi_saf_hw_cfg *hwcfg = &cfg->hwcfg; for (int i = 0; i < MCHP_ESPI_SAF_TAGMAP_MAX; i++) { if (hwcfg->tag_map[i] & MCHP_SAF_HW_CFG_TAGMAP_USE) { regs->SAF_TAG_MAP[i] = hwcfg->tag_map[i]; } else { regs->SAF_TAG_MAP[i] = tag_map_dflt[i]; } } LOG_DBG("SAF TAG0 %x", regs->SAF_TAG_MAP[0]); LOG_DBG("SAF TAG1 %x", regs->SAF_TAG_MAP[1]); 
LOG_DBG("SAF TAG2 %x", regs->SAF_TAG_MAP[2]); } #define SAF_QSPI_LDMA_CTRL \ (MCHP_QMSPI_LDC_EN | MCHP_QMSPI_LDC_RS_EN | \ MCHP_QMSPI_LDC_ASZ_4) static void saf_qmspi_ldma_cfg(const struct espi_saf_xec_config * const xcfg) { struct qmspi_regs * const qregs = xcfg->qmspi_base; uint32_t qmode = qregs->MODE; uint32_t n, temp, chan; qregs->MODE = qmode & ~(MCHP_QMSPI_M_ACTIVATE); for (n = 0u; n < MCHP_QMSPI_MAX_DESCR; n++) { temp = qregs->DESCR[n]; if (temp & MCHP_QMSPI_C_TX_MASK) { chan = (temp & MCHP_QMSPI_C_TX_DMA_MASK) >> MCHP_QMSPI_C_TX_DMA_POS; if (chan) { /* zero is disabled */ chan--; /* register array index starts at 0 */ qregs->LDMA_TX_DESCR_BM |= BIT(n); qregs->LDTX[chan].CTRL = SAF_QSPI_LDMA_CTRL; } } if (temp & MCHP_QMSPI_C_RX_EN) { chan = (temp & MCHP_QMSPI_C_RX_DMA_MASK) >> MCHP_QMSPI_C_RX_DMA_POS; if (chan) { chan--; qregs->LDMA_RX_DESCR_BM |= BIT(n); qregs->LDRX[chan].CTRL = SAF_QSPI_LDMA_CTRL; } } } qregs->MODE = qmode; } /* * Configure SAF and QMSPI for SAF operation based upon the * number and characteristics of local SPI flash devices. * NOTE: SAF is configured but not activated. SAF should be * activated only when eSPI master sends Flash Channel enable * message with MAF/SAF select flag. 
*/ static int espi_saf_xec_configuration(const struct device *dev, const struct espi_saf_cfg *cfg) { int ret = 0; uint32_t totalsz = 0; uint32_t u = 0; LOG_DBG("%s", __func__); if ((dev == NULL) || (cfg == NULL)) { return -EINVAL; } const struct espi_saf_xec_config * const xcfg = dev->config; struct mchp_espi_saf * const regs = xcfg->saf_base; struct mchp_espi_saf_comm * const comm_regs = xcfg->saf_comm_base; const struct espi_saf_hw_cfg *hwcfg = &cfg->hwcfg; const struct espi_saf_flash_cfg *fcfg = cfg->flash_cfgs; if ((fcfg == NULL) || (cfg->nflash_devices == 0U) || (cfg->nflash_devices > MCHP_SAF_MAX_FLASH_DEVICES)) { return -EINVAL; } if (regs->SAF_FL_CFG_MISC & MCHP_SAF_FL_CFG_MISC_SAF_EN) { return -EAGAIN; } saf_qmspi_init(xcfg, cfg); regs->SAF_CS0_CFG_P2M = 0; regs->SAF_CS1_CFG_P2M = 0; regs->SAF_FL_CFG_GEN_DESCR = MCHP_SAF_FL_CFG_GEN_DESCR_STD; /* global flash power down activity counter and interval time */ regs->SAF_AC_RELOAD = hwcfg->flash_pd_timeout; regs->SAF_FL_PWR_TMOUT = hwcfg->flash_pd_min_interval; /* flash device connected to CS0 required */ totalsz = fcfg->flashsz; regs->SAF_FL_CFG_THRH = totalsz; ret = saf_flash_cfg(dev, fcfg, 0); if (ret) { return ret; } /* optional second flash device connected to CS1 */ if (cfg->nflash_devices > 1) { fcfg++; totalsz += fcfg->flashsz; } /* Program CS1 configuration (same as CS0 if only one device) */ ret = saf_flash_cfg(dev, fcfg, 1); if (ret) { return ret; } if (totalsz == 0) { return -EAGAIN; } regs->SAF_FL_CFG_SIZE_LIM = totalsz - 1; LOG_DBG("SAF_FL_CFG_THRH = %x SAF_FL_CFG_SIZE_LIM = %x", regs->SAF_FL_CFG_THRH, regs->SAF_FL_CFG_SIZE_LIM); saf_tagmap_init(regs, cfg); saf_protection_regions_init(regs); saf_dnx_bypass_init(regs); saf_flash_timing_init(regs, xcfg); ret = saf_init_erase_block_size(dev, cfg); if (ret != 0) { LOG_ERR("SAF Config bad flash erase config"); return ret; } /* Default or expedited prefetch? 
*/ u = MCHP_SAF_FL_CFG_MISC_PFOE_DFLT; if (cfg->hwcfg.flags & MCHP_SAF_HW_CFG_FLAG_PFEXP) { u = MCHP_SAF_FL_CFG_MISC_PFOE_EXP; } regs->SAF_FL_CFG_MISC = (regs->SAF_FL_CFG_MISC & ~(MCHP_SAF_FL_CFG_MISC_PFOE_MASK)) | u; /* enable prefetch ? */ if (cfg->hwcfg.flags & MCHP_SAF_HW_CFG_FLAG_PFEN) { comm_regs->SAF_COMM_MODE |= MCHP_SAF_COMM_MODE_PF_EN; } else { comm_regs->SAF_COMM_MODE &= ~(MCHP_SAF_COMM_MODE_PF_EN); } LOG_DBG("%s SAF_FL_CFG_MISC: %x", __func__, regs->SAF_FL_CFG_MISC); LOG_DBG("%s Aft MCHP_SAF_COMM_MODE_REG: %x", __func__, comm_regs->SAF_COMM_MODE); saf_qmspi_ldma_cfg(xcfg); return 0; } static int espi_saf_xec_set_pr(const struct device *dev, const struct espi_saf_protection *pr) { if ((dev == NULL) || (pr == NULL)) { return -EINVAL; } if (pr->nregions >= MCHP_ESPI_SAF_PR_MAX) { return -EINVAL; } const struct espi_saf_xec_config * const xcfg = dev->config; struct mchp_espi_saf * const regs = xcfg->saf_base; if (regs->SAF_FL_CFG_MISC & MCHP_SAF_FL_CFG_MISC_SAF_EN) { return -EAGAIN; } const struct espi_saf_pr *preg = pr->pregions; size_t n = pr->nregions; while (n--) { uint8_t regnum = preg->pr_num; if (regnum >= MCHP_ESPI_SAF_PR_MAX) { return -EINVAL; } /* NOTE: If previously locked writes have no effect */ if (preg->flags & MCHP_SAF_PR_FLAG_ENABLE) { regs->SAF_PROT_RG[regnum].START = preg->start >> 12U; regs->SAF_PROT_RG[regnum].LIMIT = (preg->start + preg->size - 1U) >> 12U; regs->SAF_PROT_RG[regnum].WEBM = preg->master_bm_we; regs->SAF_PROT_RG[regnum].RDBM = preg->master_bm_rd; } else { regs->SAF_PROT_RG[regnum].START = 0x7FFFFU; regs->SAF_PROT_RG[regnum].LIMIT = 0U; regs->SAF_PROT_RG[regnum].WEBM = 0U; regs->SAF_PROT_RG[regnum].RDBM = 0U; } if (preg->flags & MCHP_SAF_PR_FLAG_LOCK) { regs->SAF_PROT_LOCK |= (1UL << regnum); } preg++; } return 0; } static bool espi_saf_xec_channel_ready(const struct device *dev) { const struct espi_saf_xec_config * const xcfg = dev->config; struct mchp_espi_saf * const regs = xcfg->saf_base; if (regs->SAF_FL_CFG_MISC & 
MCHP_SAF_FL_CFG_MISC_SAF_EN) { return true; } return false; } /* * MCHP SAF hardware supports a range of flash block erase * sizes from 1KB to 128KB. The eSPI Host specification requires * 4KB must be supported. The MCHP SAF QMSPI HW interface only * supported three erase sizes. Most SPI flash devices chosen for * SAF support 4KB, 32KB, and 64KB. * Get flash erase sizes driver has configured from eSPI capabilities * registers. We assume driver flash tables have opcodes to match * capabilities configuration. * Check requested erase size is supported. */ struct erase_size_encoding { uint8_t hwbitpos; uint8_t encoding; }; static const struct erase_size_encoding ersz_enc[] = { { MCHP_ESPI_SERASE_SZ_4K_BITPOS, 0 }, { MCHP_ESPI_SERASE_SZ_32K_BITPOS, 1 }, { MCHP_ESPI_SERASE_SZ_64K_BITPOS, 2 } }; #define SAF_ERASE_ENCODING_MAX_ENTRY \ (sizeof(ersz_enc) / sizeof(struct erase_size_encoding)) static uint32_t get_erase_size_encoding(const struct device *dev, uint32_t erase_size) { const struct espi_saf_xec_config * const xcfg = dev->config; struct espi_iom_regs * const espi_iom = xcfg->iom_base; uint8_t supsz = espi_iom->SAFEBS; LOG_DBG("%s\n", __func__); for (int i = 0; i < SAF_ERASE_ENCODING_MAX_ENTRY; i++) { uint32_t sz = MCHP_ESPI_SERASE_SZ(ersz_enc[i].hwbitpos); if ((sz == erase_size) && (supsz & (1 << ersz_enc[i].hwbitpos))) { return ersz_enc[i].encoding; } } return 0xffffffffU; } static int check_ecp_access_size(uint32_t reqlen) { if ((reqlen < MCHP_SAF_ECP_CMD_RW_LEN_MIN) || (reqlen > MCHP_SAF_ECP_CMD_RW_LEN_MAX)) { return -EAGAIN; } return 0; } /* * EC access to SAF atttached flash array * Allowed commands: * MCHP_SAF_ECP_CMD_READ(0x0), MCHP_SAF_ECP_CMD_WRITE(0x01), * MCHP_SAF_ECP_CMD_ERASE(0x02), MCHP_SAF_ECP_CMD_RPMC_OP1_CS0(0x03), * MCHP_SAF_ECP_CMD_RPMC_OP2_CS0(0x04), MCHP_SAF_ECP_CMD_RPMC_OP1_CS1(0x83), * MCHP_SAF_ECP_CMD_RPMC_OP2_CS1(0x84) */ static int saf_ecp_access(const struct device *dev, struct espi_saf_packet *pckt, uint8_t cmd) { uint32_t scmd, err_mask, 
n; int rc, counter; struct espi_saf_xec_data *xdat = dev->data; const struct espi_saf_xec_config * const xcfg = dev->config; struct mchp_espi_saf * const regs = xcfg->saf_base; const struct espi_xec_irq_info *safirq = &xcfg->irq_info_list[0]; counter = 0; err_mask = MCHP_SAF_ECP_STS_ERR_MASK; LOG_DBG("%s", __func__); if (!(regs->SAF_FL_CFG_MISC & MCHP_SAF_FL_CFG_MISC_SAF_EN)) { LOG_ERR("SAF is disabled"); return -EIO; } n = regs->SAF_ECP_BUSY; if (n & (MCHP_SAF_ECP_EC0_BUSY | MCHP_SAF_ECP_EC1_BUSY)) { LOG_ERR("SAF EC Portal is busy: 0x%08x", n); return -EBUSY; } switch (cmd) { case MCHP_SAF_ECP_CMD_READ: case MCHP_SAF_ECP_CMD_WRITE: rc = check_ecp_access_size(pckt->len); if (rc) { LOG_ERR("SAF EC Portal size out of bounds"); return rc; } if (cmd == MCHP_SAF_ECP_CMD_WRITE) { memcpy(slave_mem, pckt->buf, pckt->len); } n = pckt->len; break; case MCHP_SAF_ECP_CMD_ERASE: n = get_erase_size_encoding(dev, pckt->len); if (n == UINT32_MAX) { LOG_ERR("SAF EC Portal unsupported erase size"); return -EAGAIN; } break; case MCHP_SAF_ECP_CMD_RPMC_OP1_CS0: case MCHP_SAF_ECP_CMD_RPMC_OP2_CS0: rc = check_ecp_access_size(pckt->len); if (rc) { LOG_ERR("SAF EC Portal RPMC size out of bounds"); return rc; } if (!(regs->SAF_CFG_CS0_OPD & SAF_CFG_CS_OPC_RPMC_OP2_MSK)) { LOG_ERR("SAF CS0 RPMC opcode not configured"); return -EIO; } n = pckt->len; break; case MCHP_SAF_ECP_CMD_RPMC_OP1_CS1: case MCHP_SAF_ECP_CMD_RPMC_OP2_CS1: rc = check_ecp_access_size(pckt->len); if (rc) { LOG_ERR("SAF EC Portal RPMC size out of bounds"); return rc; } if (!(regs->SAF_CFG_CS1_OPD & SAF_CFG_CS_OPC_RPMC_OP2_MSK)) { LOG_ERR("SAF CS1 RPMC opcode not configured"); return -EIO; } n = pckt->len; break; default: LOG_ERR("SAF EC Portal bad cmd"); return -EAGAIN; } LOG_DBG("%s params val done", __func__); regs->SAF_ECP_INTEN = 0; regs->SAF_ECP_STATUS = MCHP_SAF_ECP_STS_MASK; mchp_xec_ecia_girq_src_clr(safirq->gid, safirq->gpos); regs->SAF_ECP_INTEN = BIT(MCHP_SAF_ECP_INTEN_DONE_POS); regs->SAF_ECP_FLAR = 
pckt->flash_addr; regs->SAF_ECP_BFAR = (uint32_t)&slave_mem[0]; scmd = MCHP_SAF_ECP_CMD_PUT_FLASH_NP | ((uint32_t)cmd << MCHP_SAF_ECP_CMD_CTYPE_POS) | ((n << MCHP_SAF_ECP_CMD_LEN_POS) & MCHP_SAF_ECP_CMD_LEN_MASK); LOG_DBG("%s ECP_FLAR=0x%x", __func__, regs->SAF_ECP_FLAR); LOG_DBG("%s ECP_BFAR=0x%x", __func__, regs->SAF_ECP_BFAR); LOG_DBG("%s ECP_CMD=0x%x", __func__, scmd); regs->SAF_ECP_CMD = scmd; regs->SAF_ECP_START = MCHP_SAF_ECP_START; rc = k_sem_take(&xdat->ecp_lock, K_MSEC(MAX_SAF_FLASH_TIMEOUT_MS)); if (rc == -EAGAIN) { LOG_ERR("%s timeout", __func__); return -ETIMEDOUT; } LOG_DBG("%s wake on semaphore", __func__); n = regs->SAF_ECP_STATUS; /* clear hardware status and check for errors */ if (n & err_mask) { regs->SAF_ECP_STATUS = n; LOG_ERR("%s error %x", __func__, n); return -EIO; } if (cmd == MCHP_SAF_ECP_CMD_READ) { memcpy(pckt->buf, slave_mem, pckt->len); } return rc; } /* Flash read using SAF EC Portal */ static int saf_xec_flash_read(const struct device *dev, struct espi_saf_packet *pckt) { LOG_DBG("%s", __func__); return saf_ecp_access(dev, pckt, MCHP_SAF_ECP_CMD_READ); } /* Flash write using SAF EC Portal */ static int saf_xec_flash_write(const struct device *dev, struct espi_saf_packet *pckt) { return saf_ecp_access(dev, pckt, MCHP_SAF_ECP_CMD_WRITE); } /* Flash erase using SAF EC Portal */ static int saf_xec_flash_erase(const struct device *dev, struct espi_saf_packet *pckt) { return saf_ecp_access(dev, pckt, MCHP_SAF_ECP_CMD_ERASE); } static int espi_saf_xec_manage_callback(const struct device *dev, struct espi_callback *callback, bool set) { struct espi_saf_xec_data *data = dev->data; return espi_manage_callback(&data->callbacks, callback, set); } static int espi_saf_xec_activate(const struct device *dev) { if (dev == NULL) { return -EINVAL; } const struct espi_saf_xec_config * const xcfg = dev->config; struct mchp_espi_saf * const regs = xcfg->saf_base; const struct espi_xec_irq_info *safirq = &xcfg->irq_info_list[1]; regs->SAF_ESPI_MON_STATUS 
= MCHP_SAF_ESPI_MON_STS_IEN_MSK; mchp_xec_ecia_girq_src_clr(safirq->gid, safirq->gpos); regs->SAF_FL_CFG_MISC |= MCHP_SAF_FL_CFG_MISC_SAF_EN; regs->SAF_ESPI_MON_INTEN = (BIT(MCHP_SAF_ESPI_MON_STS_IEN_TMOUT_POS) | BIT(MCHP_SAF_ESPI_MON_STS_IEN_OOR_POS) | BIT(MCHP_SAF_ESPI_MON_STS_IEN_AV_POS) | BIT(MCHP_SAF_ESPI_MON_STS_IEN_BND_4K_POS) | BIT(MCHP_SAF_ESPI_MON_STS_IEN_ERSZ_POS)); k_busy_wait(1000); /* TODO FIXME get estimate of time interval */ return 0; } static void espi_saf_done_isr(const struct device *dev) { const struct espi_saf_xec_config * const xcfg = dev->config; struct espi_saf_xec_data *data = dev->data; struct mchp_espi_saf * const regs = xcfg->saf_base; const struct espi_xec_irq_info *safirq = &xcfg->irq_info_list[0]; uint32_t ecp_status = regs->SAF_ECP_STATUS; struct espi_event evt = { .evt_type = ESPI_BUS_TAF_NOTIFICATION, .evt_details = BIT(0), .evt_data = ecp_status }; regs->SAF_ECP_INTEN = 0u; regs->SAF_ECP_STATUS = BIT(MCHP_SAF_ECP_STS_DONE_POS); mchp_xec_ecia_girq_src_clr(safirq->gid, safirq->gpos); data->hwstatus = ecp_status; LOG_DBG("SAF Done ISR: status=0x%x", ecp_status); espi_send_callbacks(&data->callbacks, dev, evt); k_sem_give(&data->ecp_lock); } static void espi_saf_err_isr(const struct device *dev) { const struct espi_saf_xec_config * const xcfg = dev->config; struct espi_saf_xec_data *data = dev->data; struct mchp_espi_saf * const regs = xcfg->saf_base; const struct espi_xec_irq_info *safirq = &xcfg->irq_info_list[1]; uint32_t mon_status = regs->SAF_ESPI_MON_STATUS; struct espi_event evt = { .evt_type = ESPI_BUS_PERIPHERAL_NOTIFICATION, .evt_details = BIT(7), .evt_data = mon_status }; regs->SAF_ESPI_MON_STATUS = mon_status; mchp_xec_ecia_girq_src_clr(safirq->gid, safirq->gpos); data->hwstatus = mon_status; espi_send_callbacks(&data->callbacks, dev, evt); } static const struct espi_saf_driver_api espi_saf_xec_driver_api = { .config = espi_saf_xec_configuration, .set_protection_regions = espi_saf_xec_set_pr, .activate = 
espi_saf_xec_activate, .get_channel_status = espi_saf_xec_channel_ready, .flash_read = saf_xec_flash_read, .flash_write = saf_xec_flash_write, .flash_erase = saf_xec_flash_erase, .manage_callback = espi_saf_xec_manage_callback, }; static int espi_saf_xec_init(const struct device *dev) { const struct espi_saf_xec_config * const xcfg = dev->config; struct espi_saf_xec_data * const data = dev->data; struct espi_iom_regs * const espi_iom = xcfg->iom_base; /* ungate SAF clocks by disabling PCR sleep enable */ z_mchp_xec_pcr_periph_sleep(xcfg->pcr_idx, xcfg->pcr_pos, 0); /* Configure the channels and its capabilities based on build config */ espi_iom->CAP0 |= MCHP_ESPI_GBL_CAP0_FC_SUPP; espi_iom->CAPFC &= ~(MCHP_ESPI_FC_CAP_SHARE_MASK); espi_iom->CAPFC |= MCHP_ESPI_FC_CAP_SHARE_MAF_SAF; xcfg->irq_config_func(); k_sem_init(&data->ecp_lock, 0, 1); return 0; } /* n = node-id, p = property, i = index */ #define XEC_SAF_IRQ_INFO(n, p, i) \ { \ .gid = MCHP_XEC_ECIA_GIRQ(DT_PROP_BY_IDX(n, p, i)), \ .gpos = MCHP_XEC_ECIA_GIRQ_POS(DT_PROP_BY_IDX(n, p, i)), \ .anid = MCHP_XEC_ECIA_NVIC_AGGR(DT_PROP_BY_IDX(n, p, i)), \ .dnid = MCHP_XEC_ECIA_NVIC_DIRECT(DT_PROP_BY_IDX(n, p, i)), \ }, #define ESPI_SAF_XEC_DEVICE(n) \ \ static struct espi_saf_xec_data espisaf_xec_data_##n; \ \ static void espi_saf_xec_connect_irqs_##n(void); \ \ static const struct espi_xec_irq_info espi_saf_xec_irq_info_##n[] = { \ DT_INST_FOREACH_PROP_ELEM(n, girqs, XEC_SAF_IRQ_INFO) \ }; \ \ static const struct espi_saf_xec_config espisaf_xec_config_##n = { \ .saf_base = (struct mchp_espi_saf * const)( \ DT_INST_REG_ADDR_BY_IDX(n, 0)), \ .qmspi_base = (struct qmspi_regs * const)( \ DT_INST_REG_ADDR_BY_IDX(n, 1)), \ .saf_comm_base = (struct mchp_espi_saf_comm * const)( \ DT_INST_REG_ADDR_BY_IDX(n, 2)), \ .iom_base = (struct espi_iom_regs * const)( \ DT_REG_ADDR_BY_NAME(DT_INST_PARENT(n), io)), \ .poll_timeout = DT_INST_PROP_OR(n, poll_timeout, \ MCHP_SAF_FLASH_POLL_TIMEOUT), \ .consec_rd_timeout = DT_INST_PROP_OR( \ 
n, consec_rd_timeout, MCHP_SAF_FLASH_CONSEC_READ_TIMEOUT), \ .sus_chk_delay = DT_INST_PROP_OR(n, sus_chk_delay, \ MCHP_SAF_FLASH_SUS_CHK_DELAY), \ .sus_rsm_interval = DT_INST_PROP_OR(n, sus_rsm_interval, \ MCHP_SAF_FLASH_SUS_RSM_INTERVAL), \ .poll_interval = DT_INST_PROP_OR(n, poll_interval, \ MCHP_SAF_FLASH_POLL_INTERVAL), \ .pcr_idx = DT_INST_PROP_BY_IDX(n, pcrs, 0), \ .pcr_pos = DT_INST_PROP_BY_IDX(n, pcrs, 1), \ .irq_config_func = espi_saf_xec_connect_irqs_##n, \ .irq_info_size = ARRAY_SIZE(espi_saf_xec_irq_info_##n), \ .irq_info_list = espi_saf_xec_irq_info_##n, \ }; \ DEVICE_DT_INST_DEFINE(0, &espi_saf_xec_init, NULL, \ &espisaf_xec_data_##n, \ &espisaf_xec_config_##n, POST_KERNEL, \ CONFIG_ESPI_TAF_INIT_PRIORITY, \ &espi_saf_xec_driver_api); \ \ static void espi_saf_xec_connect_irqs_##n(void) \ { \ uint8_t girq, gpos; \ \ /* SAF Done */ \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, 0, irq), \ DT_INST_IRQ_BY_IDX(n, 0, priority), \ espi_saf_done_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQ_BY_IDX(n, 0, irq)); \ \ girq = MCHP_XEC_ECIA_GIRQ(DT_INST_PROP_BY_IDX(n, girqs, 0)); \ gpos = MCHP_XEC_ECIA_GIRQ_POS(DT_INST_PROP_BY_IDX(n, girqs, 0)); \ mchp_xec_ecia_girq_src_en(girq, gpos); \ \ /* SAF Error */ \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, 1, irq), \ DT_INST_IRQ_BY_IDX(n, 1, priority), \ espi_saf_err_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQ_BY_IDX(n, 1, irq)); \ \ girq = MCHP_XEC_ECIA_GIRQ(DT_INST_PROP_BY_IDX(n, girqs, 1)); \ gpos = MCHP_XEC_ECIA_GIRQ_POS(DT_INST_PROP_BY_IDX(n, girqs, 1)); \ mchp_xec_ecia_girq_src_en(girq, gpos); \ } DT_INST_FOREACH_STATUS_OKAY(ESPI_SAF_XEC_DEVICE) ```
/content/code_sandbox/drivers/espi/espi_saf_mchp_xec_v2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
11,998
```unknown # NPCX eSPI driver configuration options config ESPI_NPCX bool "Nuvoton NPCX embedded controller (EC) ESPI driver" default y depends on SOC_FAMILY_NPCX depends on DT_HAS_NUVOTON_NPCX_ESPI_ENABLED help This option enables the Intel Enhanced Serial Peripheral Interface (eSPI) for NPCX family of processors. config ESPI_NPCX_PERIPHERAL_ACPI_SHD_MEM_SIZE int "Host I/O peripheral port size for shared memory in npcx series" depends on ESPI_NPCX || ESPI_PERIPHERAL_ACPI_SHM_REGION default 256 help This is the port size used by the Host and EC to communicate over the shared memory region to return the ACPI response data. Please notice the valid value in npcx ec series for this option is 8/16/32/ 64/128/256/512/1024/2048/4096 bytes. config ESPI_NPCX_PERIPHERAL_HOST_CMD_PARAM_SIZE int "Host I/O peripheral port size for ec host command in npcx series" depends on ESPI_NPCX || ESPI_PERIPHERAL_EC_HOST_CMD default 256 help This is the port size used by the Host and EC to communicate over the shared memory region to return the host command parameter data. Please notice the valid value in npcx ec series for this option is 8/16/32/64/128/256/512/1024/2048/4096 bytes. config ESPI_NPCX_BYPASS_CH_ENABLE_FATAL_ERROR bool depends on SOC_SERIES_NPCX7 || SOC_SERIES_NPCX9 default y help Workaround the issue documented in NPCX99nF errata rev1_2, No.3.10. Enabling an eSPI channel during an eSPI transaction might (with low probability) cause the eSPI_SIF module to transition to a wrong state and therefore response with FATAL_ERROR on an incoming transaction. config ESPI_NPCX_PERIPHERAL_DEBUG_PORT_80_MULTI_BYTE bool "Host can write 1/2/4 bytes of Port80 data in a eSPI transaction" depends on (SOC_SERIES_NPCX9 || SOC_SERIES_NPCX4) && ESPI_PERIPHERAL_DEBUG_PORT_80 select RING_BUFFER help EC can accept 1/2/4 bytes of Port 80 data written from the Host in an eSPI transaction. 
config ESPI_NPCX_PERIPHERAL_DEBUG_PORT_80_RING_BUF_SIZE int "Debug Port80 ring buffer size" depends on ESPI_NPCX_PERIPHERAL_DEBUG_PORT_80_MULTI_BYTE default 256 help The size of the ring buffer in byte used by the Port80 ISR to store Postcodes from Host. config ESPI_TAF_NPCX bool "Nuvoton NPCX embedded controller (EC) ESPI TAF driver" depends on SOC_SERIES_NPCX4 depends on FLASH help This option enables the Intel Enhanced Serial Peripheral Interface Target Attached Flash (eSPI TAF) for NPCX4 family of processors. choice ESPI_TAF_ACCESS_MODE_CHOICE prompt "eSPI TAF Read Access Mode" default ESPI_TAF_AUTO_MODE config ESPI_TAF_AUTO_MODE bool "eSPI TAF Automatic Mode" help This is the setting to use auto mode for eSPI TAF read. config ESPI_TAF_MANUAL_MODE bool "eSPI TAF Manual Mode" help This is the setting to use manual mode for eSPI TAF read. endchoice config ESPI_TAF_PR_NUM int "Sets of protection region settings" default 16 help This size is display how many group of slave attached flash protection region. # The default value 'y' for the existing options if ESPI_NPCX is selected. if ESPI_NPCX config ESPI_OOB_CHANNEL default y config ESPI_PERIPHERAL_8042_KBC default y config ESPI_PERIPHERAL_HOST_IO default y config ESPI_PERIPHERAL_DEBUG_PORT_80 default y config ESPI_PERIPHERAL_EC_HOST_CMD default y config ESPI_PERIPHERAL_ACPI_SHM_REGION default y config ESPI_PERIPHERAL_CUSTOM_OPCODE default y config ESPI_NPCX_SUPP_VW_GPIO bool "Indicates that the eSPI hardware supports virtual wire GPIOs" default y if SOC_SERIES_NPCX9 || SOC_SERIES_NPCX4 help Selected if NPCX series supports virtual wire GPIOs in eSPI module. config ESPI_NPCX_CAF_GLOBAL_RESET_WORKAROUND bool default y if SOC_SERIES_NPCX4 && ESPI_FLASH_CHANNEL help Workaround the issue "Global Reset" in the npcx4 SoC errata. endif #ESPI_NPCX ```
/content/code_sandbox/drivers/espi/Kconfig.npcx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,068
```unknown # eSPI configuration options menuconfig ESPI bool "Enhanced Serial Peripheral Interface (eSPI) bus drivers" help Enable ESPI Driver. if ESPI source "drivers/espi/Kconfig.xec" source "drivers/espi/Kconfig.npcx" source "drivers/espi/Kconfig.espi_emul" source "drivers/espi/Kconfig.it8xxx2" module = ESPI module-str = espi source "subsys/logging/Kconfig.template.log_config" config ESPI_TARGET bool "ESPI target driver" default y help Enables eSPI driver in target mode. config ESPI_INIT_PRIORITY int "ESPI Controller driver initialization priority" default 3 help Driver initialization priority for eSPI driver. config ESPI_PERIPHERAL_CHANNEL bool "eSPI peripheral channel" default y help eSPI Controller supports peripheral channel. config ESPI_VWIRE_CHANNEL bool "eSPI virtual wire channel" default y help eSPI Controller supports virtual wires channel. config ESPI_AUTOMATIC_WARNING_ACKNOWLEDGE bool "Automatic acknowledge for eSPI HOST warnings" default y depends on ESPI_VWIRE_CHANNEL depends on ESPI_TARGET help Enable automatic acknowledgment from eSPI target towards eSPI controller whenever it receives suspend or reset warning. If this is disabled, it means the app wants to be give the opportunity to prepare for either HOST suspend or reset. config ESPI_AUTOMATIC_BOOT_DONE_ACKNOWLEDGE bool "Automatic acknowledge target boot status" default y depends on ESPI_VWIRE_CHANNEL depends on ESPI_TARGET help Enable automatic acknowledgment from target basic configuration been completed by sending a virtual wire message to the eSPI master. This depends on SPI boot configuration. It could be either very early in the flow after the VW channel is configured. Or it could be until flash channel is configured. config ESPI_OOB_CHANNEL bool "eSPI Out-of-band channel" help eSPI Controller supports OOB channel. config ESPI_FLASH_CHANNEL bool "ESPI flash channel" help eSPI Controller supports flash channel. 
if ESPI_PERIPHERAL_CHANNEL config ESPI_PERIPHERAL_UART bool "UART peripheral" help Enables UART over eSPI peripheral channel. config ESPI_PERIPHERAL_8042_KBC bool "8042 kbc peripheral" help Enables 8042 keyboard controller over eSPI peripheral channel. config ESPI_PERIPHERAL_HOST_IO bool "Host I/O peripheral" help Enables ACPI Host I/O over eSPI peripheral channel. config ESPI_PERIPHERAL_HOST_IO_PVT bool "Host I/O peripheral Private Channel" help Enables ACPI Host I/O over eSPI peripheral channel for private channel. config ESPI_PERIPHERAL_HOST_IO_PVT_PORT_NUM hex "Host I/O peripheral Private Channel" depends on ESPI_PERIPHERAL_HOST_IO_PVT default 0x06A00000 help This is the port number used by the Host and EC to communicate over the private channel. Please ensure the Host code is configured to use the same port. Also, ensure the port number selected doesn't clash with the existing ports (like 80, 92, 62 etc). config ESPI_PERIPHERAL_DEBUG_PORT_80 bool "Debug Port 80 peripheral" help Enables debug Port 80 over eSPI peripheral channel. config ESPI_PERIPHERAL_EC_HOST_CMD bool "Host peripheral device support EC host command subsystem" help Enables Embedded Controller (EC) host command subsystem via eSPI peripheral channel. config ESPI_PERIPHERAL_ACPI_SHM_REGION bool "Host peripheral device support shared memory region" help Enables shared memory region over eSPI peripheral channel to access the ACPI response data. config ESPI_PERIPHERAL_ACPI_SHM_REGION_PORT_NUM hex "Host I/O peripheral port number for shared memory region" depends on ESPI_PERIPHERAL_ACPI_SHM_REGION default 0x0900 help This is the port number used by the Host and EC to communicate over the shared memory region to access the ACPI response data. Please ensure the Host code is configured to use for accessing ACPI response data. Also, ensure the port number selected doesn't clash with the existing ports. 
config ESPI_PERIPHERAL_CUSTOM_OPCODE bool "Host peripheral device support customized opcode" help Enables opcode is customized for certain platforms such as Chromebook and so on over eSPI peripheral channel. config ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM hex "Host I/O peripheral port number for ec host command data" depends on ESPI_PERIPHERAL_EC_HOST_CMD default 0x0200 help This is the port number used by the Host and EC to communicate over the eSPI peripheral channel to send EC host command data and its result. Please ensure the Host code is configured to use for accessing host command data and result. Also, ensure the port number selected doesn't clash with the existing ports. config ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM hex "Host I/O peripheral port number for ec host command parameters" depends on ESPI_PERIPHERAL_EC_HOST_CMD default 0x0800 help This is the port number used by the Host and EC to communicate over the eSPI peripheral channel to access the host command request and response data. Please ensure the Host code is configured to use for accessing these package data. Also, ensure the port number selected doesn't clash with the existing ports. endif # ESPI_PERIPHERAL_CHANNEL config ESPI_OOB_CHANNEL_RX_ASYNC bool "OOB host-initiated traffic handling" depends on ESPI_OOB_CHANNEL help Enables asynchronous handling for host-initiated OOB traffic. Otherwise OOB traffic is assumed to be always client-initiated. config ESPI_TAF bool "ESPI TAF driver" depends on ESPI_FLASH_CHANNEL help Enable Target Attached Flash eSPI driver. TAF depends upon ESPI driver and flash channel. config ESPI_TAF_INIT_PRIORITY int "ESPI TAF driver initialization priority" depends on ESPI_TAF default 4 help Driver initialization priority for eSPI TAF driver. TAF driver must initialize after the ESPI driver. endif # ESPI ```
/content/code_sandbox/drivers/espi/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,389
```c /* * * * This driver creates fake eSPI buses which can contain emulated devices * (mainly host), implemented by a separate emulation driver. * The API between this driver/controller and device emulators attached * to its bus is defined by struct emul_espi_device_api. */ #define DT_DRV_COMPAT zephyr_espi_emul_controller #define LOG_LEVEL CONFIG_ESPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(espi_emul_ctlr); #include <zephyr/device.h> #include <zephyr/drivers/emul.h> #include <zephyr/drivers/espi.h> #include <zephyr/drivers/espi_emul.h> #include "espi_utils.h" /** Working data for the controller */ struct espi_emul_data { /* List of struct espi_emul associated with the device */ sys_slist_t emuls; /* eSPI host configuration */ struct espi_cfg cfg; /** List of eSPI callbacks */ sys_slist_t callbacks; }; static struct espi_emul *espi_emul_find(const struct device *dev, unsigned int chipsel) { struct espi_emul_data *data = dev->data; sys_snode_t *node; SYS_SLIST_FOR_EACH_NODE(&data->emuls, node) { struct espi_emul *emul; emul = CONTAINER_OF(node, struct espi_emul, node); if (emul->chipsel == chipsel) { return emul; } } return NULL; } static int espi_emul_config(const struct device *dev, struct espi_cfg *cfg) { struct espi_emul_data *data = dev->data; __ASSERT_NO_MSG(cfg); data->cfg = *cfg; return 0; } static int emul_espi_trigger_event(const struct device *dev, struct espi_event *evt) { struct espi_emul_data *data = dev->data; if (((evt->evt_type & ESPI_BUS_EVENT_VWIRE_RECEIVED) && !(data->cfg.channel_caps & ESPI_CHANNEL_VWIRE)) || ((evt->evt_type & ESPI_BUS_EVENT_OOB_RECEIVED) && !(data->cfg.channel_caps & ESPI_CHANNEL_OOB)) || ((evt->evt_type & ESPI_BUS_PERIPHERAL_NOTIFICATION) && !(data->cfg.channel_caps & ESPI_CHANNEL_PERIPHERAL))) { return -EIO; } espi_send_callbacks(&data->callbacks, dev, *evt); return 0; } static bool espi_emul_get_channel_status(const struct device *dev, enum espi_channel ch) { struct espi_emul_data *data = dev->data; 
return (data->cfg.channel_caps & ch); } static int espi_emul_read_lpc_request(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { const struct emul_espi_device_api *api; struct espi_emul *emul; struct espi_emul_data *emul_data = dev->data; ARG_UNUSED(data); if (!(emul_data->cfg.channel_caps & ESPI_CHANNEL_VWIRE)) { LOG_ERR("bad channel vwire"); return -EINVAL; } emul = espi_emul_find(dev, EMUL_ESPI_HOST_CHIPSEL); if (!emul) { LOG_ERR("espi_emul not found"); return -ENOTSUP; } __ASSERT_NO_MSG(emul->api); api = emul->api; switch (op) { #ifdef CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION case EACPI_GET_SHARED_MEMORY: __ASSERT_NO_MSG(api->get_acpi_shm); *data = (uint32_t)api->get_acpi_shm(emul->target); break; #endif default: return -EINVAL; } return 0; } static int espi_emul_write_lpc_request(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { ARG_UNUSED(dev); ARG_UNUSED(op); ARG_UNUSED(data); return -EINVAL; } static int espi_emul_send_vwire(const struct device *dev, enum espi_vwire_signal vw, uint8_t level) { const struct emul_espi_device_api *api; struct espi_emul *emul; struct espi_emul_data *data = dev->data; if (!(data->cfg.channel_caps & ESPI_CHANNEL_VWIRE)) { return -EIO; } emul = espi_emul_find(dev, EMUL_ESPI_HOST_CHIPSEL); if (!emul) { LOG_DBG("espi_emul not found"); return -EIO; } __ASSERT_NO_MSG(emul->api); __ASSERT_NO_MSG(emul->api->set_vw); api = emul->api; return api->set_vw(emul->target, vw, level); } static int espi_emul_receive_vwire(const struct device *dev, enum espi_vwire_signal vw, uint8_t *level) { const struct emul_espi_device_api *api; struct espi_emul *emul; struct espi_emul_data *data = dev->data; if (!(data->cfg.channel_caps & ESPI_CHANNEL_VWIRE)) { return -EIO; } emul = espi_emul_find(dev, EMUL_ESPI_HOST_CHIPSEL); if (!emul) { LOG_INF("espi_emul not found"); return -EIO; } __ASSERT_NO_MSG(emul->api); __ASSERT_NO_MSG(emul->api->get_vw); api = emul->api; return api->get_vw(emul->target, vw, level); } 
static int espi_emul_manage_callback(const struct device *dev, struct espi_callback *callback, bool set) { struct espi_emul_data *data = dev->data; return espi_manage_callback(&data->callbacks, callback, set); } /** * Set up a new emulator and add it to the list * * @param dev eSPI emulation controller device */ static int espi_emul_init(const struct device *dev) { struct espi_emul_data *data = dev->data; sys_slist_init(&data->emuls); return emul_init_for_bus(dev); } int espi_emul_register(const struct device *dev, struct espi_emul *emul) { struct espi_emul_data *data = dev->data; const char *name = emul->target->dev->name; sys_slist_append(&data->emuls, &emul->node); LOG_INF("Register emulator '%s' at cs %u\n", name, emul->chipsel); return 0; } /* Device instantiation */ static struct emul_espi_driver_api emul_espi_driver_api = { .espi_api = { .config = espi_emul_config, .get_channel_status = espi_emul_get_channel_status, .read_lpc_request = espi_emul_read_lpc_request, .write_lpc_request = espi_emul_write_lpc_request, .send_vwire = espi_emul_send_vwire, .receive_vwire = espi_emul_receive_vwire, .manage_callback = espi_emul_manage_callback }, .trigger_event = emul_espi_trigger_event, .find_emul = espi_emul_find, }; #define EMUL_LINK_AND_COMMA(node_id) \ { \ .dev = DEVICE_DT_GET(node_id), \ }, #define ESPI_EMUL_INIT(n) \ static const struct emul_link_for_bus emuls_##n[] = { \ DT_FOREACH_CHILD_STATUS_OKAY(DT_DRV_INST(n), EMUL_LINK_AND_COMMA)}; \ static struct emul_list_for_bus espi_emul_cfg_##n = { \ .children = emuls_##n, \ .num_children = ARRAY_SIZE(emuls_##n), \ }; \ static struct espi_emul_data espi_emul_data_##n; \ DEVICE_DT_INST_DEFINE(n, &espi_emul_init, NULL, &espi_emul_data_##n, &espi_emul_cfg_##n, \ POST_KERNEL, CONFIG_ESPI_INIT_PRIORITY, &emul_espi_driver_api); DT_INST_FOREACH_STATUS_OKAY(ESPI_EMUL_INIT) ```
/content/code_sandbox/drivers/espi/espi_emul.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,819
```c /* * */ #define DT_DRV_COMPAT microchip_xec_espi_host_dev #include <zephyr/kernel.h> #include <soc.h> #include <errno.h> #include <zephyr/drivers/espi.h> #include <zephyr/drivers/clock_control/mchp_xec_clock_control.h> #include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h> #include <zephyr/dt-bindings/interrupt-controller/mchp-xec-ecia.h> #include <zephyr/logging/log.h> #include <zephyr/sys/sys_io.h> #include <zephyr/sys/util.h> #include <zephyr/irq.h> #include "espi_utils.h" #include "espi_mchp_xec_v2.h" #define CONNECT_IRQ_MBOX0 NULL #define CONNECT_IRQ_KBC0 NULL #define CONNECT_IRQ_ACPI_EC0 NULL #define CONNECT_IRQ_ACPI_EC1 NULL #define CONNECT_IRQ_ACPI_EC2 NULL #define CONNECT_IRQ_ACPI_EC3 NULL #define CONNECT_IRQ_ACPI_EC4 NULL #define CONNECT_IRQ_ACPI_PM1 NULL #define CONNECT_IRQ_EMI0 NULL #define CONNECT_IRQ_EMI1 NULL #define CONNECT_IRQ_EMI2 NULL #define CONNECT_IRQ_RTC0 NULL #define CONNECT_IRQ_P80BD0 NULL #define INIT_MBOX0 NULL #define INIT_KBC0 NULL #define INIT_ACPI_EC0 NULL #define INIT_ACPI_EC1 NULL #define INIT_ACPI_EC2 NULL #define INIT_ACPI_EC3 NULL #define INIT_ACPI_EC4 NULL #define INIT_ACPI_PM1 NULL #define INIT_EMI0 NULL #define INIT_EMI1 NULL #define INIT_EMI2 NULL #define INIT_RTC0 NULL #define INIT_P80BD0 NULL #define INIT_UART0 NULL #define INIT_UART1 NULL /* BARs as defined in LPC spec chapter 11 */ #define ESPI_XEC_KBC_BAR_ADDRESS 0x00600000 #define ESPI_XEC_UART0_BAR_ADDRESS 0x03F80000 #define ESPI_XEC_MBOX_BAR_ADDRESS 0x03600000 #define ESPI_XEC_PORT80_BAR_ADDRESS 0x00800000 #define ESPI_XEC_PORT81_BAR_ADDRESS 0x00810000 #define ESPI_XEC_ACPI_EC0_BAR_ADDRESS 0x00620000 /* Espi peripheral has 3 uart ports */ #define ESPI_PERIPHERAL_UART_PORT0 0 #define ESPI_PERIPHERAL_UART_PORT1 1 #define UART_DEFAULT_IRQ_POS 2u #define UART_DEFAULT_IRQ BIT(UART_DEFAULT_IRQ_POS) /* PCR */ #define XEC_PCR_REG_BASE \ ((struct pcr_regs *)(DT_REG_ADDR(DT_NODELABEL(pcr)))) struct xec_espi_host_sram_config { uint32_t host_sram1_base; uint32_t 
host_sram2_base; uint16_t ec_sram1_ofs; uint16_t ec_sram2_ofs; uint8_t sram1_acc_size; uint8_t sram2_acc_size; }; struct xec_espi_host_dev_config { const struct device *parent; uint32_t reg_base; /* logical device registers */ uint32_t host_mem_base; /* 32-bit host memory address */ uint16_t host_io_base; /* 16-bit host I/O address */ uint8_t ldn; /* Logical device number */ uint8_t num_ecia; uint32_t *girqs; }; struct xec_acpi_ec_config { uintptr_t regbase; uint32_t ibf_ecia_info; uint32_t obe_ecia_info; }; #ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD #ifdef CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION static uint8_t ec_host_cmd_sram[CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE + CONFIG_ESPI_XEC_PERIPHERAL_ACPI_SHD_MEM_SIZE] __aligned(8); #else static uint8_t ec_host_cmd_sram[CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE] __aligned(8); #endif #endif /* CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD */ #ifdef CONFIG_ESPI_PERIPHERAL_XEC_MAILBOX BUILD_ASSERT(DT_NODE_HAS_STATUS(DT_NODELABEL(mbox0), okay), "XEC mbox0 DT node is disabled!"); static struct xec_mbox_config { uintptr_t regbase; uint32_t ecia_info; }; static const struct xec_mbox0_config xec_mbox0_cfg = { .regbase = DT_REG_ADDR(DT_NODELABEL(mbox0)), .ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(mbox0), girqs, 0), }; /* dev is a pointer to espi0 (parent) device */ static void mbox0_isr(const struct device *dev) { uint8_t girq = MCHP_XEC_ECIA_GIRQ(xec_mbox0_cfg.ecia_info); uint8_t bitpos = MCHP_XEC_ECIA_GIRQ_POS(xec_mbox0_cfg.ecia_info); /* clear GIRQ source, inline version */ mchp_soc_ecia_girq_src_clr(girq, bitpos); } static int connect_irq_mbox0(const struct device *dev) { /* clear GIRQ source */ mchp_xec_ecia_info_girq_src_clr(xec_mbox0_cfg.ecia_info); IRQ_CONNECT(DT_IRQN(DT_NODELABLE(mbox0)), DT_IRQ(DT_NODELABLE(mbox0), priority), acpi_ec0_isr, DEVICE_DT_GET(DT_NODELABEL(espi0)), 0); irq_enable(DT_IRQN(DT_NODELABLE(mbox0))); /* enable GIRQ source */ mchp_xec_ecia_info_girq_src_en(xec_mbox0_cfg.ecia_info); return 0; } /* Called 
by eSPI Bus init, eSPI reset de-assertion, and eSPI Platform Reset * de-assertion. */ static int init_mbox0(const struct device *dev) { struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev); struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr; regs->IOHBAR[IOB_MBOX] = ESPI_XEC_MBOX_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; return 0; } #undef CONNECT_IRQ_MBOX0 #define CONNECT_IRQ_MBOX0 connect_irq_mbox0 #undef INIT_MBOX0 #define INIT_MBOX0 init_mbox0 #endif /* CONFIG_ESPI_PERIPHERAL_XEC_MAILBOX */ #ifdef CONFIG_ESPI_PERIPHERAL_8042_KBC BUILD_ASSERT(DT_NODE_HAS_STATUS(DT_NODELABEL(kbc0), okay), "XEC kbc0 DT node is disabled!"); struct xec_kbc0_config { uintptr_t regbase; uint32_t ibf_ecia_info; uint32_t obe_ecia_info; }; static const struct xec_kbc0_config xec_kbc0_cfg = { .regbase = DT_REG_ADDR(DT_NODELABEL(kbc0)), .ibf_ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(kbc0), girqs, 1), .obe_ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(kbc0), girqs, 0), }; static void kbc0_ibf_isr(const struct device *dev) { struct kbc_regs *kbc_hw = (struct kbc_regs *)xec_kbc0_cfg.regbase; struct espi_xec_data *const data = (struct espi_xec_data *const)dev->data; #ifdef CONFIG_ESPI_PERIPHERAL_KBC_IBF_EVT_DATA /* Chrome solution */ struct espi_event evt = { ESPI_BUS_PERIPHERAL_NOTIFICATION, ESPI_PERIPHERAL_8042_KBC, ESPI_PERIPHERAL_NODATA, }; struct espi_evt_data_kbc *kbc_evt = (struct espi_evt_data_kbc *)&evt.evt_data; /* * Indicates if the host sent a command or data. * 0 = data * 1 = Command. */ kbc_evt->type = kbc_hw->EC_KBC_STS & MCHP_KBC_STS_CD ? 1 : 0; /* The data in KBC Input Buffer */ kbc_evt->data = kbc_hw->EC_DATA; /* KBC Input Buffer Full event */ kbc_evt->evt = HOST_KBC_EVT_IBF; #else /* Windows solution */ /* The high byte contains information from the host, * and the lower byte speficies if the host sent * a command or data. 1 = Command. 
*/ uint32_t isr_data = ((kbc_hw->EC_KBC_STS & MCHP_KBC_STS_CD) << E8042_ISR_CMD_DATA_POS); isr_data |= ((kbc_hw->EC_DATA & 0xFF) << E8042_ISR_DATA_POS); struct espi_event evt = { .evt_type = ESPI_BUS_PERIPHERAL_NOTIFICATION, .evt_details = ESPI_PERIPHERAL_8042_KBC, .evt_data = isr_data }; #endif espi_send_callbacks(&data->callbacks, dev, evt); mchp_xec_ecia_info_girq_src_clr(xec_kbc0_cfg.ibf_ecia_info); } static void kbc0_obe_isr(const struct device *dev) { #ifdef CONFIG_ESPI_PERIPHERAL_KBC_OBE_CBK /* Chrome solution */ struct espi_xec_data *const data = (struct espi_xec_data *const)dev->data; struct espi_event evt = { ESPI_BUS_PERIPHERAL_NOTIFICATION, ESPI_PERIPHERAL_8042_KBC, ESPI_PERIPHERAL_NODATA, }; struct espi_evt_data_kbc *kbc_evt = (struct espi_evt_data_kbc *)&evt.evt_data; /* Disable KBC OBE interrupt first */ mchp_xec_ecia_info_girq_src_dis(xec_kbc0_cfg.obe_ecia_info); /* * Notify application that host already read out data. The application * might need to clear status register via espi_api_lpc_write_request() * with E8042_CLEAR_FLAG opcode in callback. */ kbc_evt->evt = HOST_KBC_EVT_OBE; kbc_evt->data = 0; kbc_evt->type = 0; espi_send_callbacks(&data->callbacks, dev, evt); #else /* Windows solution */ /* disable and clear GIRQ interrupt and status */ mchp_xec_ecia_info_girq_src_dis(xec_kbc0_cfg.obe_ecia_info); #endif mchp_xec_ecia_info_girq_src_clr(xec_kbc0_cfg.obe_ecia_info); } /* dev is a pointer to espi0 device */ static int kbc0_rd_req(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { struct kbc_regs *kbc_hw = (struct kbc_regs *)xec_kbc0_cfg.regbase; ARG_UNUSED(dev); if (op >= E8042_START_OPCODE && op <= E8042_MAX_OPCODE) { /* Make sure kbc 8042 is on */ if (!(kbc_hw->KBC_CTRL & MCHP_KBC_CTRL_OBFEN)) { return -ENOTSUP; } switch (op) { case E8042_OBF_HAS_CHAR: /* EC has written data back to host. OBF is * automatically cleared after host reads * the data */ *data = kbc_hw->EC_KBC_STS & MCHP_KBC_STS_OBF ? 
1 : 0; break; case E8042_IBF_HAS_CHAR: *data = kbc_hw->EC_KBC_STS & MCHP_KBC_STS_IBF ? 1 : 0; break; case E8042_READ_KB_STS: *data = kbc_hw->EC_KBC_STS; break; default: return -EINVAL; } } else { return -ENOTSUP; } return 0; } /* dev is a pointer to espi0 device */ static int kbc0_wr_req(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { struct kbc_regs *kbc_hw = (struct kbc_regs *)xec_kbc0_cfg.regbase; volatile uint32_t __attribute__((unused)) dummy; ARG_UNUSED(dev); if (op >= E8042_START_OPCODE && op <= E8042_MAX_OPCODE) { /* Make sure kbc 8042 is on */ if (!(kbc_hw->KBC_CTRL & MCHP_KBC_CTRL_OBFEN)) { return -ENOTSUP; } switch (op) { case E8042_WRITE_KB_CHAR: kbc_hw->EC_DATA = *data & 0xff; break; case E8042_WRITE_MB_CHAR: kbc_hw->EC_AUX_DATA = *data & 0xff; break; case E8042_RESUME_IRQ: mchp_xec_ecia_info_girq_src_clr( xec_kbc0_cfg.ibf_ecia_info); mchp_xec_ecia_info_girq_src_en( xec_kbc0_cfg.ibf_ecia_info); break; case E8042_PAUSE_IRQ: mchp_xec_ecia_info_girq_src_dis( xec_kbc0_cfg.ibf_ecia_info); break; case E8042_CLEAR_OBF: dummy = kbc_hw->HOST_AUX_DATA; break; case E8042_SET_FLAG: /* FW shouldn't modify these flags directly */ *data &= ~(MCHP_KBC_STS_OBF | MCHP_KBC_STS_IBF | MCHP_KBC_STS_AUXOBF); kbc_hw->EC_KBC_STS |= *data; break; case E8042_CLEAR_FLAG: /* FW shouldn't modify these flags directly */ *data |= (MCHP_KBC_STS_OBF | MCHP_KBC_STS_IBF | MCHP_KBC_STS_AUXOBF); kbc_hw->EC_KBC_STS &= ~(*data); break; default: return -EINVAL; } } else { return -ENOTSUP; } return 0; } static int connect_irq_kbc0(const struct device *dev) { /* clear GIRQ source */ mchp_xec_ecia_info_girq_src_clr(xec_kbc0_cfg.ibf_ecia_info); mchp_xec_ecia_info_girq_src_clr(xec_kbc0_cfg.obe_ecia_info); IRQ_CONNECT(DT_IRQ_BY_NAME(DT_NODELABEL(kbc0), kbc_ibf, irq), DT_IRQ_BY_NAME(DT_NODELABEL(kbc0), kbc_ibf, priority), kbc0_ibf_isr, DEVICE_DT_GET(DT_NODELABEL(espi0)), 0); irq_enable(DT_IRQ_BY_NAME(DT_NODELABEL(kbc0), kbc_ibf, irq)); 
IRQ_CONNECT(DT_IRQ_BY_NAME(DT_NODELABEL(kbc0), kbc_obe, irq), DT_IRQ_BY_NAME(DT_NODELABEL(kbc0), kbc_obe, priority), kbc0_obe_isr, DEVICE_DT_GET(DT_NODELABEL(espi0)), 0); irq_enable(DT_IRQ_BY_NAME(DT_NODELABEL(kbc0), kbc_obe, irq)); /* enable GIRQ sources */ mchp_xec_ecia_info_girq_src_en(xec_kbc0_cfg.ibf_ecia_info); mchp_xec_ecia_info_girq_src_en(xec_kbc0_cfg.obe_ecia_info); return 0; } static int init_kbc0(const struct device *dev) { struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev); struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr; struct kbc_regs *kbc_hw = (struct kbc_regs *)xec_kbc0_cfg.regbase; kbc_hw->KBC_CTRL |= MCHP_KBC_CTRL_AUXH; kbc_hw->KBC_CTRL |= MCHP_KBC_CTRL_OBFEN; /* This is the activate register, but the HAL has a funny name */ kbc_hw->KBC_PORT92_EN = MCHP_KBC_PORT92_EN; regs->IOHBAR[IOB_KBC] = ESPI_XEC_KBC_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; return 0; } #undef CONNECT_IRQ_KBC0 #define CONNECT_IRQ_KBC0 connect_irq_kbc0 #undef INIT_KBC0 #define INIT_KBC0 init_kbc0 #endif /* CONFIG_ESPI_PERIPHERAL_8042_KBC */ #ifdef CONFIG_ESPI_PERIPHERAL_HOST_IO static const struct xec_acpi_ec_config xec_acpi_ec0_cfg = { .regbase = DT_REG_ADDR(DT_NODELABEL(acpi_ec0)), .ibf_ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(acpi_ec0), girqs, 0), .obe_ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(acpi_ec0), girqs, 1), }; static void acpi_ec0_ibf_isr(const struct device *dev) { struct espi_xec_data *const data = (struct espi_xec_data *const)dev->data; struct espi_event evt = { ESPI_BUS_PERIPHERAL_NOTIFICATION, ESPI_PERIPHERAL_HOST_IO, ESPI_PERIPHERAL_NODATA }; #ifdef CONFIG_ESPI_PERIPHERAL_ACPI_EC_IBF_EVT_DATA struct acpi_ec_regs *acpi_ec0_hw = (struct acpi_ec_regs *)xec_acpi_ec0_cfg.regbase; /* Updates to fit Chrome shim layer design */ struct espi_evt_data_acpi *acpi_evt = (struct espi_evt_data_acpi *)&evt.evt_data; /* Host put data on input buffer of ACPI EC0 channel */ if (acpi_ec0_hw->EC_STS & MCHP_ACPI_EC_STS_IBF) { /* Set processing flag before 
reading command byte */ acpi_ec0_hw->EC_STS |= MCHP_ACPI_EC_STS_UD1A; /* * Indicates if the host sent a command or data. * 0 = data * 1 = Command. */ acpi_evt->type = acpi_ec0_hw->EC_STS & MCHP_ACPI_EC_STS_CMD ? 1 : 0; acpi_evt->data = acpi_ec0_hw->OS2EC_DATA; } #endif /* CONFIG_ESPI_PERIPHERAL_ACPI_EC_IBF_EVT_DATA */ espi_send_callbacks(&data->callbacks, dev, evt); /* clear GIRQ status */ mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec0_cfg.ibf_ecia_info); } static void acpi_ec0_obe_isr(const struct device *dev) { /* disable and clear GIRQ status */ mchp_xec_ecia_info_girq_src_dis(xec_acpi_ec0_cfg.obe_ecia_info); mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec0_cfg.obe_ecia_info); } static int eacpi_rd_req(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { struct acpi_ec_regs *acpi_ec0_hw = (struct acpi_ec_regs *)xec_acpi_ec0_cfg.regbase; ARG_UNUSED(dev); switch (op) { case EACPI_OBF_HAS_CHAR: /* EC has written data back to host. OBF is * automatically cleared after host reads * the data */ *data = acpi_ec0_hw->EC_STS & MCHP_ACPI_EC_STS_OBF ? 1 : 0; break; case EACPI_IBF_HAS_CHAR: *data = acpi_ec0_hw->EC_STS & MCHP_ACPI_EC_STS_IBF ? 
1 : 0; break; case EACPI_READ_STS: *data = acpi_ec0_hw->EC_STS; break; #if defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION) case EACPI_GET_SHARED_MEMORY: *data = (uint32_t)ec_host_cmd_sram + CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE; break; #endif /* CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION */ default: return -EINVAL; } return 0; } static int eacpi_wr_req(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { struct acpi_ec_regs *acpi_ec0_hw = (struct acpi_ec_regs *)xec_acpi_ec0_cfg.regbase; ARG_UNUSED(dev); switch (op) { case EACPI_WRITE_CHAR: acpi_ec0_hw->EC2OS_DATA = (*data & 0xff); break; case EACPI_WRITE_STS: acpi_ec0_hw->EC_STS = (*data & 0xff); break; default: return -EINVAL; } return 0; } static int connect_irq_acpi_ec0(const struct device *dev) { mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec0_cfg.ibf_ecia_info); mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec0_cfg.obe_ecia_info); IRQ_CONNECT(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec0), acpi_ibf, irq), DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec0), acpi_ibf, priority), acpi_ec0_ibf_isr, DEVICE_DT_GET(DT_NODELABEL(espi0)), 0); irq_enable(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec0), acpi_ibf, irq)); IRQ_CONNECT(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec0), acpi_obe, irq), DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec0), acpi_obe, priority), acpi_ec0_obe_isr, DEVICE_DT_GET(DT_NODELABEL(espi0)), 0); irq_enable(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec0), acpi_obe, irq)); mchp_xec_ecia_info_girq_src_en(xec_acpi_ec0_cfg.ibf_ecia_info); mchp_xec_ecia_info_girq_src_en(xec_acpi_ec0_cfg.obe_ecia_info); return 0; } static int init_acpi_ec0(const struct device *dev) { struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev); struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr; regs->IOHBAR[IOB_ACPI_EC0] = ESPI_XEC_ACPI_EC0_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; return 0; } #undef CONNECT_IRQ_ACPI_EC0 #define CONNECT_IRQ_ACPI_EC0 connect_irq_acpi_ec0 #undef INIT_ACPI_EC0 #define INIT_ACPI_EC0 init_acpi_ec0 #endif /* 
CONFIG_ESPI_PERIPHERAL_HOST_IO */ #if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) || \ defined(CONFIG_ESPI_PERIPHERAL_HOST_IO_PVT) static const struct xec_acpi_ec_config xec_acpi_ec1_cfg = { .regbase = DT_REG_ADDR(DT_NODELABEL(acpi_ec1)), .ibf_ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(acpi_ec1), girqs, 0), .obe_ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(acpi_ec1), girqs, 1), }; static void acpi_ec1_ibf_isr(const struct device *dev) { struct espi_xec_data *const data = (struct espi_xec_data *const)dev->data; struct espi_event evt = { .evt_type = ESPI_BUS_PERIPHERAL_NOTIFICATION, #ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD .evt_details = ESPI_PERIPHERAL_EC_HOST_CMD, #else .evt_details = ESPI_PERIPHERAL_HOST_IO_PVT, #endif .evt_data = ESPI_PERIPHERAL_NODATA }; #ifdef CONFIG_ESPI_PERIPHERAL_ACPI_EC_IBF_EVT_DATA struct acpi_ec_regs *acpi_ec1_hw = (struct acpi_ec_regs *)xec_acpi_ec1_cfg.regbase; /* Updates to fit Chrome shim layer design. * Host put data on input buffer of ACPI EC1 channel. */ if (acpi_ec1_hw->EC_STS & MCHP_ACPI_EC_STS_IBF) { /* Set processing flag before reading command byte */ acpi_ec1_hw->EC_STS |= MCHP_ACPI_EC_STS_UD1A; /* Read out input data and clear IBF pending bit */ evt.evt_data = acpi_ec1_hw->OS2EC_DATA; } #endif /* CONFIG_ESPI_PERIPHERAL_ACPI_EC_IBF_EVT_DATA */ espi_send_callbacks(&data->callbacks, dev, evt); /* clear GIRQ status */ mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec1_cfg.ibf_ecia_info); } static void acpi_ec1_obe_isr(const struct device *dev) { /* disable and clear GIRQ status */ mchp_xec_ecia_info_girq_src_dis(xec_acpi_ec1_cfg.obe_ecia_info); mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec1_cfg.obe_ecia_info); } static int connect_irq_acpi_ec1(const struct device *dev) { mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec1_cfg.ibf_ecia_info); mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec1_cfg.obe_ecia_info); IRQ_CONNECT(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec1), acpi_ibf, irq), DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec1), acpi_ibf, priority), acpi_ec1_ibf_isr, 
DEVICE_DT_GET(DT_NODELABEL(espi0)), 0); irq_enable(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec1), acpi_ibf, irq)); IRQ_CONNECT(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec1), acpi_obe, irq), DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec1), acpi_obe, priority), acpi_ec1_obe_isr, DEVICE_DT_GET(DT_NODELABEL(espi0)), 0); irq_enable(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec1), acpi_obe, irq)); mchp_xec_ecia_info_girq_src_en(xec_acpi_ec1_cfg.ibf_ecia_info); mchp_xec_ecia_info_girq_src_en(xec_acpi_ec1_cfg.obe_ecia_info); return 0; } static int init_acpi_ec1(const struct device *dev) { struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev); struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr; #ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD regs->IOHBAR[IOB_ACPI_EC1] = (CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM << 16) | MCHP_ESPI_IO_BAR_HOST_VALID; #else regs->IOHBAR[IOB_ACPI_EC1] = CONFIG_ESPI_PERIPHERAL_HOST_IO_PVT_PORT_NUM | MCHP_ESPI_IO_BAR_HOST_VALID; regs->IOHBAR[IOB_MBOX] = ESPI_XEC_MBOX_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; #endif return 0; } #undef CONNECT_IRQ_ACPI_EC1 #define CONNECT_IRQ_ACPI_EC1 connect_irq_acpi_ec1 #undef INIT_ACPI_EC1 #define INIT_ACPI_EC1 init_acpi_ec1 #endif /* CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD || CONFIG_ESPI_PERIPHERAL_HOST_IO_PVT */ #ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD BUILD_ASSERT(DT_NODE_HAS_STATUS(DT_NODELABEL(emi0), okay), "XEC EMI0 DT node is disabled!"); struct xec_emi_config { uintptr_t regbase; }; static const struct xec_emi_config xec_emi0_cfg = { .regbase = DT_REG_ADDR(DT_NODELABEL(emi0)), }; static int init_emi0(const struct device *dev) { struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev); struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr; struct emi_regs *emi_hw = (struct emi_regs *)xec_emi0_cfg.regbase; regs->IOHBAR[IOB_EMI0] = (CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM << 16) | MCHP_ESPI_IO_BAR_HOST_VALID; emi_hw->MEM_BA_0 = (uint32_t)ec_host_cmd_sram; #ifdef CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION 
emi_hw->MEM_RL_0 = CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE + CONFIG_ESPI_XEC_PERIPHERAL_ACPI_SHD_MEM_SIZE; #else emi_hw->MEM_RL_0 = CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE; #endif emi_hw->MEM_WL_0 = CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE; return 0; } #undef INIT_EMI0 #define INIT_EMI0 init_emi0 #endif /* CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD */ #ifdef CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE static void host_cus_opcode_enable_interrupts(void); static void host_cus_opcode_disable_interrupts(void); static int ecust_rd_req(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { ARG_UNUSED(dev); switch (op) { #ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD case ECUSTOM_HOST_CMD_GET_PARAM_MEMORY: *data = (uint32_t)ec_host_cmd_sram; break; case ECUSTOM_HOST_CMD_GET_PARAM_MEMORY_SIZE: *data = CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE; break; #endif default: return -EINVAL; } return 0; } static int ecust_wr_req(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { struct acpi_ec_regs *acpi_ec1_hw = (struct acpi_ec_regs *)xec_acpi_ec1_cfg.regbase; ARG_UNUSED(dev); switch (op) { case ECUSTOM_HOST_SUBS_INTERRUPT_EN: if (*data != 0) { host_cus_opcode_enable_interrupts(); } else { host_cus_opcode_disable_interrupts(); } break; case ECUSTOM_HOST_CMD_SEND_RESULT: /* * Write result to the data byte. This sets the OBF * status bit. 
*/ acpi_ec1_hw->EC2OS_DATA = (*data & 0xff); /* Clear processing flag */ acpi_ec1_hw->EC_STS &= ~MCHP_ACPI_EC_STS_UD1A; break; default: return -EINVAL; } return 0; } #endif /* CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE */ #if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) && \ defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION) static int eacpi_shm_rd_req(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { ARG_UNUSED(dev); switch (op) { case EACPI_GET_SHARED_MEMORY: *data = (uint32_t)&ec_host_cmd_sram[CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE]; break; default: return -EINVAL; } return 0; } static int eacpi_shm_wr_req(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { ARG_UNUSED(dev); ARG_UNUSED(op); ARG_UNUSED(data); return -EINVAL; } #endif /* CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION */ #ifdef CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80 struct xec_p80bd_config { uintptr_t regbase; uint32_t ecia_info; }; static const struct xec_p80bd_config xec_p80bd0_cfg = { .regbase = DT_REG_ADDR(DT_NODELABEL(p80bd0)), .ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(p80bd0), girqs, 0), }; /* * MEC172x P80 BIOS Debug Port hardware captures writes to its 4-byte I/O range * Hardware provides status indicating byte lane(s) of each write. * We must decode the byte lane information and produce one or more * notification packets. */ static void p80bd0_isr(const struct device *dev) { struct espi_xec_data *const data = (struct espi_xec_data *const)dev->data; struct p80bd_regs *p80regs = (struct p80bd_regs *)xec_p80bd0_cfg.regbase; struct espi_event evt = { ESPI_BUS_PERIPHERAL_NOTIFICATION, 0, ESPI_PERIPHERAL_NODATA }; int count = 8; /* limit ISR to 8 bytes */ uint32_t dattr = p80regs->EC_DA; /* b[7:0]=8-bit value written, b[15:8]=attributes */ while ((dattr & MCHP_P80BD_ECDA_NE) && (count--)) { /* Not empty? */ /* espi_event protocol No Data value is 0 so pick a bit and * set it. This depends on the application. 
*/ evt.evt_data = (dattr & 0xffu) | BIT(16); switch (dattr & MCHP_P80BD_ECDA_LANE_MSK) { case MCHP_P80BD_ECDA_LANE_0: evt.evt_details |= (ESPI_PERIPHERAL_INDEX_0 << 16) | ESPI_PERIPHERAL_DEBUG_PORT80; break; case MCHP_P80BD_ECDA_LANE_1: evt.evt_details |= (ESPI_PERIPHERAL_INDEX_1 << 16) | ESPI_PERIPHERAL_DEBUG_PORT80; break; case MCHP_P80BD_ECDA_LANE_2: break; case MCHP_P80BD_ECDA_LANE_3: break; default: break; } if (evt.evt_details) { espi_send_callbacks(&data->callbacks, dev, evt); evt.evt_details = 0; } } /* clear GIRQ status */ mchp_xec_ecia_info_girq_src_clr(xec_p80bd0_cfg.ecia_info); } static int connect_irq_p80bd0(const struct device *dev) { mchp_xec_ecia_info_girq_src_clr(xec_p80bd0_cfg.ecia_info); IRQ_CONNECT(DT_IRQN(DT_NODELABEL(p80bd0)), DT_IRQ(DT_NODELABEL(acpi_ec1), priority), p80bd0_isr, DEVICE_DT_GET(DT_NODELABEL(espi0)), 0); irq_enable(DT_IRQN(DT_NODELABEL(p80bd0))); mchp_xec_ecia_info_girq_src_en(xec_p80bd0_cfg.ecia_info); return 0; } static int init_p80bd0(const struct device *dev) { struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev); struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr; struct p80bd_regs *p80bd_hw = (struct p80bd_regs *)xec_p80bd0_cfg.regbase; regs->IOHBAR[IOB_P80BD] = ESPI_XEC_PORT80_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; p80bd_hw->ACTV = 1; p80bd_hw->STS_IEN = MCHP_P80BD_SI_THR_IEN; return 0; } #undef CONNECT_IRQ_P80BD0 #define CONNECT_IRQ_P80BD0 connect_irq_p80bd0 #undef INIT_P80BD0 #define INIT_P80BD0 init_p80bd0 #endif /* CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80 */ #ifdef CONFIG_ESPI_PERIPHERAL_UART #if CONFIG_ESPI_PERIPHERAL_UART_SOC_MAPPING == 0 int init_uart0(const struct device *dev) { struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev); struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr; regs->IOHBAR[IOB_UART0] = ESPI_XEC_UART0_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; return 0; } #undef INIT_UART0 #define INIT_UART0 init_uart0 #elif CONFIG_ESPI_PERIPHERAL_UART_SOC_MAPPING == 
1 int init_uart1(const struct device *dev) { struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev); struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr; regs->IOHBAR[IOB_UART1] = ESPI_XEC_UART0_BAR_ADDRESS | MCHP_ESPI_IO_BAR_HOST_VALID; return 0; } #undef INIT_UART1 #define INIT_UART1 init_uart1 #endif /* CONFIG_ESPI_PERIPHERAL_UART_SOC_MAPPING */ #endif /* CONFIG_ESPI_PERIPHERAL_UART */ typedef int (*host_dev_irq_connect)(const struct device *dev); static const host_dev_irq_connect hdic_tbl[] = { CONNECT_IRQ_MBOX0, CONNECT_IRQ_KBC0, CONNECT_IRQ_ACPI_EC0, CONNECT_IRQ_ACPI_EC1, CONNECT_IRQ_ACPI_EC2, CONNECT_IRQ_ACPI_EC3, CONNECT_IRQ_ACPI_EC4, CONNECT_IRQ_ACPI_PM1, CONNECT_IRQ_EMI0, CONNECT_IRQ_EMI1, CONNECT_IRQ_EMI2, CONNECT_IRQ_RTC0, CONNECT_IRQ_P80BD0, }; typedef int (*host_dev_init)(const struct device *dev); static const host_dev_init hd_init_tbl[] = { INIT_MBOX0, INIT_KBC0, INIT_ACPI_EC0, INIT_ACPI_EC1, INIT_ACPI_EC2, INIT_ACPI_EC3, INIT_ACPI_EC4, INIT_ACPI_PM1, INIT_EMI0, INIT_EMI1, INIT_EMI2, INIT_RTC0, INIT_P80BD0, INIT_UART0, INIT_UART1, }; int xec_host_dev_connect_irqs(const struct device *dev) { int ret = 0; for (int i = 0; i < ARRAY_SIZE(hdic_tbl); i++) { if (hdic_tbl[i] == NULL) { continue; } ret = hdic_tbl[i](dev); if (ret < 0) { break; } } return ret; } int xec_host_dev_init(const struct device *dev) { int ret = 0; for (int i = 0; i < ARRAY_SIZE(hd_init_tbl); i++) { if (hd_init_tbl[i] == NULL) { continue; } ret = hd_init_tbl[i](dev); if (ret < 0) { break; } } return ret; } #ifdef CONFIG_ESPI_PERIPHERAL_CHANNEL typedef int (*xec_lpc_req)(const struct device *, enum lpc_peripheral_opcode, uint32_t *); struct espi_lpc_req { uint16_t opcode_start; uint16_t opcode_max; xec_lpc_req rd_req; xec_lpc_req wr_req; }; static const struct espi_lpc_req espi_lpc_req_tbl[] = { #ifdef CONFIG_ESPI_PERIPHERAL_8042_KBC { E8042_START_OPCODE, E8042_MAX_OPCODE, kbc0_rd_req, kbc0_wr_req }, #endif #ifdef CONFIG_ESPI_PERIPHERAL_HOST_IO { EACPI_START_OPCODE, 
EACPI_MAX_OPCODE, eacpi_rd_req, eacpi_wr_req }, #endif #if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) && \ defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION) { EACPI_GET_SHARED_MEMORY, EACPI_GET_SHARED_MEMORY, eacpi_shm_rd_req, eacpi_shm_wr_req}, #endif #ifdef CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE { ECUSTOM_START_OPCODE, ECUSTOM_MAX_OPCODE, ecust_rd_req, ecust_wr_req}, #endif }; static int espi_xec_lpc_req(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data, uint8_t write) { ARG_UNUSED(dev); for (int i = 0; i < ARRAY_SIZE(espi_lpc_req_tbl); i++) { const struct espi_lpc_req *req = &espi_lpc_req_tbl[i]; if ((op >= req->opcode_start) && (op <= req->opcode_max)) { if (write) { return req->wr_req(dev, op, data); } else { return req->rd_req(dev, op, data); } } } return -ENOTSUP; } /* dev = pointer to espi0 device */ int espi_xec_read_lpc_request(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { return espi_xec_lpc_req(dev, op, data, 0); } int espi_xec_write_lpc_request(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { return espi_xec_lpc_req(dev, op, data, 1); } #else int espi_xec_write_lpc_request(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { ARG_UNUSED(dev); ARG_UNUSED(op); ARG_UNUSED(data); return -ENOTSUP; } int espi_xec_read_lpc_request(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { ARG_UNUSED(dev); ARG_UNUSED(op); ARG_UNUSED(data); return -ENOTSUP; } #endif /* CONFIG_ESPI_PERIPHERAL_CHANNEL */ #if defined(CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE) static void host_cus_opcode_enable_interrupts(void) { /* Enable host KBC sub-device interrupt */ if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_8042_KBC)) { mchp_xec_ecia_info_girq_src_en(xec_kbc0_cfg.ibf_ecia_info); mchp_xec_ecia_info_girq_src_en(xec_kbc0_cfg.obe_ecia_info); } /* Enable host ACPI EC0 (Host IO) and * ACPI EC1 (Host CMD) sub-device interrupt */ if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_HOST_IO) || 
IS_ENABLED(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD)) { mchp_xec_ecia_info_girq_src_en(xec_acpi_ec0_cfg.ibf_ecia_info); mchp_xec_ecia_info_girq_src_en(xec_acpi_ec0_cfg.obe_ecia_info); mchp_xec_ecia_info_girq_src_en(xec_acpi_ec1_cfg.ibf_ecia_info); } /* Enable host Port80 sub-device interrupt installation */ if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80)) { mchp_xec_ecia_info_girq_src_en(xec_p80bd0_cfg.ecia_info); } } static void host_cus_opcode_disable_interrupts(void) { /* Disable host KBC sub-device interrupt */ if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_8042_KBC)) { mchp_xec_ecia_info_girq_src_dis(xec_kbc0_cfg.ibf_ecia_info); mchp_xec_ecia_info_girq_src_dis(xec_kbc0_cfg.obe_ecia_info); } /* Disable host ACPI EC0 (Host IO) and * ACPI EC1 (Host CMD) sub-device interrupt */ if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_HOST_IO) || IS_ENABLED(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD)) { mchp_xec_ecia_info_girq_src_dis(xec_acpi_ec0_cfg.ibf_ecia_info); mchp_xec_ecia_info_girq_src_dis(xec_acpi_ec0_cfg.obe_ecia_info); mchp_xec_ecia_info_girq_src_dis(xec_acpi_ec1_cfg.ibf_ecia_info); } /* Disable host Port80 sub-device interrupt installation */ if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80)) { mchp_xec_ecia_info_girq_src_dis(xec_p80bd0_cfg.ecia_info); } } #endif /* CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE */ ```
/content/code_sandbox/drivers/espi/espi_mchp_xec_host_v2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
10,068
```c /* * * * This driver creates fake MSPI buses which can contain emulated devices, * implemented by separate emulation drivers. * The API between this driver and its emulators is defined by * struct mspi_emul_driver_api. */ #define DT_DRV_COMPAT zephyr_mspi_emul_controller #define LOG_LEVEL CONFIG_MSPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(mspi_emul_controller); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/emul.h> #include <zephyr/drivers/mspi.h> #include <zephyr/drivers/mspi_emul.h> #define MSPI_MAX_FREQ 250000000 #define MSPI_MAX_DEVICE 2 #define MSPI_TIMEOUT_US 1000000 #define EMUL_MSPI_INST_ID 0 struct mspi_emul_context { /* the request entity currently owns the lock */ const struct mspi_dev_id *owner; /* the current transfer context */ struct mspi_xfer xfer; /* the transfer controls */ bool asynchronous; int packets_done; /* the transfer callback and callback context */ mspi_callback_handler_t callback; struct mspi_callback_context *callback_ctx; /** the transfer lock */ struct k_sem lock; }; struct mspi_emul_data { /* List of struct mspi_emul associated with the device */ sys_slist_t emuls; /* common mspi hardware configurations */ struct mspi_cfg mspicfg; /* device id of the current device occupied the bus */ const struct mspi_dev_id *dev_id; /* controller access mutex */ struct k_mutex lock; /* device specific hardware settings */ struct mspi_dev_cfg dev_cfg; /* XIP configurations */ struct mspi_xip_cfg xip_cfg; /* scrambling configurations */ struct mspi_scramble_cfg scramble_cfg; /* Timing configurations */ struct mspi_timing_cfg timing_cfg; /* local storage of mspi callback hanlder */ mspi_callback_handler_t cbs[MSPI_BUS_EVENT_MAX]; /* local storage of mspi callback context */ struct mspi_callback_context *cb_ctxs[MSPI_BUS_EVENT_MAX]; /* local mspi context */ struct mspi_emul_context ctx; }; /** * Verify if the device with dev_id is on this MSPI bus. 
* * @param controller Pointer to the device structure for the driver instance. * @param dev_id Pointer to the device ID structure from a device. * @return 0 The device is on this MSPI bus. * @return -ENODEV The device is not on this MSPI bus. */ static inline int mspi_verify_device(const struct device *controller, const struct mspi_dev_id *dev_id) { const struct mspi_emul_data *data = controller->data; int device_index = data->mspicfg.num_periph; int ret = 0; if (data->mspicfg.num_ce_gpios != 0) { for (int i = 0; i < data->mspicfg.num_periph; i++) { if (dev_id->ce.port == data->mspicfg.ce_group[i].port && dev_id->ce.pin == data->mspicfg.ce_group[i].pin && dev_id->ce.dt_flags == data->mspicfg.ce_group[i].dt_flags) { device_index = i; } } if (device_index >= data->mspicfg.num_periph || device_index != dev_id->dev_idx) { LOG_ERR("%u, invalid device ID.", __LINE__); return -ENODEV; } } else { if (dev_id->dev_idx >= data->mspicfg.num_periph) { LOG_ERR("%u, invalid device ID.", __LINE__); return -ENODEV; } } return ret; } /** * Check if the MSPI bus is busy. * * @param controller MSPI emulation controller device. * @return true The MSPI bus is busy. * @return false The MSPI bus is idle. */ static inline bool mspi_is_inp(const struct device *controller) { struct mspi_emul_data *data = controller->data; return (k_sem_count_get(&data->ctx.lock) == 0); } /** * Lock MSPI context. * * @param ctx Pointer to the MSPI context. * @param req Pointer to the request entity represented by mspi_dev_id. * @param xfer Pointer to the MSPI transfer started by req. * @param callback MSPI call back function pointer. * @param callback_ctx Pointer to the mspi callback context. * @return 0 if allowed for hardware configuration. * @return 1 if not allowed for hardware configuration. 
*/ static inline int mspi_context_lock(struct mspi_emul_context *ctx, const struct mspi_dev_id *req, const struct mspi_xfer *xfer, mspi_callback_handler_t callback, struct mspi_callback_context *callback_ctx) { int ret = 0; if (k_sem_take(&ctx->lock, K_MSEC(xfer->timeout))) { return ret; } if (ctx->callback) { if ((xfer->tx_dummy == ctx->xfer.tx_dummy) && (xfer->rx_dummy == ctx->xfer.rx_dummy) && (xfer->cmd_length == ctx->xfer.cmd_length) && (xfer->addr_length == ctx->xfer.addr_length)) { ret = 1; } else { ret = 0; } } ctx->owner = req; ctx->xfer = *xfer; ctx->packets_done = 0; ctx->asynchronous = ctx->xfer.async; ctx->callback = callback; ctx->callback_ctx = callback_ctx; return ret; } /** * release MSPI context. * * @param ctx Pointer to the MSPI context. */ static inline void mspi_context_release(struct mspi_emul_context *ctx) { ctx->owner = NULL; k_sem_give(&ctx->lock); } /** * Configure hardware before a transfer. * * @param controller Pointer to the MSPI controller instance. * @param xfer Pointer to the MSPI transfer started by the request entity. * @return 0 if successful. */ static int mspi_xfer_config(const struct device *controller, const struct mspi_xfer *xfer) { struct mspi_emul_data *data = controller->data; data->dev_cfg.cmd_length = xfer->cmd_length; data->dev_cfg.addr_length = xfer->addr_length; data->dev_cfg.tx_dummy = xfer->tx_dummy; data->dev_cfg.rx_dummy = xfer->rx_dummy; return 0; } /** * Check and save dev_cfg to controller data->dev_cfg. * * @param controller Pointer to the device structure for the driver instance. * @param param_mask Macro definition of what to be configured in cfg. * @param dev_cfg The device runtime configuration for the MSPI controller. * @return 0 MSPI device configuration successful. * @return -Error MSPI device configuration fail. 
*/ static inline int mspi_dev_cfg_check_save(const struct device *controller, const enum mspi_dev_cfg_mask param_mask, const struct mspi_dev_cfg *dev_cfg) { struct mspi_emul_data *data = controller->data; if (param_mask & MSPI_DEVICE_CONFIG_CE_NUM) { data->dev_cfg.ce_num = dev_cfg->ce_num; } if (param_mask & MSPI_DEVICE_CONFIG_FREQUENCY) { if (dev_cfg->freq > MSPI_MAX_FREQ) { LOG_ERR("%u, freq is too large.", __LINE__); return -ENOTSUP; } data->dev_cfg.freq = dev_cfg->freq; } if (param_mask & MSPI_DEVICE_CONFIG_IO_MODE) { if (dev_cfg->io_mode >= MSPI_IO_MODE_MAX) { LOG_ERR("%u, Invalid io_mode.", __LINE__); return -EINVAL; } data->dev_cfg.io_mode = dev_cfg->io_mode; } if (param_mask & MSPI_DEVICE_CONFIG_DATA_RATE) { if (dev_cfg->data_rate >= MSPI_DATA_RATE_MAX) { LOG_ERR("%u, Invalid data_rate.", __LINE__); return -EINVAL; } data->dev_cfg.data_rate = dev_cfg->data_rate; } if (param_mask & MSPI_DEVICE_CONFIG_CPP) { if (dev_cfg->cpp > MSPI_CPP_MODE_3) { LOG_ERR("%u, Invalid cpp.", __LINE__); return -EINVAL; } data->dev_cfg.cpp = dev_cfg->cpp; } if (param_mask & MSPI_DEVICE_CONFIG_ENDIAN) { if (dev_cfg->endian > MSPI_XFER_BIG_ENDIAN) { LOG_ERR("%u, Invalid endian.", __LINE__); return -EINVAL; } data->dev_cfg.endian = dev_cfg->endian; } if (param_mask & MSPI_DEVICE_CONFIG_CE_POL) { if (dev_cfg->ce_polarity > MSPI_CE_ACTIVE_HIGH) { LOG_ERR("%u, Invalid ce_polarity.", __LINE__); return -EINVAL; } data->dev_cfg.ce_polarity = dev_cfg->ce_polarity; } if (param_mask & MSPI_DEVICE_CONFIG_DQS) { if (dev_cfg->dqs_enable && !data->mspicfg.dqs_support) { LOG_ERR("%u, DQS mode not supported.", __LINE__); return -ENOTSUP; } data->dev_cfg.dqs_enable = dev_cfg->dqs_enable; } if (param_mask & MSPI_DEVICE_CONFIG_RX_DUMMY) { data->dev_cfg.rx_dummy = dev_cfg->rx_dummy; } if (param_mask & MSPI_DEVICE_CONFIG_TX_DUMMY) { data->dev_cfg.tx_dummy = dev_cfg->tx_dummy; } if (param_mask & MSPI_DEVICE_CONFIG_READ_CMD) { data->dev_cfg.read_cmd = dev_cfg->read_cmd; } if (param_mask & 
MSPI_DEVICE_CONFIG_WRITE_CMD) { data->dev_cfg.write_cmd = dev_cfg->write_cmd; } if (param_mask & MSPI_DEVICE_CONFIG_CMD_LEN) { data->dev_cfg.cmd_length = dev_cfg->cmd_length; } if (param_mask & MSPI_DEVICE_CONFIG_ADDR_LEN) { data->dev_cfg.addr_length = dev_cfg->addr_length; } if (param_mask & MSPI_DEVICE_CONFIG_MEM_BOUND) { data->dev_cfg.mem_boundary = dev_cfg->mem_boundary; } if (param_mask & MSPI_DEVICE_CONFIG_BREAK_TIME) { data->dev_cfg.time_to_break = dev_cfg->time_to_break; } return 0; } /** * Check the transfer context from the request entity. * * @param xfer Pointer to the MSPI transfer started by the request entity. * @return 0 if successful. * @return -EINVAL invalid parameter detected. */ static inline int mspi_xfer_check(const struct mspi_xfer *xfer) { if (xfer->xfer_mode > MSPI_DMA) { LOG_ERR("%u, Invalid xfer xfer_mode.", __LINE__); return -EINVAL; } if (!xfer->packets || !xfer->num_packet) { LOG_ERR("%u, Invalid xfer payload.", __LINE__); return -EINVAL; } for (int i = 0; i < xfer->num_packet; ++i) { if (!xfer->packets[i].data_buf || !xfer->packets[i].num_bytes) { LOG_ERR("%u, Invalid xfer payload num: %u.", __LINE__, i); return -EINVAL; } if (xfer->packets[i].dir > MSPI_TX) { LOG_ERR("%u, Invalid xfer direction.", __LINE__); return -EINVAL; } if (xfer->packets[i].cb_mask > MSPI_BUS_XFER_COMPLETE_CB) { LOG_ERR("%u, Invalid xfer cb_mask.", __LINE__); return -EINVAL; } } return 0; } /** * find_emul API implementation. * * @param controller Pointer to MSPI controller instance. * @param dev_idx The device index of a mspi_emul. * @return Pointer to a mspi_emul entity if successful. * @return NULL if mspi_emul entity not found. 
*/ static struct mspi_emul *mspi_emul_find(const struct device *controller, uint16_t dev_idx) { struct mspi_emul_data *data = controller->data; sys_snode_t *node; SYS_SLIST_FOR_EACH_NODE(&data->emuls, node) { struct mspi_emul *emul; emul = CONTAINER_OF(node, struct mspi_emul, node); if (emul->dev_idx == dev_idx) { return emul; } } return NULL; } /** * trigger_event API implementation. * * @param controller Pointer to MSPI controller instance. * @param evt_type The bus event to trigger * @return 0 if successful. */ static int emul_mspi_trigger_event(const struct device *controller, enum mspi_bus_event evt_type) { struct mspi_emul_data *data = controller->data; struct mspi_emul_context *ctx = &data->ctx; mspi_callback_handler_t cb; struct mspi_callback_context *cb_context; if (evt_type == MSPI_BUS_XFER_COMPLETE) { if (ctx->callback && ctx->callback_ctx) { struct mspi_event *evt = &ctx->callback_ctx->mspi_evt; const struct mspi_xfer_packet *packet; packet = &ctx->xfer.packets[ctx->packets_done]; evt->evt_type = MSPI_BUS_XFER_COMPLETE; evt->evt_data.controller = controller; evt->evt_data.dev_id = ctx->owner; evt->evt_data.packet = packet; evt->evt_data.packet_idx = ctx->packets_done; ctx->packets_done++; if (packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) { cb = ctx->callback; cb_context = ctx->callback_ctx; cb(cb_context); } } else { LOG_WRN("%u, MSPI_BUS_XFER_COMPLETE callback not registered.", __LINE__); } } else { cb = data->cbs[evt_type]; cb_context = data->cb_ctxs[evt_type]; if (cb) { cb(cb_context); } else { LOG_ERR("%u, mspi callback type %u not registered.", __LINE__, evt_type); return -EINVAL; } } return 0; } /** * API implementation of mspi_config. * * @param spec Pointer to MSPI device tree spec. * @return 0 if successful. * @return -Error if fail. 
*/
static int mspi_emul_config(const struct mspi_dt_spec *spec)
{
	const struct mspi_cfg *config = &spec->config;
	struct mspi_emul_data *data = spec->bus->data;

	/* Validate the static controller configuration up front. */
	if (config->op_mode > MSPI_OP_MODE_PERIPHERAL) {
		LOG_ERR("%u, Invalid MSPI OP mode.", __LINE__);
		return -EINVAL;
	}

	if (config->max_freq > MSPI_MAX_FREQ) {
		LOG_ERR("%u, Invalid MSPI Frequency", __LINE__);
		return -ENOTSUP;
	}

	if (config->duplex > MSPI_FULL_DUPLEX) {
		LOG_ERR("%u, Invalid MSPI duplexity.", __LINE__);
		return -EINVAL;
	}

	if (config->num_periph > MSPI_MAX_DEVICE) {
		LOG_ERR("%u, Invalid MSPI peripheral number.", __LINE__);
		return -ENOTSUP;
	}

	if (config->num_ce_gpios != 0 && config->num_ce_gpios != config->num_periph) {
		LOG_ERR("%u, Invalid number of ce_gpios.", __LINE__);
		return -EINVAL;
	}

	/* On re-initialization, take the controller lock and wait for any
	 * transfer in progress to drain before touching shared state.
	 */
	if (config->re_init) {
		if (k_mutex_lock(&data->lock, K_MSEC(CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE))) {
			LOG_ERR("%u, Failed to access controller.", __LINE__);
			return -EBUSY;
		}
		while (mspi_is_inp(spec->bus)) {
		}
	}

	/* emulate controller hardware initialization */
	k_busy_wait(10);

	/* Make the transfer context available if it is currently taken. */
	if (!k_sem_count_get(&data->ctx.lock)) {
		data->ctx.owner = NULL;
		k_sem_give(&data->ctx.lock);
	}

	if (config->re_init) {
		k_mutex_unlock(&data->lock);
	}

	data->mspicfg = *config;

	return 0;
}

/**
 * API implementation of mspi_dev_config.
 *
 * @param controller Pointer to the device structure for the driver instance.
 * @param dev_id Pointer to the device ID structure from a device.
 * @param param_mask Macro definition of what to be configured in cfg.
 * @param dev_cfg The device runtime configuration for the MSPI controller.
 *
 * @retval 0 if successful.
 * @retval -EINVAL invalid capabilities, failed to configure device.
 * @retval -ENOTSUP capability not supported by MSPI peripheral.
*/
static int mspi_emul_dev_config(const struct device *controller, const struct mspi_dev_id *dev_id,
				const enum mspi_dev_cfg_mask param_mask,
				const struct mspi_dev_cfg *dev_cfg)
{
	struct mspi_emul_data *data = controller->data;
	int rc = 0;

	/* Switching to a different device: take the controller lock and
	 * verify the new device first.
	 */
	if (data->dev_id != dev_id) {
		if (k_mutex_lock(&data->lock, K_MSEC(CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE))) {
			LOG_ERR("%u, Failed to access controller.", __LINE__);
			return -EBUSY;
		}

		rc = mspi_verify_device(controller, dev_id);
		if (rc) {
			goto e_return;
		}
	}

	/* Wait for any transfer in progress to finish. */
	while (mspi_is_inp(controller)) {
	}

	if (param_mask == MSPI_DEVICE_CONFIG_NONE && !data->mspicfg.sw_multi_periph) {
		/* Do nothing except obtaining the controller lock */
	} else if (param_mask < MSPI_DEVICE_CONFIG_ALL) {
		/* Partial reconfiguration is only allowed for the device that
		 * already owns the controller.
		 */
		if (data->dev_id != dev_id) {
			/* MSPI_DEVICE_CONFIG_ALL should be used */
			LOG_ERR("%u, config failed, must be the same device.", __LINE__);
			rc = -ENOTSUP;
			goto e_return;
		}

		rc = mspi_dev_cfg_check_save(controller, param_mask, dev_cfg);
		if (rc) {
			goto e_return;
		}
	} else if (param_mask == MSPI_DEVICE_CONFIG_ALL) {
		rc = mspi_dev_cfg_check_save(controller, param_mask, dev_cfg);
		if (rc) {
			goto e_return;
		}
		if (data->dev_id != dev_id) {
			/* Conduct device switching */
		}
	} else {
		LOG_ERR("%u, Invalid param_mask.", __LINE__);
		rc = -EINVAL;
		goto e_return;
	}

	data->dev_id = dev_id;
	return rc;

e_return:
	/* NOTE(review): this path unlocks even when the current call did not
	 * take the lock (same-device error cases) — relies on the caller
	 * already holding it from a prior dev_config; confirm intended.
	 */
	k_mutex_unlock(&data->lock);
	return rc;
}

/**
 * API implementation of mspi_xip_config.
 *
 * @param controller Pointer to the device structure for the driver instance.
 * @param dev_id Pointer to the device ID structure from a device.
 * @param xip_cfg The controller XIP configuration for MSPI.
 *
 * @retval 0 if successful.
 * @retval -ESTALE device ID don't match, need to call mspi_dev_config first.
*/ static int mspi_emul_xip_config(const struct device *controller, const struct mspi_dev_id *dev_id, const struct mspi_xip_cfg *xip_cfg) { struct mspi_emul_data *data = controller->data; int ret = 0; if (dev_id != data->dev_id) { LOG_ERR("%u, dev_id don't match.", __LINE__); return -ESTALE; } data->xip_cfg = *xip_cfg; return ret; } /** * API implementation of mspi_scramble_config. * * @param controller Pointer to the device structure for the driver instance. * @param dev_id Pointer to the device ID structure from a device. * @param scramble_cfg The controller scramble configuration for MSPI. * * @retval 0 if successful. * @retval -ESTALE device ID don't match, need to call mspi_dev_config first. */ static int mspi_emul_scramble_config(const struct device *controller, const struct mspi_dev_id *dev_id, const struct mspi_scramble_cfg *scramble_cfg) { struct mspi_emul_data *data = controller->data; int ret = 0; while (mspi_is_inp(controller)) { } if (dev_id != data->dev_id) { LOG_ERR("%u, dev_id don't match.", __LINE__); return -ESTALE; } data->scramble_cfg = *scramble_cfg; return ret; } /** * API implementation of mspi_timing_config. * * @param controller Pointer to the device structure for the driver instance. * @param dev_id Pointer to the device ID structure from a device. * @param param_mask The macro definition of what should be configured in cfg. * @param timing_cfg The controller timing configuration for MSPI. * * @retval 0 if successful. * @retval -ESTALE device ID don't match, need to call mspi_dev_config first. * @retval -ENOTSUP param_mask value is not supported. 
*/ static int mspi_emul_timing_config(const struct device *controller, const struct mspi_dev_id *dev_id, const uint32_t param_mask, void *timing_cfg) { struct mspi_emul_data *data = controller->data; int ret = 0; while (mspi_is_inp(controller)) { } if (dev_id != data->dev_id) { LOG_ERR("%u, dev_id don't match.", __LINE__); return -ESTALE; } if (param_mask == MSPI_TIMING_PARAM_DUMMY) { data->timing_cfg = *(struct mspi_timing_cfg *)timing_cfg; } else { LOG_ERR("%u, param_mask not supported.", __LINE__); return -ENOTSUP; } return ret; } /** * API implementation of mspi_get_channel_status. * * @param controller Pointer to the device structure for the driver instance. * @param ch Not used. * * @retval 0 if successful. * @retval -EBUSY MSPI bus is busy */ static int mspi_emul_get_channel_status(const struct device *controller, uint8_t ch) { struct mspi_emul_data *data = controller->data; ARG_UNUSED(ch); if (mspi_is_inp(controller)) { return -EBUSY; } k_mutex_unlock(&data->lock); data->dev_id = NULL; return 0; } /** * API implementation of mspi_register_callback. * * @param controller Pointer to the device structure for the driver instance. * @param dev_id Pointer to the device ID structure from a device. * @param evt_type The event type associated the callback. * @param cb Pointer to the user implemented callback function. * @param ctx Pointer to the callback context. * * @retval 0 if successful. * @retval -ESTALE device ID don't match, need to call mspi_dev_config first. * @retval -ENOTSUP evt_type not supported. 
*/ static int mspi_emul_register_callback(const struct device *controller, const struct mspi_dev_id *dev_id, const enum mspi_bus_event evt_type, mspi_callback_handler_t cb, struct mspi_callback_context *ctx) { struct mspi_emul_data *data = controller->data; while (mspi_is_inp(controller)) { } if (dev_id != data->dev_id) { LOG_ERR("%u, dev_id don't match.", __LINE__); return -ESTALE; } if (evt_type >= MSPI_BUS_EVENT_MAX) { LOG_ERR("%u, callback types not supported.", __LINE__); return -ENOTSUP; } data->cbs[evt_type] = cb; data->cb_ctxs[evt_type] = ctx; return 0; } /** * API implementation of mspi_transceive. * * @param controller Pointer to the device structure for the driver instance. * @param dev_id Pointer to the device ID structure from a device. * @param xfer Pointer to the MSPI transfer started by dev_id. * * @retval 0 if successful. * @retval -ESTALE device ID don't match, need to call mspi_dev_config first. * @retval -Error transfer failed. */ static int mspi_emul_transceive(const struct device *controller, const struct mspi_dev_id *dev_id, const struct mspi_xfer *xfer) { struct mspi_emul_data *data = controller->data; struct mspi_emul_context *ctx = &data->ctx; struct mspi_emul *emul; mspi_callback_handler_t cb = NULL; struct mspi_callback_context *cb_ctx = NULL; int ret = 0; int cfg_flag = 0; emul = mspi_emul_find(controller, dev_id->dev_idx); if (!emul) { LOG_ERR("%u, mspi_emul not found.", __LINE__); return -EIO; } if (dev_id != data->dev_id) { LOG_ERR("%u, dev_id don't match.", __LINE__); return -ESTALE; } ret = mspi_xfer_check(xfer); if (ret) { return ret; } __ASSERT_NO_MSG(emul->api); __ASSERT_NO_MSG(emul->api->transceive); if (xfer->async) { cb = data->cbs[MSPI_BUS_XFER_COMPLETE]; cb_ctx = data->cb_ctxs[MSPI_BUS_XFER_COMPLETE]; } cfg_flag = mspi_context_lock(ctx, dev_id, xfer, cb, cb_ctx); if (cfg_flag) { if (cfg_flag == 1) { ret = mspi_xfer_config(controller, xfer); if (ret) { LOG_ERR("%u, xfer config fail.", __LINE__); goto trans_err; } } else { 
ret = cfg_flag; LOG_ERR("%u, xfer fail.", __LINE__); goto trans_err; } } ret = emul->api->transceive(emul->target, ctx->xfer.packets, ctx->xfer.num_packet, ctx->asynchronous, MSPI_TIMEOUT_US); trans_err: mspi_context_release(ctx); return ret; } /** * Set up a new emulator and add its child to the list. * * @param dev MSPI emulation controller. * * @retval 0 if successful. */ static int mspi_emul_init(const struct device *dev) { struct mspi_emul_data *data = dev->data; const struct mspi_dt_spec spec = { .bus = dev, .config = data->mspicfg, }; int ret = 0; ret = mspi_emul_config(&spec); if (ret) { return ret; } sys_slist_init(&data->emuls); return emul_init_for_bus(dev); } /** * add its child to the list. * * @param dev MSPI emulation controller. * @param emul MSPI emulation device. * * @retval 0 if successful. */ int mspi_emul_register(const struct device *dev, struct mspi_emul *emul) { struct mspi_emul_data *data = dev->data; const char *name = emul->target->dev->name; sys_slist_append(&data->emuls, &emul->node); LOG_INF("Register emulator '%s', id:%x\n", name, emul->dev_idx); return 0; } /* Device instantiation */ static struct emul_mspi_driver_api emul_mspi_driver_api = { .mspi_api = { .config = mspi_emul_config, .dev_config = mspi_emul_dev_config, .xip_config = mspi_emul_xip_config, .scramble_config = mspi_emul_scramble_config, .timing_config = mspi_emul_timing_config, .get_channel_status = mspi_emul_get_channel_status, .register_callback = mspi_emul_register_callback, .transceive = mspi_emul_transceive, }, .trigger_event = emul_mspi_trigger_event, .find_emul = mspi_emul_find, }; #define MSPI_CONFIG(n) \ { \ .channel_num = EMUL_MSPI_INST_ID, \ .op_mode = DT_ENUM_IDX_OR(n, op_mode, MSPI_OP_MODE_CONTROLLER), \ .duplex = DT_ENUM_IDX_OR(n, duplex, MSPI_HALF_DUPLEX), \ .max_freq = DT_INST_PROP(n, clock_frequency), \ .dqs_support = DT_INST_PROP_OR(n, dqs_support, false), \ .sw_multi_periph = DT_INST_PROP(n, software_multiperipheral), \ } #define 
EMUL_LINK_AND_COMMA(node_id) \ { \ .dev = DEVICE_DT_GET(node_id), \ }, #define MSPI_EMUL_INIT(n) \ static const struct emul_link_for_bus emuls_##n[] = { \ DT_FOREACH_CHILD_STATUS_OKAY(DT_DRV_INST(n), EMUL_LINK_AND_COMMA)}; \ static struct emul_list_for_bus mspi_emul_cfg_##n = { \ .children = emuls_##n, \ .num_children = ARRAY_SIZE(emuls_##n), \ }; \ static struct gpio_dt_spec ce_gpios##n[] = MSPI_CE_GPIOS_DT_SPEC_INST_GET(n); \ static struct mspi_emul_data mspi_emul_data_##n = { \ .mspicfg = MSPI_CONFIG(n), \ .mspicfg.ce_group = (struct gpio_dt_spec *)ce_gpios##n, \ .mspicfg.num_ce_gpios = ARRAY_SIZE(ce_gpios##n), \ .mspicfg.num_periph = DT_INST_CHILD_NUM(n), \ .mspicfg.re_init = false, \ .dev_id = 0, \ .lock = Z_MUTEX_INITIALIZER(mspi_emul_data_##n.lock), \ .dev_cfg = {0}, \ .xip_cfg = {0}, \ .scramble_cfg = {0}, \ .cbs = {0}, \ .cb_ctxs = {0}, \ .ctx.lock = Z_SEM_INITIALIZER(mspi_emul_data_##n.ctx.lock, 0, 1), \ .ctx.callback = 0, \ .ctx.callback_ctx = 0, \ }; \ DEVICE_DT_INST_DEFINE(n, \ &mspi_emul_init, \ NULL, \ &mspi_emul_data_##n, \ &mspi_emul_cfg_##n, \ POST_KERNEL, \ CONFIG_MSPI_INIT_PRIORITY, \ &emul_mspi_driver_api); DT_INST_FOREACH_STATUS_OKAY(MSPI_EMUL_INIT) ```
/content/code_sandbox/drivers/mspi/mspi_emul.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,891
```unknown config MSPI_EMUL bool "MSPI emulator" default y depends on DT_HAS_ZEPHYR_MSPI_EMUL_CONTROLLER_ENABLED depends on EMUL select MSPI_XIP select MSPI_SCRAMBLE select MSPI_TIMING select GPIO help Enable the MSPI emulator driver. This is a fake driver in that it does not talk to real hardware. Instead it talks to emulation drivers that pretend to be devices on the emulated MSPI bus. It is used for testing drivers for MSPI devices. ```
/content/code_sandbox/drivers/mspi/Kconfig.mspi_emul
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
122
```unknown config MSPI_AMBIQ_AP3 bool "Ambiq Apollo3 series MSPI driver" default y depends on DT_HAS_AMBIQ_MSPI_CONTROLLER_ENABLED select AMBIQ_HAL select AMBIQ_HAL_USE_MSPI select MSPI_XIP select MSPI_SCRAMBLE select MSPI_TIMING select GPIO help Enable driver for Ambiq MSPI. config MSPI_AMBIQ_BUFF_RAM_LOCATION hex "byte offset to SRAM_BASE_ADDRESS" default 0x50000 help This option specifies the mspi buffer/heap start address config MSPI_AMBIQ_BUFF_ALIGNMENT int "byte alignment of the MSPI buffer" default 8 if MSPI_AMBIQ_AP3 default 4 help This option specifies the mspi buffer alignment ```
/content/code_sandbox/drivers/mspi/Kconfig.ambiq
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
176
```objective-c
/*
 *
 */
#ifndef MSPI_AMBIQ_H_
#define MSPI_AMBIQ_H_

#include <am_mcu_apollo.h>

/* Hand-calculated minimum heap sizes needed to return a successful
 * 1-byte allocation. See details in lib/os/heap.[ch]
 */
#define MSPI_AMBIQ_HEAP_MIN_SIZE (sizeof(void *) > 4 ? 56 : 44)

/* Define a k_heap named 'name' whose backing storage lives in the
 * dedicated ".mspi_buff" linker section, clamped to the minimum
 * workable heap size.
 */
#define MSPI_AMBIQ_HEAP_DEFINE(name, bytes)                                     \
	char __attribute__((section(".mspi_buff")))                             \
		kheap_##name[MAX(bytes, MSPI_AMBIQ_HEAP_MIN_SIZE)];             \
	STRUCT_SECTION_ITERABLE(k_heap, name) = {                               \
		.heap =                                                         \
			{                                                       \
				.init_mem = kheap_##name,                       \
				.init_bytes = MAX(bytes,                        \
						  MSPI_AMBIQ_HEAP_MIN_SIZE),    \
			},                                                      \
	}

/* Vendor HAL timing parameters carried through the generic
 * mspi_timing_config() void-pointer argument.
 */
struct mspi_ambiq_timing_cfg {
	uint8_t ui8WriteLatency;
	uint8_t ui8TurnAround;
	bool bTxNeg;
	bool bRxNeg;
	bool bRxCap;
	uint32_t ui32TxDQSDelay;
	uint32_t ui32RxDQSDelay;
	uint32_t ui32RXDQSDelayEXT;
};

/* Bit flags selecting which members of mspi_ambiq_timing_cfg to apply. */
enum mspi_ambiq_timing_param {
	MSPI_AMBIQ_SET_WLC = BIT(0),
	MSPI_AMBIQ_SET_RLC = BIT(1),
	MSPI_AMBIQ_SET_TXNEG = BIT(2),
	MSPI_AMBIQ_SET_RXNEG = BIT(3),
	MSPI_AMBIQ_SET_RXCAP = BIT(4),
	MSPI_AMBIQ_SET_TXDQSDLY = BIT(5),
	MSPI_AMBIQ_SET_RXDQSDLY = BIT(6),
	MSPI_AMBIQ_SET_RXDQSDLYEXT = BIT(7),
};

/* Derive the hardware MSPI instance index from the bus node's register
 * address relative to MSPI0_BASE.
 */
#define MSPI_PORT(n) ((DT_REG_ADDR(DT_INST_BUS(n)) - MSPI0_BASE) /             \
		      (DT_REG_SIZE(DT_INST_BUS(n)) * 4))

/* NOTE(review): this expands to a brace block whose last expression is
 * discarded, so the macro yields no value — confirm callers do not rely
 * on reading ui8TurnAround through it.
 */
#define TIMING_CFG_GET_RX_DUMMY(cfg)                                            \
	{                                                                       \
		mspi_timing_cfg *timing = (mspi_timing_cfg *)cfg;               \
		timing->ui8TurnAround;                                          \
	}

/* Store the RX dummy-cycle count into the vendor timing struct. */
#define TIMING_CFG_SET_RX_DUMMY(cfg, num)                                       \
	{                                                                       \
		mspi_timing_cfg *timing = (mspi_timing_cfg *)cfg;               \
		timing->ui8TurnAround = num;                                    \
	}

#endif /* MSPI_AMBIQ_H_ */
```
/content/code_sandbox/drivers/mspi/mspi_ambiq.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
552
```unknown
# MSPI driver configuration options

#
# MSPI Drivers
#
menuconfig MSPI
	bool "Multi-bit Serial Peripheral Interface (MSPI) bus drivers"
	help
	  Enable support for the MSPI hardware bus.

if MSPI

config MSPI_ASYNC
	bool "Asynchronous call support"
	select POLL
	help
	  This option enables the asynchronous API calls.

config MSPI_PERIPHERAL
	bool "Peripheral support"
	help
	  Enables Driver MSPI peripheral mode operations. Peripheral mode
	  support depends on the driver and the hardware it runs on.

config MSPI_INIT_PRIORITY
	int "Init priority"
	default 70
	help
	  Device driver initialization priority.

config MSPI_COMPLETION_TIMEOUT_TOLERANCE
	int "Completion timeout tolerance (ms)"
	default 200
	help
	  The tolerance value in ms for the MSPI completion timeout logic.

config MSPI_XIP
	bool "XIP eXecute In Place"
	help
	  Describes controller hardware XIP capability and enables
	  mspi_xip_config calls in device drivers.

config MSPI_SCRAMBLE
	bool "Scrambling support"
	help
	  Describes controller hardware scrambling capability and enables
	  mspi_scramble_config calls in device drivers.

config MSPI_TIMING
	bool "Timing support"
	help
	  Enables mspi_timing_config calls in device drivers for those
	  controllers that need this to function properly at high frequencies.

module = MSPI
module-str = mspi
source "subsys/logging/Kconfig.template.log_config"

source "drivers/mspi/Kconfig.ambiq"
source "drivers/mspi/Kconfig.mspi_emul"

endif # MSPI
```
/content/code_sandbox/drivers/mspi/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
343
```c /* * */ #define DT_DRV_COMPAT ite_it8xxx2_espi #include <assert.h> #include <zephyr/drivers/espi.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/interrupt_controller/wuc_ite_it8xxx2.h> #include <zephyr/kernel.h> #include <zephyr/sys/util.h> #include <soc.h> #include <soc_dt.h> #include "soc_espi.h" #include "espi_utils.h" #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(espi, CONFIG_ESPI_LOG_LEVEL); #define ESPI_IT8XXX2_GET_GCTRL_BASE \ ((struct gctrl_it8xxx2_regs *)DT_REG_ADDR(DT_NODELABEL(gctrl))) #define IT8XXX2_ESPI_IRQ DT_INST_IRQ_BY_IDX(0, 0, irq) #define IT8XXX2_ESPI_VW_IRQ DT_INST_IRQ_BY_IDX(0, 1, irq) #define IT8XXX2_KBC_IBF_IRQ DT_INST_IRQ_BY_IDX(0, 2, irq) #define IT8XXX2_KBC_OBE_IRQ DT_INST_IRQ_BY_IDX(0, 3, irq) #define IT8XXX2_PMC1_IBF_IRQ DT_INST_IRQ_BY_IDX(0, 4, irq) #define IT8XXX2_PORT_80_IRQ DT_INST_IRQ_BY_IDX(0, 5, irq) #define IT8XXX2_PMC2_IBF_IRQ DT_INST_IRQ_BY_IDX(0, 6, irq) #define IT8XXX2_TRANS_IRQ DT_INST_IRQ_BY_IDX(0, 7, irq) /* General Capabilities and Configuration 1 */ #define IT8XXX2_ESPI_MAX_FREQ_MASK GENMASK(2, 0) #define IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_20 0 #define IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_25 1 #define IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_33 2 #define IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_50 3 #define IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_66 4 #define IT8XXX2_ESPI_PC_READY_MASK BIT(1) #define IT8XXX2_ESPI_VW_READY_MASK BIT(1) #define IT8XXX2_ESPI_OOB_READY_MASK BIT(1) #define IT8XXX2_ESPI_FC_READY_MASK BIT(1) #define IT8XXX2_ESPI_INTERRUPT_ENABLE BIT(7) #define IT8XXX2_ESPI_TO_WUC_ENABLE BIT(4) #define IT8XXX2_ESPI_VW_INTERRUPT_ENABLE BIT(7) #define IT8XXX2_ESPI_INTERRUPT_PUT_PC BIT(7) /* * VWCTRL2 register: * bit4 = 1b: Refers to ESPI_RESET# for PLTRST#. 
*/ #define IT8XXX2_ESPI_VW_RESET_PLTRST BIT(4) #define IT8XXX2_ESPI_UPSTREAM_ENABLE BIT(7) #define IT8XXX2_ESPI_UPSTREAM_GO BIT(6) #define IT8XXX2_ESPI_UPSTREAM_INTERRUPT_ENABLE BIT(5) #define IT8XXX2_ESPI_UPSTREAM_CHANNEL_DISABLE BIT(2) #define IT8XXX2_ESPI_UPSTREAM_DONE BIT(1) #define IT8XXX2_ESPI_UPSTREAM_BUSY BIT(0) #define IT8XXX2_ESPI_CYCLE_TYPE_OOB 0x07 #define IT8XXX2_ESPI_PUT_OOB_STATUS BIT(7) #define IT8XXX2_ESPI_PUT_OOB_INTERRUPT_ENABLE BIT(7) #define IT8XXX2_ESPI_PUT_OOB_LEN_MASK GENMASK(6, 0) #define IT8XXX2_ESPI_INPUT_PAD_GATING BIT(6) #define IT8XXX2_ESPI_FLASH_MAX_PAYLOAD_SIZE 64 #define IT8XXX2_ESPI_PUT_FLASH_TAG_MASK GENMASK(7, 4) #define IT8XXX2_ESPI_PUT_FLASH_LEN_MASK GENMASK(6, 0) struct espi_it8xxx2_wuc { /* WUC control device structure */ const struct device *wucs; /* WUC pin mask */ uint8_t mask; }; struct espi_it8xxx2_config { uintptr_t base_espi_slave; uintptr_t base_espi_vw; uintptr_t base_espi_queue1; uintptr_t base_espi_queue0; uintptr_t base_ec2i; uintptr_t base_kbc; uintptr_t base_pmc; uintptr_t base_smfi; const struct espi_it8xxx2_wuc wuc; }; struct espi_it8xxx2_data { sys_slist_t callbacks; #ifdef CONFIG_ESPI_OOB_CHANNEL struct k_sem oob_upstream_go; #endif #ifdef CONFIG_ESPI_FLASH_CHANNEL struct k_sem flash_upstream_go; uint8_t put_flash_cycle_type; uint8_t put_flash_tag; uint8_t put_flash_len; uint8_t flash_buf[IT8XXX2_ESPI_FLASH_MAX_PAYLOAD_SIZE]; #endif }; struct vw_channel_t { uint8_t vw_index; /* VW index of signal */ uint8_t level_mask; /* level bit of signal */ uint8_t valid_mask; /* valid bit of signal */ }; struct vwidx_isr_t { void (*vwidx_isr)(const struct device *dev, uint8_t update_flag); uint8_t vw_index; }; enum espi_ch_enable_isr_type { DEASSERTED_FLAG = 0, ASSERTED_FLAG = 1, }; struct espi_isr_t { void (*espi_isr)(const struct device *dev, bool enable); enum espi_ch_enable_isr_type isr_type; }; struct espi_vw_signal_t { enum espi_vwire_signal signal; void (*vw_signal_isr)(const struct device *dev); }; /* EC2I 
bridge and PNPCFG devices */ static const struct ec2i_t kbc_settings[] = { /* Select logical device 06h(keyboard) */ {HOST_INDEX_LDN, LDN_KBC_KEYBOARD}, /* Set IRQ=01h for logical device */ {HOST_INDEX_IRQNUMX, 0x01}, /* Configure IRQTP for KBC. */ /* * Interrupt request type select (IRQTP) for KBC. * bit 1, 0: IRQ request is buffered and applied to SERIRQ * 1: IRQ request is inverted before being applied to SERIRQ * bit 0, 0: Edge triggered mode * 1: Level triggered mode * * This interrupt configuration should the same on both host and EC side */ {HOST_INDEX_IRQTP, 0x02}, /* Enable logical device */ {HOST_INDEX_LDA, 0x01}, #ifdef CONFIG_ESPI_IT8XXX2_PNPCFG_DEVICE_KBC_MOUSE /* Select logical device 05h(mouse) */ {HOST_INDEX_LDN, LDN_KBC_MOUSE}, /* Set IRQ=0Ch for logical device */ {HOST_INDEX_IRQNUMX, 0x0C}, /* Enable logical device */ {HOST_INDEX_LDA, 0x01}, #endif }; static const struct ec2i_t pmc1_settings[] = { /* Select logical device 11h(PM1 ACPI) */ {HOST_INDEX_LDN, LDN_PMC1}, /* Set IRQ=00h for logical device */ {HOST_INDEX_IRQNUMX, 0x00}, /* Enable logical device */ {HOST_INDEX_LDA, 0x01}, }; #ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD #define IT8XXX2_ESPI_HC_DATA_PORT_MSB \ ((CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM >> 8) & 0xff) #define IT8XXX2_ESPI_HC_DATA_PORT_LSB \ (CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM & 0xff) #define IT8XXX2_ESPI_HC_CMD_PORT_MSB \ (((CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM + 4) >> 8) & 0xff) #define IT8XXX2_ESPI_HC_CMD_PORT_LSB \ ((CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM + 4) & 0xff) static const struct ec2i_t pmc2_settings[] = { /* Select logical device 12h(PM2 host command) */ {HOST_INDEX_LDN, LDN_PMC2}, /* I/O Port Base Address (data/command ports) */ {HOST_INDEX_IOBAD0_MSB, IT8XXX2_ESPI_HC_DATA_PORT_MSB}, {HOST_INDEX_IOBAD0_LSB, IT8XXX2_ESPI_HC_DATA_PORT_LSB}, {HOST_INDEX_IOBAD1_MSB, IT8XXX2_ESPI_HC_CMD_PORT_MSB}, {HOST_INDEX_IOBAD1_LSB, IT8XXX2_ESPI_HC_CMD_PORT_LSB}, /* Set IRQ=00h for logical device */ 
{HOST_INDEX_IRQNUMX, 0x00}, /* Enable logical device */ {HOST_INDEX_LDA, 0x01}, }; #endif #if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) || \ defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION) /* * Host to RAM (H2RAM) memory mapping. * This feature allows host access EC's memory directly by eSPI I/O cycles. * Mapping range is 4K bytes and base address is adjustable. * Eg. the I/O cycle 800h~8ffh from host can be mapped to x800h~x8ffh. * Linker script will make the pool 4K aligned. */ #define IT8XXX2_ESPI_H2RAM_POOL_SIZE_MAX 0x1000 #define IT8XXX2_ESPI_H2RAM_OFFSET_MASK GENMASK(5, 0) #define IT8XXX2_ESPI_H2RAM_BASEADDR_MASK (KB(CONFIG_SRAM_SIZE) - 1) #if defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION) #define H2RAM_ACPI_SHM_MAX ((CONFIG_ESPI_IT8XXX2_ACPI_SHM_H2RAM_SIZE) + \ (CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION_PORT_NUM)) #if (H2RAM_ACPI_SHM_MAX > IT8XXX2_ESPI_H2RAM_POOL_SIZE_MAX) #error "ACPI shared memory region out of h2ram" #endif #endif /* CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION */ #if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) #define H2RAM_EC_HOST_CMD_MAX ((CONFIG_ESPI_IT8XXX2_HC_H2RAM_SIZE) + \ (CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM)) #if (H2RAM_EC_HOST_CMD_MAX > IT8XXX2_ESPI_H2RAM_POOL_SIZE_MAX) #error "EC host command parameters out of h2ram" #endif #endif /* CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD */ #if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) && \ defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION) #if (MIN(H2RAM_ACPI_SHM_MAX, H2RAM_EC_HOST_CMD_MAX) > \ MAX(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION_PORT_NUM, \ CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM)) #error "ACPI and HC sections of h2ram overlap" #endif #endif static uint8_t h2ram_pool[MAX(H2RAM_ACPI_SHM_MAX, H2RAM_EC_HOST_CMD_MAX)] __attribute__((section(".h2ram_pool"))); #define H2RAM_WINDOW_SIZE(ram_size) ((find_msb_set((ram_size) / 16) - 1) & 0x7) static const struct ec2i_t smfi_settings[] = { /* Select logical device 0Fh(SMFI) */ {HOST_INDEX_LDN, LDN_SMFI}, /* Internal RAM base address on 
eSPI I/O space */ {HOST_INDEX_DSLDC6, 0x00}, /* Enable H2RAM eSPI I/O cycle */ {HOST_INDEX_DSLDC7, 0x01}, /* Enable logical device */ {HOST_INDEX_LDA, 0x01}, }; static void smfi_it8xxx2_init(const struct device *dev) { const struct espi_it8xxx2_config *const config = dev->config; struct smfi_it8xxx2_regs *const smfi_reg = (struct smfi_it8xxx2_regs *)config->base_smfi; struct gctrl_it8xxx2_regs *const gctrl = ESPI_IT8XXX2_GET_GCTRL_BASE; uint8_t h2ram_offset; /* Set the host to RAM cycle address offset */ h2ram_offset = ((uint32_t)h2ram_pool & IT8XXX2_ESPI_H2RAM_BASEADDR_MASK) / IT8XXX2_ESPI_H2RAM_POOL_SIZE_MAX; gctrl->GCTRL_H2ROFSR = (gctrl->GCTRL_H2ROFSR & ~IT8XXX2_ESPI_H2RAM_OFFSET_MASK) | h2ram_offset; #ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD memset(&h2ram_pool[CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM], 0, CONFIG_ESPI_IT8XXX2_HC_H2RAM_SIZE); /* Set host RAM window 0 base address */ smfi_reg->SMFI_HRAMW0BA = (CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM >> 4) & 0xff; /* Set host RAM window 0 size. (allow R/W) */ smfi_reg->SMFI_HRAMW0AAS = H2RAM_WINDOW_SIZE(CONFIG_ESPI_IT8XXX2_HC_H2RAM_SIZE); /* Enable window 0, H2RAM through IO cycle */ smfi_reg->SMFI_HRAMWC |= (SMFI_H2RAMPS | SMFI_H2RAMW0E); #endif #ifdef CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION memset(&h2ram_pool[CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION_PORT_NUM], 0, CONFIG_ESPI_IT8XXX2_ACPI_SHM_H2RAM_SIZE); /* Set host RAM window 1 base address */ smfi_reg->SMFI_HRAMW1BA = (CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION_PORT_NUM >> 4) & 0xff; /* Set host RAM window 1 size. 
(read-only) */ smfi_reg->SMFI_HRAMW1AAS = H2RAM_WINDOW_SIZE(CONFIG_ESPI_IT8XXX2_ACPI_SHM_H2RAM_SIZE) | SMFI_HRAMWXWPE_ALL; /* Enable window 1, H2RAM through IO cycle */ smfi_reg->SMFI_HRAMWC |= (SMFI_H2RAMPS | SMFI_H2RAMW1E); #endif } #endif /* CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD || * CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION */ static void ec2i_it8xxx2_wait_status_cleared(const struct device *dev, uint8_t mask) { const struct espi_it8xxx2_config *const config = dev->config; struct ec2i_regs *const ec2i = (struct ec2i_regs *)config->base_ec2i; while (ec2i->IBCTL & mask) { ; } } static void ec2i_it8xxx2_write_pnpcfg(const struct device *dev, enum ec2i_access sel, uint8_t data) { const struct espi_it8xxx2_config *const config = dev->config; struct ec2i_regs *const ec2i = (struct ec2i_regs *)config->base_ec2i; /* bit0: EC to I-Bus access enabled. */ ec2i->IBCTL |= EC2I_IBCTL_CSAE; /* * Wait that both CRIB and CWIB bits in IBCTL register * are cleared. */ ec2i_it8xxx2_wait_status_cleared(dev, EC2I_IBCTL_CRWIB); /* Enable EC access to the PNPCFG registers */ ec2i->IBMAE |= EC2I_IBMAE_CFGAE; /* Set indirect host I/O offset. */ ec2i->IHIOA = sel; /* Write the data to IHD register */ ec2i->IHD = data; /* Wait the CWIB bit in IBCTL cleared. */ ec2i_it8xxx2_wait_status_cleared(dev, EC2I_IBCTL_CWIB); /* Disable EC access to the PNPCFG registers. */ ec2i->IBMAE &= ~EC2I_IBMAE_CFGAE; /* Disable EC to I-Bus access. 
*/ ec2i->IBCTL &= ~EC2I_IBCTL_CSAE; } static void ec2i_it8xxx2_write(const struct device *dev, enum host_pnpcfg_index index, uint8_t data) { /* Set index */ ec2i_it8xxx2_write_pnpcfg(dev, EC2I_ACCESS_INDEX, index); /* Set data */ ec2i_it8xxx2_write_pnpcfg(dev, EC2I_ACCESS_DATA, data); } static void pnpcfg_it8xxx2_configure(const struct device *dev, const struct ec2i_t *settings, size_t entries) { for (size_t i = 0; i < entries; i++) { ec2i_it8xxx2_write(dev, settings[i].index_port, settings[i].data_port); } } #define PNPCFG(_s) \ pnpcfg_it8xxx2_configure(dev, _s##_settings, ARRAY_SIZE(_s##_settings)) static void pnpcfg_it8xxx2_init(const struct device *dev) { const struct espi_it8xxx2_config *const config = dev->config; struct ec2i_regs *const ec2i = (struct ec2i_regs *)config->base_ec2i; struct gctrl_it8xxx2_regs *const gctrl = ESPI_IT8XXX2_GET_GCTRL_BASE; /* The register pair to access PNPCFG is 004Eh and 004Fh */ gctrl->GCTRL_BADRSEL = 0x1; /* Host access is disabled */ ec2i->LSIOHA |= 0x3; /* configure pnpcfg devices */ if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_8042_KBC)) { PNPCFG(kbc); } if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_HOST_IO)) { PNPCFG(pmc1); } #ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD PNPCFG(pmc2); #endif #if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) || \ defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION) PNPCFG(smfi); #endif } /* KBC (port 60h/64h) */ #ifdef CONFIG_ESPI_PERIPHERAL_8042_KBC static void kbc_it8xxx2_ibf_isr(const struct device *dev) { const struct espi_it8xxx2_config *const config = dev->config; struct espi_it8xxx2_data *const data = dev->data; struct kbc_regs *const kbc_reg = (struct kbc_regs *)config->base_kbc; struct espi_event evt = { ESPI_BUS_PERIPHERAL_NOTIFICATION, ESPI_PERIPHERAL_8042_KBC, ESPI_PERIPHERAL_NODATA }; struct espi_evt_data_kbc *kbc_evt = (struct espi_evt_data_kbc *)&evt.evt_data; /* KBC Input Buffer Full event */ kbc_evt->evt = HOST_KBC_EVT_IBF; /* * Indicates if the host sent a command or data. * 0 = data * 1 = Command. 
	 */
	kbc_evt->type = !!(kbc_reg->KBHISR & KBC_KBHISR_A2_ADDR);
	/* The data in KBC Input Buffer */
	kbc_evt->data = kbc_reg->KBHIDIR;

	espi_send_callbacks(&data->callbacks, dev, evt);
}

/*
 * 8042 KBC output-buffer-empty ISR: the host has read the byte the EC
 * placed in the output buffer.  Disables the OBE interrupt (it is
 * re-enabled when the EC queues the next byte) and notifies the
 * application via the registered eSPI callbacks.
 */
static void kbc_it8xxx2_obe_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_it8xxx2_data *const data = dev->data;
	struct kbc_regs *const kbc_reg =
		(struct kbc_regs *)config->base_kbc;
	struct espi_event evt = {
		ESPI_BUS_PERIPHERAL_NOTIFICATION,
		ESPI_PERIPHERAL_8042_KBC,
		ESPI_PERIPHERAL_NODATA
	};
	struct espi_evt_data_kbc *kbc_evt =
		(struct espi_evt_data_kbc *)&evt.evt_data;

	/* Disable KBC OBE interrupt first */
	kbc_reg->KBHICR &= ~KBC_KBHICR_OBECIE;

	/* Notify application that host already read out data. */
	kbc_evt->evt = HOST_KBC_EVT_OBE;
	kbc_evt->data = 0;
	kbc_evt->type = 0;

	espi_send_callbacks(&data->callbacks, dev, evt);
}

/*
 * Configure the 8042 KBC host interface: mask SERIRQ routing, enable the
 * input-buffer-full and output-buffer-full interrupt sources, and hook up
 * the IBF/OBE ISRs.
 */
static void kbc_it8xxx2_init(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct kbc_regs *const kbc_reg =
		(struct kbc_regs *)config->base_kbc;

	/* Disable KBC serirq IRQ */
	kbc_reg->KBIRQR = 0;

	/*
	 * bit3: Input Buffer Full CPU Interrupt Enable.
	 * bit1: Enable the interrupt to mouse driver in the host processor via
	 * SERIRQ when the output buffer is full.
	 * bit0: Enable the interrupt to keyboard driver in the host processor
	 * via SERIRQ when the output buffer is full
	 */
	kbc_reg->KBHICR |= (KBC_KBHICR_IBFCIE |
				KBC_KBHICR_OBFKIE |
				KBC_KBHICR_OBFMIE);

	/* Input Buffer Full CPU Interrupt Enable.
	 */
	IRQ_CONNECT(IT8XXX2_KBC_IBF_IRQ, 0, kbc_it8xxx2_ibf_isr,
			DEVICE_DT_INST_GET(0), 0);
	irq_enable(IT8XXX2_KBC_IBF_IRQ);

	/* Output Buffer Empty CPU Interrupt Enable */
	IRQ_CONNECT(IT8XXX2_KBC_OBE_IRQ, 0, kbc_it8xxx2_obe_isr,
			DEVICE_DT_INST_GET(0), 0);
	irq_enable(IT8XXX2_KBC_OBE_IRQ);
}
#endif

/* PMC 1 (APCI port 62h/66h) */
#ifdef CONFIG_ESPI_PERIPHERAL_HOST_IO
/*
 * PMC1 (ACPI I/O ports 62h/66h) input-buffer-full ISR: reads the byte the
 * host wrote, records whether it was a command or data, and forwards it to
 * registered eSPI callbacks.
 */
static void pmc1_it8xxx2_ibf_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_it8xxx2_data *const data = dev->data;
	struct pmc_regs *const pmc_reg =
		(struct pmc_regs *)config->base_pmc;
	struct espi_event evt = {
		ESPI_BUS_PERIPHERAL_NOTIFICATION,
		ESPI_PERIPHERAL_HOST_IO,
		ESPI_PERIPHERAL_NODATA
	};
	struct espi_evt_data_acpi *acpi_evt =
		(struct espi_evt_data_acpi *)&evt.evt_data;

	/*
	 * Indicates if the host sent a command or data.
	 * 0 = data
	 * 1 = Command.
	 */
	acpi_evt->type = !!(pmc_reg->PM1STS & PMC_PM1STS_A2_ADDR);
	/* Set processing flag before reading command byte */
	pmc_reg->PM1STS |= PMC_PM1STS_GPF;
	acpi_evt->data = pmc_reg->PM1DI;

	espi_send_callbacks(&data->callbacks, dev, evt);
}

/* Enable the PMC1 IBF interrupt source and connect its ISR. */
static void pmc1_it8xxx2_init(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct pmc_regs *const pmc_reg =
		(struct pmc_regs *)config->base_pmc;

	/* Enable pmc1 input buffer full interrupt */
	pmc_reg->PM1CTL |= PMC_PM1CTL_IBFIE;
	IRQ_CONNECT(IT8XXX2_PMC1_IBF_IRQ, 0, pmc1_it8xxx2_ibf_isr,
			DEVICE_DT_INST_GET(0), 0);
	irq_enable(IT8XXX2_PMC1_IBF_IRQ);
}
#endif

/* Port 80 */
#ifdef CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80
/*
 * Debug port 80h (optionally 80h+81h) ISR: captures the host-written POST
 * code(s), clears the hardware status bit, and reports the value through
 * the eSPI callback chain.
 */
static void port80_it8xxx2_isr(const struct device *dev)
{
	struct espi_it8xxx2_data *const data = dev->data;
	struct gctrl_it8xxx2_regs *const gctrl = ESPI_IT8XXX2_GET_GCTRL_BASE;
	struct espi_event evt = {
		ESPI_BUS_PERIPHERAL_NOTIFICATION,
		(ESPI_PERIPHERAL_INDEX_0 << 16) | ESPI_PERIPHERAL_DEBUG_PORT80,
		ESPI_PERIPHERAL_NODATA
	};

	if (IS_ENABLED(CONFIG_ESPI_IT8XXX2_PORT_81_CYCLE)) {
		/* Port 81h byte is packed into bits [15:8] of the event. */
		evt.evt_data = gctrl->GCTRL_P80HDR |
				(gctrl->GCTRL_P81HDR << 8);
	} else {
		evt.evt_data = gctrl->GCTRL_P80HDR;
	}
	/* Write 1 to clear this bit */
	gctrl->GCTRL_P80H81HSR |= BIT(0);

	espi_send_callbacks(&data->callbacks, dev, evt);
}

/* Accept host port-80h (and optionally 81h) cycles and enable the ISR. */
static void port80_it8xxx2_init(const struct device *dev)
{
	ARG_UNUSED(dev);
	struct gctrl_it8xxx2_regs *const gctrl = ESPI_IT8XXX2_GET_GCTRL_BASE;

	/* Accept Port 80h (and 81h) Cycle */
	if (IS_ENABLED(CONFIG_ESPI_IT8XXX2_PORT_81_CYCLE)) {
		gctrl->GCTRL_SPCTRL1 |= (IT8XXX2_GCTRL_ACP80 |
					 IT8XXX2_GCTRL_ACP81);
	} else {
		gctrl->GCTRL_SPCTRL1 |= IT8XXX2_GCTRL_ACP80;
	}
	IRQ_CONNECT(IT8XXX2_PORT_80_IRQ, 0, port80_it8xxx2_isr,
			DEVICE_DT_INST_GET(0), 0);
	irq_enable(IT8XXX2_PORT_80_IRQ);
}
#endif

#ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD
/* PMC 2 (Host command port CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM) */
/*
 * PMC2 host-command input-buffer-full ISR: latches the processing flag,
 * reads the host command byte, and forwards it as an EC_HOST_CMD event.
 */
static void pmc2_it8xxx2_ibf_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_it8xxx2_data *const data = dev->data;
	struct pmc_regs *const pmc_reg =
		(struct pmc_regs *)config->base_pmc;
	struct espi_event evt = {
		ESPI_BUS_PERIPHERAL_NOTIFICATION,
		ESPI_PERIPHERAL_EC_HOST_CMD,
		ESPI_PERIPHERAL_NODATA
	};

	/* Set processing flag before reading command byte */
	pmc_reg->PM2STS |= PMC_PM2STS_GPF;
	evt.evt_data = pmc_reg->PM2DI;

	espi_send_callbacks(&data->callbacks, dev, evt);
}

/* Route PMC2 to its own interrupt line, enable IBF, and connect the ISR. */
static void pmc2_it8xxx2_init(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct pmc_regs *const pmc_reg =
		(struct pmc_regs *)config->base_pmc;

	/* Dedicated interrupt for PMC2 */
	pmc_reg->MBXCTRL |= PMC_MBXCTRL_DINT;
	/* Enable pmc2 input buffer full interrupt */
	pmc_reg->PM2CTL |= PMC_PM2CTL_IBFIE;
	IRQ_CONNECT(IT8XXX2_PMC2_IBF_IRQ, 0, pmc2_it8xxx2_ibf_isr,
			DEVICE_DT_INST_GET(0), 0);
	irq_enable(IT8XXX2_PMC2_IBF_IRQ);
}
#endif

/* eSPI api functions */
/* Build one vw_channel_t entry keyed by the Zephyr eSPI signal enum. */
#define VW_CHAN(signal, index, level, valid) \
	[signal] = {.vw_index = index, .level_mask = level, .valid_mask = valid}

/* VW signals used in eSPI */
static const struct
vw_channel_t vw_channel_list[] = {
	/*
	 * Map each Zephyr virtual-wire signal to its eSPI VW index and to the
	 * level/valid bit masks inside that index register.
	 */
	VW_CHAN(ESPI_VWIRE_SIGNAL_SLP_S3, 0x02, BIT(0), BIT(4)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_SLP_S4, 0x02, BIT(1), BIT(5)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_SLP_S5, 0x02, BIT(2), BIT(6)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_OOB_RST_WARN, 0x03, BIT(2), BIT(6)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_PLTRST, 0x03, BIT(1), BIT(5)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_SUS_STAT, 0x03, BIT(0), BIT(4)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_NMIOUT, 0x07, BIT(2), BIT(6)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_SMIOUT, 0x07, BIT(1), BIT(5)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_HOST_RST_WARN, 0x07, BIT(0), BIT(4)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_SLP_A, 0x41, BIT(3), BIT(7)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_SUS_PWRDN_ACK, 0x41, BIT(1), BIT(5)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_SUS_WARN, 0x41, BIT(0), BIT(4)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_SLP_WLAN, 0x42, BIT(1), BIT(5)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_SLP_LAN, 0x42, BIT(0), BIT(4)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_HOST_C10, 0x47, BIT(0), BIT(4)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_DNX_WARN, 0x4a, BIT(1), BIT(5)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_PME, 0x04, BIT(3), BIT(7)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_WAKE, 0x04, BIT(2), BIT(6)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_OOB_RST_ACK, 0x04, BIT(0), BIT(4)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_TARGET_BOOT_STS, 0x05, BIT(3), BIT(7)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_ERR_NON_FATAL, 0x05, BIT(2), BIT(6)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_ERR_FATAL, 0x05, BIT(1), BIT(5)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_TARGET_BOOT_DONE, 0x05, BIT(0), BIT(4)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_HOST_RST_ACK, 0x06, BIT(3), BIT(7)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_RST_CPU_INIT, 0x06, BIT(2), BIT(6)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_SMI, 0x06, BIT(1), BIT(5)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_SCI, 0x06, BIT(0), BIT(4)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_DNX_ACK, 0x40, BIT(1), BIT(5)),
	VW_CHAN(ESPI_VWIRE_SIGNAL_SUS_ACK, 0x40, BIT(0), BIT(4)),
};

/*
 * eSPI API: apply the requested bus configuration.  Only the maximum
 * frequency is programmable on this controller; I/O mode and channel
 * support capability registers are read-only (see body comments).
 */
static int espi_it8xxx2_configure(const struct device *dev,
					struct espi_cfg *cfg)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;
uint8_t capcfg1 = 0; /* Set frequency */ switch (cfg->max_freq) { case 20: capcfg1 = IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_20; break; case 25: capcfg1 = IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_25; break; case 33: capcfg1 = IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_33; break; case 50: capcfg1 = IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_50; break; case 66: capcfg1 = IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_66; break; default: return -EINVAL; } slave_reg->GCAPCFG1 = (slave_reg->GCAPCFG1 & ~IT8XXX2_ESPI_MAX_FREQ_MASK) | capcfg1; /* * Configure eSPI I/O mode. (Register read only) * Supported I/O mode : single, dual and quad. */ /* Configure eSPI supported channels. (Register read only) * Supported channels: peripheral, virtual wire, OOB, and flash access. */ return 0; } static bool espi_it8xxx2_channel_ready(const struct device *dev, enum espi_channel ch) { const struct espi_it8xxx2_config *const config = dev->config; struct espi_slave_regs *const slave_reg = (struct espi_slave_regs *)config->base_espi_slave; bool sts = false; switch (ch) { case ESPI_CHANNEL_PERIPHERAL: sts = slave_reg->CH_PC_CAPCFG3 & IT8XXX2_ESPI_PC_READY_MASK; break; case ESPI_CHANNEL_VWIRE: sts = slave_reg->CH_VW_CAPCFG3 & IT8XXX2_ESPI_VW_READY_MASK; break; case ESPI_CHANNEL_OOB: sts = slave_reg->CH_OOB_CAPCFG3 & IT8XXX2_ESPI_OOB_READY_MASK; break; case ESPI_CHANNEL_FLASH: sts = slave_reg->CH_FLASH_CAPCFG3 & IT8XXX2_ESPI_FC_READY_MASK; break; default: break; } return sts; } static int espi_it8xxx2_send_vwire(const struct device *dev, enum espi_vwire_signal signal, uint8_t level) { const struct espi_it8xxx2_config *const config = dev->config; struct espi_vw_regs *const vw_reg = (struct espi_vw_regs *)config->base_espi_vw; uint8_t vw_index = vw_channel_list[signal].vw_index; uint8_t level_mask = vw_channel_list[signal].level_mask; uint8_t valid_mask = vw_channel_list[signal].valid_mask; if (signal > ARRAY_SIZE(vw_channel_list)) { return -EIO; } if (level) { vw_reg->VW_INDEX[vw_index] |= level_mask; } else { vw_reg->VW_INDEX[vw_index] &= ~level_mask; } 
vw_reg->VW_INDEX[vw_index] |= valid_mask; return 0; } static int espi_it8xxx2_receive_vwire(const struct device *dev, enum espi_vwire_signal signal, uint8_t *level) { const struct espi_it8xxx2_config *const config = dev->config; struct espi_vw_regs *const vw_reg = (struct espi_vw_regs *)config->base_espi_vw; uint8_t vw_index = vw_channel_list[signal].vw_index; uint8_t level_mask = vw_channel_list[signal].level_mask; uint8_t valid_mask = vw_channel_list[signal].valid_mask; if (signal > ARRAY_SIZE(vw_channel_list)) { return -EIO; } if (vw_reg->VW_INDEX[vw_index] & valid_mask) { *level = !!(vw_reg->VW_INDEX[vw_index] & level_mask); } else { /* Not valid */ *level = 0; } return 0; } static int espi_it8xxx2_manage_callback(const struct device *dev, struct espi_callback *callback, bool set) { struct espi_it8xxx2_data *const data = dev->data; return espi_manage_callback(&data->callbacks, callback, set); } static int espi_it8xxx2_read_lpc_request(const struct device *dev, enum lpc_peripheral_opcode op, uint32_t *data) { const struct espi_it8xxx2_config *const config = dev->config; if (op >= E8042_START_OPCODE && op <= E8042_MAX_OPCODE) { struct kbc_regs *const kbc_reg = (struct kbc_regs *)config->base_kbc; switch (op) { case E8042_OBF_HAS_CHAR: /* EC has written data back to host. OBF is * automatically cleared after host reads * the data */ *data = !!(kbc_reg->KBHISR & KBC_KBHISR_OBF); break; case E8042_IBF_HAS_CHAR: *data = !!(kbc_reg->KBHISR & KBC_KBHISR_IBF); break; case E8042_READ_KB_STS: *data = kbc_reg->KBHISR; break; default: return -EINVAL; } } else if (op >= EACPI_START_OPCODE && op <= EACPI_MAX_OPCODE) { struct pmc_regs *const pmc_reg = (struct pmc_regs *)config->base_pmc; switch (op) { case EACPI_OBF_HAS_CHAR: /* EC has written data back to host. 
			 * OBF is
			 * automatically cleared after host reads
			 * the data
			 */
			*data = !!(pmc_reg->PM1STS & PMC_PM1STS_OBF);
			break;
		case EACPI_IBF_HAS_CHAR:
			*data = !!(pmc_reg->PM1STS & PMC_PM1STS_IBF);
			break;
		case EACPI_READ_STS:
			*data = pmc_reg->PM1STS;
			break;
#ifdef CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION
		case EACPI_GET_SHARED_MEMORY:
			/* Hand out the host-to-RAM window for the ACPI shm region. */
			*data = (uint32_t)&h2ram_pool[
				CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION_PORT_NUM];
			break;
#endif /* CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION */
		default:
			return -EINVAL;
		}
	}
#ifdef CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE
	else if (op >= ECUSTOM_START_OPCODE && op <= ECUSTOM_MAX_OPCODE) {

		switch (op) {
		case ECUSTOM_HOST_CMD_GET_PARAM_MEMORY:
			*data = (uint32_t)&h2ram_pool[
				CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM];
			break;
		case ECUSTOM_HOST_CMD_GET_PARAM_MEMORY_SIZE:
			*data = CONFIG_ESPI_IT8XXX2_HC_H2RAM_SIZE;
			break;
		default:
			return -EINVAL;
		}
	}
#endif /* CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE */
	else {
		return -ENOTSUP;
	}

	return 0;
}

/*
 * eSPI API: service host-visible LPC peripheral write opcodes (8042 KBC,
 * ACPI PM channel, and optional custom opcodes).
 */
static int espi_it8xxx2_write_lpc_request(const struct device *dev,
					enum lpc_peripheral_opcode op,
					uint32_t *data)
{
	const struct espi_it8xxx2_config *const config = dev->config;

	if (op >= E8042_START_OPCODE && op <= E8042_MAX_OPCODE) {
		struct kbc_regs *const kbc_reg =
			(struct kbc_regs *)config->base_kbc;

		switch (op) {
		case E8042_WRITE_KB_CHAR:
			kbc_reg->KBHIKDOR = (*data & 0xff);
			/*
			 * Enable OBE interrupt after putting data in
			 * data register.
			 */
			kbc_reg->KBHICR |= KBC_KBHICR_OBECIE;
			break;
		case E8042_WRITE_MB_CHAR:
			kbc_reg->KBHIMDOR = (*data & 0xff);
			/*
			 * Enable OBE interrupt after putting data in
			 * data register.
			 */
			kbc_reg->KBHICR |= KBC_KBHICR_OBECIE;
			break;
		case E8042_RESUME_IRQ:
			/* Enable KBC IBF interrupt */
			irq_enable(IT8XXX2_KBC_IBF_IRQ);
			break;
		case E8042_PAUSE_IRQ:
			/* Disable KBC IBF interrupt */
			irq_disable(IT8XXX2_KBC_IBF_IRQ);
			break;
		case E8042_CLEAR_OBF:
			volatile uint8_t _kbhicr __unused;
			/*
			 * After enabling IBF/OBF clear mode, we have to make
			 * sure that IBF interrupt is not triggered before
			 * disabling the clear mode. Or the interrupt will keep
			 * triggering until the watchdog is reset.
			 */
			unsigned int key = irq_lock();
			/*
			 * When IBFOBFCME is enabled, write 1 to COBF bit to
			 * clear KBC OBF.
			 */
			kbc_reg->KBHICR |= KBC_KBHICR_IBFOBFCME;
			kbc_reg->KBHICR |= KBC_KBHICR_COBF;
			kbc_reg->KBHICR &= ~KBC_KBHICR_COBF;
			/* Disable clear mode */
			kbc_reg->KBHICR &= ~KBC_KBHICR_IBFOBFCME;
			/*
			 * I/O access synchronization, this load operation will
			 * guarantee the above modification of SOC's register
			 * can be seen by any following instructions.
			 */
			_kbhicr = kbc_reg->KBHICR;
			irq_unlock(key);
			break;
		case E8042_SET_FLAG:
			kbc_reg->KBHISR |= (*data & 0xff);
			break;
		case E8042_CLEAR_FLAG:
			kbc_reg->KBHISR &= ~(*data & 0xff);
			break;
		default:
			return -EINVAL;
		}
	} else if (op >= EACPI_START_OPCODE && op <= EACPI_MAX_OPCODE) {
		struct pmc_regs *const pmc_reg =
			(struct pmc_regs *)config->base_pmc;

		switch (op) {
		case EACPI_WRITE_CHAR:
			pmc_reg->PM1DO = (*data & 0xff);
			break;
		case EACPI_WRITE_STS:
			pmc_reg->PM1STS = (*data & 0xff);
			break;
		default:
			return -EINVAL;
		}
	}
#ifdef CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE
	else if (op >= ECUSTOM_START_OPCODE && op <= ECUSTOM_MAX_OPCODE) {
		struct pmc_regs *const pmc_reg =
			(struct pmc_regs *)config->base_pmc;

		switch (op) {
		/* Enable/Disable PMC1 (port 62h/66h) interrupt */
		case ECUSTOM_HOST_SUBS_INTERRUPT_EN:
			if (*data) {
				irq_enable(IT8XXX2_PMC1_IBF_IRQ);
			} else {
				irq_disable(IT8XXX2_PMC1_IBF_IRQ);
			}
			break;
		case ECUSTOM_HOST_CMD_SEND_RESULT:
			/* Write result to data output port (set OBF status) */
			pmc_reg->PM2DO = (*data & 0xff);
			/* Clear processing flag */
			pmc_reg->PM2STS &= ~PMC_PM2STS_GPF;
			break;
		default:
			return -EINVAL;
		}
	}
#endif /* CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE */
	else {
		return -ENOTSUP;
	}

	return 0;
}

#ifdef CONFIG_ESPI_OOB_CHANNEL
/* eSPI cycle type field */
#define ESPI_OOB_CYCLE_TYPE 0x21
#define ESPI_OOB_TAG 0x00
#define ESPI_OOB_TIMEOUT_MS 200

/* eSPI tag + len[11:8] field */
#define ESPI_TAG_LEN_FIELD(tag, len) \
	((((tag) & 0xF) << 4) | (((len) >> 8) & 0xF))

/* Thin view of an OOB payload buffer as a byte array. */
struct espi_oob_msg_packet {
	FLEXIBLE_ARRAY_DECLARE(uint8_t, data_byte);
};

/*
 * eSPI API: transmit an OOB message upstream.  Fails when the OOB channel
 * is not ready, a previous upstream transfer is still in flight, or the
 * payload exceeds the hardware queue size.
 */
static int espi_it8xxx2_send_oob(const struct device *dev,
				struct espi_oob_packet *pckt)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;
	struct espi_queue1_regs *const queue1_reg =
		(struct espi_queue1_regs *)config->base_espi_queue1;
	struct espi_oob_msg_packet *oob_pckt =
		(struct espi_oob_msg_packet *)pckt->buf;

	if (!(slave_reg->CH_OOB_CAPCFG3 & IT8XXX2_ESPI_OOB_READY_MASK)) {
		LOG_ERR("%s: OOB channel isn't ready", __func__);
		return -EIO;
	}

	if (slave_reg->ESUCTRL0 & IT8XXX2_ESPI_UPSTREAM_BUSY) {
		LOG_ERR("%s: OOB upstream busy", __func__);
		return -EIO;
	}

	if (pckt->len > ESPI_IT8XXX2_OOB_MAX_PAYLOAD_SIZE) {
		LOG_ERR("%s: Out of OOB queue space", __func__);
		return -EINVAL;
	}

	/* Set cycle type */
	slave_reg->ESUCTRL1 = IT8XXX2_ESPI_CYCLE_TYPE_OOB;
	/* Set tag and length[11:8] */
	slave_reg->ESUCTRL2 = ESPI_TAG_LEN_FIELD(0, pckt->len);
	/* Set length [7:0] */
	slave_reg->ESUCTRL3 = pckt->len & 0xff;

	/* Set data byte */
	for (int i = 0; i < pckt->len; i++) {
		queue1_reg->UPSTREAM_DATA[i] = oob_pckt->data_byte[i];
	}

	/* Set upstream enable */
	slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_ENABLE;
	/* Set upstream go */
	slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_GO;

	return 0;
}

/*
 * eSPI API: fetch a PUT_OOB message from the downstream queue.  In
 * synchronous mode this blocks (up to ESPI_OOB_TIMEOUT_MS) until the
 * PUT_OOB ISR signals arrival; in async mode the caller is expected to
 * invoke this from its OOB-received callback.
 */
static int espi_it8xxx2_receive_oob(const struct device *dev,
				struct espi_oob_packet *pckt)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;
	struct espi_queue0_regs *const queue0_reg =
		(struct espi_queue0_regs *)config->base_espi_queue0;
	struct espi_oob_msg_packet *oob_pckt =
		(struct espi_oob_msg_packet *)pckt->buf;
	uint8_t oob_len;

	if (!(slave_reg->CH_OOB_CAPCFG3 & IT8XXX2_ESPI_OOB_READY_MASK)) {
		LOG_ERR("%s: OOB channel isn't ready", __func__);
		return -EIO;
	}

#ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
	struct espi_it8xxx2_data *const data = dev->data;
	int ret;

	/* Wait until receive OOB message or timeout */
	ret = k_sem_take(&data->oob_upstream_go, K_MSEC(ESPI_OOB_TIMEOUT_MS));
	if (ret == -EAGAIN) {
		LOG_ERR("%s: Timeout", __func__);
		return -ETIMEDOUT;
	}
#endif

	/* Get length */
	oob_len = (slave_reg->ESOCTRL4 & IT8XXX2_ESPI_PUT_OOB_LEN_MASK);
	/*
	 * Buffer passed to driver isn't enough.
	 * The first three bytes of buffer are cycle type, tag, and length.
	 */
	if (oob_len > pckt->len) {
		LOG_ERR("%s: Out of rx buf %d vs %d", __func__,
			oob_len, pckt->len);
		return -EINVAL;
	}

	pckt->len = oob_len;
	/* Get data byte */
	for (int i = 0; i < oob_len; i++) {
		oob_pckt->data_byte[i] = queue0_reg->PUT_OOB_DATA[i];
	}

	return 0;
}

/* Enable the OOB upstream-done and PUT_OOB interrupt sources. */
static void espi_it8xxx2_oob_init(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;

#ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
	struct espi_it8xxx2_data *const data = dev->data;

	k_sem_init(&data->oob_upstream_go, 0, 1);
#endif

	/* Upstream interrupt enable */
	slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_INTERRUPT_ENABLE;

	/* PUT_OOB interrupt enable */
	slave_reg->ESOCTRL1 |= IT8XXX2_ESPI_PUT_OOB_INTERRUPT_ENABLE;
}
#endif

#ifdef CONFIG_ESPI_FLASH_CHANNEL
#define ESPI_FLASH_TAG 0x01
#define ESPI_FLASH_READ_TIMEOUT_MS 200
#define ESPI_FLASH_WRITE_TIMEOUT_MS 500
#define ESPI_FLASH_ERASE_TIMEOUT_MS 1000

/* Successful completion without data */
#define ESPI_IT8XXX2_PUT_FLASH_C_SCWOD 0
/* Successful completion with data */
#define \
ESPI_IT8XXX2_PUT_FLASH_C_SCWD 4

/* Upstream cycle-type encodings for flash access requests. */
enum espi_flash_cycle_type {
	IT8XXX2_ESPI_CYCLE_TYPE_FLASH_READ = 0x08,
	IT8XXX2_ESPI_CYCLE_TYPE_FLASH_WRITE = 0x09,
	IT8XXX2_ESPI_CYCLE_TYPE_FLASH_ERASE = 0x0A,
};

/*
 * Common setup for a flash-channel upstream transaction: validates channel
 * readiness, upstream idleness and payload size, then programs the cycle
 * type, tag/length and target flash address.  The caller starts the
 * transfer (upstream enable + go) afterwards.
 */
static int espi_it8xxx2_flash_trans(const struct device *dev,
				struct espi_flash_packet *pckt,
				enum espi_flash_cycle_type tran)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;
	struct espi_queue1_regs *const queue1_reg =
		(struct espi_queue1_regs *)config->base_espi_queue1;

	if (!(slave_reg->CH_FLASH_CAPCFG3 & IT8XXX2_ESPI_FC_READY_MASK)) {
		LOG_ERR("%s: Flash channel isn't ready (tran:%d)",
			__func__, tran);
		return -EIO;
	}

	if (slave_reg->ESUCTRL0 & IT8XXX2_ESPI_UPSTREAM_BUSY) {
		LOG_ERR("%s: Upstream busy (tran:%d)", __func__, tran);
		return -EIO;
	}

	if (pckt->len > IT8XXX2_ESPI_FLASH_MAX_PAYLOAD_SIZE) {
		LOG_ERR("%s: Invalid size request (tran:%d)", __func__, tran);
		return -EINVAL;
	}

	/* Set cycle type */
	slave_reg->ESUCTRL1 = tran;
	/* Set tag and length[11:8] */
	slave_reg->ESUCTRL2 = (ESPI_FLASH_TAG << 4);
	/*
	 * Set length [7:0]
	 * Note: for erasing, the least significant 3 bit of the length field
	 * specifies the size of the block to be erased:
	 * 001b: 4 Kbytes
	 * 010b: 64Kbytes
	 * 100b: 128 Kbytes
	 * 101b: 256 Kbytes
	 */
	slave_reg->ESUCTRL3 = pckt->len;
	/* Set flash address */
	queue1_reg->UPSTREAM_DATA[0] = (pckt->flash_addr >> 24) & 0xff;
	queue1_reg->UPSTREAM_DATA[1] = (pckt->flash_addr >> 16) & 0xff;
	queue1_reg->UPSTREAM_DATA[2] = (pckt->flash_addr >> 8) & 0xff;
	queue1_reg->UPSTREAM_DATA[3] = pckt->flash_addr & 0xff;

	return 0;
}

/*
 * eSPI API: read from the host-attached flash.  Blocks (up to
 * ESPI_FLASH_READ_TIMEOUT_MS) for the upstream-done ISR, then copies the
 * received bytes into pckt->buf.
 */
static int espi_it8xxx2_flash_read(const struct device *dev,
				struct espi_flash_packet *pckt)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_it8xxx2_data *const data = dev->data;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;
	int ret;

	ret = espi_it8xxx2_flash_trans(dev, pckt,
					IT8XXX2_ESPI_CYCLE_TYPE_FLASH_READ);
	if (ret) {
		return ret;
	}

	/* Set upstream enable */
	slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_ENABLE;
	/* Set upstream go */
	slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_GO;

	/* Wait until upstream done or timeout */
	ret = k_sem_take(&data->flash_upstream_go,
				K_MSEC(ESPI_FLASH_READ_TIMEOUT_MS));
	if (ret == -EAGAIN) {
		LOG_ERR("%s: Timeout", __func__);
		return -ETIMEDOUT;
	}

	/* A read must complete "with data" (SCWD). */
	if (data->put_flash_cycle_type != ESPI_IT8XXX2_PUT_FLASH_C_SCWD) {
		LOG_ERR("%s: Unsuccessful completion", __func__);
		return -EIO;
	}

	memcpy(pckt->buf, data->flash_buf, pckt->len);

	LOG_INF("%s: read (%d) bytes from flash over espi", __func__,
		data->put_flash_len);

	return 0;
}

/*
 * eSPI API: write to the host-attached flash.  Payload bytes follow the
 * 4-byte address in the upstream queue; blocks for completion.
 */
static int espi_it8xxx2_flash_write(const struct device *dev,
					struct espi_flash_packet *pckt)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_it8xxx2_data *const data = dev->data;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;
	struct espi_queue1_regs *const queue1_reg =
		(struct espi_queue1_regs *)config->base_espi_queue1;
	int ret;

	ret = espi_it8xxx2_flash_trans(dev, pckt,
					IT8XXX2_ESPI_CYCLE_TYPE_FLASH_WRITE);
	if (ret) {
		return ret;
	}

	/* Set data byte */
	for (int i = 0; i < pckt->len; i++) {
		queue1_reg->UPSTREAM_DATA[4 + i] = pckt->buf[i];
	}

	/* Set upstream enable */
	slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_ENABLE;
	/* Set upstream go */
	slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_GO;

	/* Wait until upstream done or timeout */
	ret = k_sem_take(&data->flash_upstream_go,
				K_MSEC(ESPI_FLASH_WRITE_TIMEOUT_MS));
	if (ret == -EAGAIN) {
		LOG_ERR("%s: Timeout", __func__);
		return -ETIMEDOUT;
	}

	/* A write must complete "without data" (SCWOD). */
	if (data->put_flash_cycle_type != ESPI_IT8XXX2_PUT_FLASH_C_SCWOD) {
		LOG_ERR("%s: Unsuccessful completion", __func__);
		return -EIO;
	}

	return 0;
}

/*
 * eSPI API: erase a block of the host-attached flash (block size is
 * encoded in pckt->len, see espi_it8xxx2_flash_trans).  Blocks for
 * completion.
 */
static int espi_it8xxx2_flash_erase(const struct device *dev,
					struct espi_flash_packet *pckt)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_it8xxx2_data *const data = dev->data;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;
	int ret;

	ret = espi_it8xxx2_flash_trans(dev, pckt,
					IT8XXX2_ESPI_CYCLE_TYPE_FLASH_ERASE);
	if (ret) {
		return ret;
	}

	/* Set upstream enable */
	slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_ENABLE;
	/* Set upstream go */
	slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_GO;

	/* Wait until upstream done or timeout */
	ret = k_sem_take(&data->flash_upstream_go,
				K_MSEC(ESPI_FLASH_ERASE_TIMEOUT_MS));
	if (ret == -EAGAIN) {
		LOG_ERR("%s: Timeout", __func__);
		return -ETIMEDOUT;
	}

	if (data->put_flash_cycle_type != ESPI_IT8XXX2_PUT_FLASH_C_SCWOD) {
		LOG_ERR("%s: Unsuccessful completion", __func__);
		return -EIO;
	}

	return 0;
}

/*
 * Upstream-done handler for flash cycles: records completion type, tag and
 * length, copies read data into the driver buffer, and wakes the waiting
 * flash API call.
 */
static void espi_it8xxx2_flash_upstream_done_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_it8xxx2_data *const data = dev->data;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;
	struct espi_queue1_regs *const queue1_reg =
		(struct espi_queue1_regs *)config->base_espi_queue1;

	data->put_flash_cycle_type = slave_reg->ESUCTRL6;
	data->put_flash_tag = slave_reg->ESUCTRL7 &
				IT8XXX2_ESPI_PUT_FLASH_TAG_MASK;
	data->put_flash_len = slave_reg->ESUCTRL8 &
				IT8XXX2_ESPI_PUT_FLASH_LEN_MASK;

	if (slave_reg->ESUCTRL1 == IT8XXX2_ESPI_CYCLE_TYPE_FLASH_READ) {
		if (data->put_flash_len > IT8XXX2_ESPI_FLASH_MAX_PAYLOAD_SIZE) {
			LOG_ERR("%s: Invalid size (%d)", __func__,
				data->put_flash_len);
		} else {
			for (int i = 0; i < data->put_flash_len; i++) {
				data->flash_buf[i] =
					queue1_reg->UPSTREAM_DATA[i];
			}
		}
	}

	k_sem_give(&data->flash_upstream_go);
}

/* Initialize the flash-channel semaphore and upstream interrupt. */
static void espi_it8xxx2_flash_init(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_it8xxx2_data *const data = dev->data;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;

	k_sem_init(&data->flash_upstream_go, 0, 1);

	/* Upstream interrupt enable */
	slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_INTERRUPT_ENABLE;
}
#endif /* CONFIG_ESPI_FLASH_CHANNEL */

/* eSPI driver registration */
static int espi_it8xxx2_init(const struct device *dev);

/* Zephyr eSPI driver API vtable for this controller. */
static const struct espi_driver_api espi_it8xxx2_driver_api = {
	.config = espi_it8xxx2_configure,
	.get_channel_status = espi_it8xxx2_channel_ready,
	.send_vwire = espi_it8xxx2_send_vwire,
	.receive_vwire = espi_it8xxx2_receive_vwire,
	.manage_callback = espi_it8xxx2_manage_callback,
	.read_lpc_request = espi_it8xxx2_read_lpc_request,
	.write_lpc_request = espi_it8xxx2_write_lpc_request,
#ifdef CONFIG_ESPI_OOB_CHANNEL
	.send_oob = espi_it8xxx2_send_oob,
	.receive_oob = espi_it8xxx2_receive_oob,
#endif
#ifdef CONFIG_ESPI_FLASH_CHANNEL
	.flash_read = espi_it8xxx2_flash_read,
	.flash_write = espi_it8xxx2_flash_write,
	.flash_erase = espi_it8xxx2_flash_erase,
#endif
};

/*
 * Read the current level of a virtual wire and report it to registered
 * callbacks as an ESPI_BUS_EVENT_VWIRE_RECEIVED event.
 */
static void espi_it8xxx2_vw_notify_system_state(const struct device *dev,
				enum espi_vwire_signal signal)
{
	struct espi_it8xxx2_data *const data = dev->data;
	struct espi_event evt = {ESPI_BUS_EVENT_VWIRE_RECEIVED, 0, 0};
	uint8_t level = 0;

	espi_it8xxx2_receive_vwire(dev, signal, &level);

	evt.evt_details = signal;
	evt.evt_data = level;
	espi_send_callbacks(&data->callbacks, dev, evt);
}

/* Placeholder handler for wires that need no driver-side action. */
static void espi_vw_signal_no_isr(const struct device *dev)
{
	ARG_UNUSED(dev);
}

/* Wires carried in VW index 02h (sleep states S3/S4/S5). */
static const struct espi_vw_signal_t vwidx2_signals[] = {
	{ESPI_VWIRE_SIGNAL_SLP_S3, NULL},
	{ESPI_VWIRE_SIGNAL_SLP_S4, NULL},
	{ESPI_VWIRE_SIGNAL_SLP_S5, NULL},
};

/* Notify the application for every index-02h wire whose level changed. */
static void espi_it8xxx2_vwidx2_isr(const struct device *dev,
					uint8_t updated_flag)
{
	for (int i = 0; i < ARRAY_SIZE(vwidx2_signals); i++) {
		enum espi_vwire_signal vw_signal = vwidx2_signals[i].signal;

		if (updated_flag & vw_channel_list[vw_signal].level_mask) {
			espi_it8xxx2_vw_notify_system_state(dev, vw_signal);
		}
	}
}

/* Acknowledge OOB_RST_WARN by echoing its level on OOB_RST_ACK. */
static void espi_vw_oob_rst_warn_isr(const struct device *dev)
{
	uint8_t level = 0;

	espi_it8xxx2_receive_vwire(dev, ESPI_VWIRE_SIGNAL_OOB_RST_WARN, &level);
	espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_OOB_RST_ACK, level);
}

/*
 * PLTRST handler: on platform-reset de-assertion, pre-set SMI/SCI and the
 * host-reset/CPU-init acknowledge wires to their idle-high levels.
 */
static void espi_vw_pltrst_isr(const struct device *dev)
{
	uint8_t pltrst = 0;

	espi_it8xxx2_receive_vwire(dev, ESPI_VWIRE_SIGNAL_PLTRST, &pltrst);

	if (pltrst) {
		espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_SMI, 1);
		espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_SCI, 1);
		espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_HOST_RST_ACK, 1);
		espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_RST_CPU_INIT, 1);
	}

	LOG_INF("VW PLTRST_L %sasserted", pltrst ? "de" : "");
}

/* Wires carried in VW index 03h. */
static const struct espi_vw_signal_t vwidx3_signals[] = {
	{ESPI_VWIRE_SIGNAL_OOB_RST_WARN, espi_vw_oob_rst_warn_isr},
	{ESPI_VWIRE_SIGNAL_PLTRST,       espi_vw_pltrst_isr},
};

/* Run per-wire handlers and notify for changed index-03h wires. */
static void espi_it8xxx2_vwidx3_isr(const struct device *dev,
					uint8_t updated_flag)
{
	for (int i = 0; i < ARRAY_SIZE(vwidx3_signals); i++) {
		enum espi_vwire_signal vw_signal = vwidx3_signals[i].signal;

		if (updated_flag & vw_channel_list[vw_signal].level_mask) {
			vwidx3_signals[i].vw_signal_isr(dev);
			espi_it8xxx2_vw_notify_system_state(dev, vw_signal);
		}
	}
}

/* Acknowledge HOST_RST_WARN by echoing its level on HOST_RST_ACK. */
static void espi_vw_host_rst_warn_isr(const struct device *dev)
{
	uint8_t level = 0;

	espi_it8xxx2_receive_vwire(dev,
		ESPI_VWIRE_SIGNAL_HOST_RST_WARN, &level);
	espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_HOST_RST_ACK, level);
}

/* Wires carried in VW index 07h. */
static const struct espi_vw_signal_t vwidx7_signals[] = {
	{ESPI_VWIRE_SIGNAL_HOST_RST_WARN, espi_vw_host_rst_warn_isr},
};

/* Run per-wire handlers and notify for changed index-07h wires. */
static void espi_it8xxx2_vwidx7_isr(const struct device *dev,
					uint8_t updated_flag)
{
	for (int i = 0; i < ARRAY_SIZE(vwidx7_signals); i++) {
		enum espi_vwire_signal vw_signal = vwidx7_signals[i].signal;

		if (updated_flag & vw_channel_list[vw_signal].level_mask) {
			vwidx7_signals[i].vw_signal_isr(dev);
			espi_it8xxx2_vw_notify_system_state(dev, vw_signal);
		}
	}
}

/* Acknowledge SUS_WARN by echoing its level on SUS_ACK. */
static void espi_vw_sus_warn_isr(const struct device *dev)
{
	uint8_t level = 0;

	espi_it8xxx2_receive_vwire(dev, ESPI_VWIRE_SIGNAL_SUS_WARN, &level);
	espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_SUS_ACK, level);
}

/* Wires carried in VW index 41h. */
static const struct espi_vw_signal_t vwidx41_signals[] = {
	{ESPI_VWIRE_SIGNAL_SUS_WARN,      espi_vw_sus_warn_isr},
	{ESPI_VWIRE_SIGNAL_SUS_PWRDN_ACK, espi_vw_signal_no_isr},
	{ESPI_VWIRE_SIGNAL_SLP_A,         espi_vw_signal_no_isr},
};

/* Run per-wire handlers and notify for changed index-41h wires. */
static void espi_it8xxx2_vwidx41_isr(const struct device *dev,
					uint8_t updated_flag)
{
	for (int i = 0; i < ARRAY_SIZE(vwidx41_signals); i++) {
		enum espi_vwire_signal vw_signal = vwidx41_signals[i].signal;

		if (updated_flag & vw_channel_list[vw_signal].level_mask) {
			vwidx41_signals[i].vw_signal_isr(dev);
			espi_it8xxx2_vw_notify_system_state(dev, vw_signal);
		}
	}
}

/* Wires carried in VW index 42h. */
static const struct espi_vw_signal_t vwidx42_signals[] = {
	{ESPI_VWIRE_SIGNAL_SLP_LAN,  NULL},
	{ESPI_VWIRE_SIGNAL_SLP_WLAN, NULL},
};

/* Notify the application for every index-42h wire whose level changed. */
static void espi_it8xxx2_vwidx42_isr(const struct device *dev,
					uint8_t updated_flag)
{
	for (int i = 0; i < ARRAY_SIZE(vwidx42_signals); i++) {
		enum espi_vwire_signal vw_signal = vwidx42_signals[i].signal;

		if (updated_flag & vw_channel_list[vw_signal].level_mask) {
			espi_it8xxx2_vw_notify_system_state(dev, vw_signal);
		}
	}
}

static void espi_it8xxx2_vwidx43_isr(const struct device *dev,
					uint8_t updated_flag)
{
	ARG_UNUSED(dev);
	/*
	 * We haven't send callback to system because there is no index 43
	 * virtual wire signal is listed in enum espi_vwire_signal.
	 */
	LOG_INF("vw isr %s is ignored!", __func__);
}

static void espi_it8xxx2_vwidx44_isr(const struct device *dev,
					uint8_t updated_flag)
{
	ARG_UNUSED(dev);
	/*
	 * We haven't send callback to system because there is no index 44
	 * virtual wire signal is listed in enum espi_vwire_signal.
	 */
	LOG_INF("vw isr %s is ignored!", __func__);
}

/* Wires carried in VW index 47h. */
static const struct espi_vw_signal_t vwidx47_signals[] = {
	{ESPI_VWIRE_SIGNAL_HOST_C10, NULL},
};

/* Notify the application for every index-47h wire whose level changed. */
static void espi_it8xxx2_vwidx47_isr(const struct device *dev,
					uint8_t updated_flag)
{
	for (int i = 0; i < ARRAY_SIZE(vwidx47_signals); i++) {
		enum espi_vwire_signal vw_signal = vwidx47_signals[i].signal;

		if (updated_flag & vw_channel_list[vw_signal].level_mask) {
			espi_it8xxx2_vw_notify_system_state(dev, vw_signal);
		}
	}
}

/*
 * The ISR of espi VW interrupt in array needs to match bit order in
 * ESPI VW VWCTRL1 register.
 */
static const struct vwidx_isr_t vwidx_isr_list[] = {
	[0] = {espi_it8xxx2_vwidx2_isr,  0x02},
	[1] = {espi_it8xxx2_vwidx3_isr,  0x03},
	[2] = {espi_it8xxx2_vwidx7_isr,  0x07},
	[3] = {espi_it8xxx2_vwidx41_isr, 0x41},
	[4] = {espi_it8xxx2_vwidx42_isr, 0x42},
	[5] = {espi_it8xxx2_vwidx43_isr, 0x43},
	[6] = {espi_it8xxx2_vwidx44_isr, 0x44},
	[7] = {espi_it8xxx2_vwidx47_isr, 0x47},
};

/*
 * This is used to record the previous VW valid/level field state to discover
 * changes. Then do following sequence only when state is changed.
 */
static uint8_t vwidx_cached_flag[ARRAY_SIZE(vwidx_isr_list)];

/*
 * Re-seed the cached VW valid/level snapshot from hardware; called when
 * espi_reset# asserts so stale diffs are not reported afterwards.
 */
static void espi_it8xxx2_reset_vwidx_cache(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_vw_regs *const vw_reg =
		(struct espi_vw_regs *)config->base_espi_vw;

	/* reset vwidx_cached_flag */
	for (int i = 0; i < ARRAY_SIZE(vwidx_isr_list); i++) {
		vwidx_cached_flag[i] =
			vw_reg->VW_INDEX[vwidx_isr_list[i].vw_index];
	}
}

/*
 * Top-level virtual-wire ISR: for each VW index flagged in VWCTRL1, diff
 * the index register against the cached snapshot and dispatch the per-index
 * handler with the changed-bits mask.
 */
static void espi_it8xxx2_vw_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_vw_regs *const vw_reg =
		(struct espi_vw_regs *)config->base_espi_vw;
	uint8_t vwidx_updated = vw_reg->VWCTRL1;

	/* write-1 to clear */
	vw_reg->VWCTRL1 = vwidx_updated;

	for (int i = 0; i < ARRAY_SIZE(vwidx_isr_list); i++) {
		if (vwidx_updated & BIT(i)) {
			uint8_t vw_flag;

			vw_flag = vw_reg->VW_INDEX[vwidx_isr_list[i].vw_index];
			vwidx_isr_list[i].vwidx_isr(dev,
					vwidx_cached_flag[i] ^ vw_flag);
			vwidx_cached_flag[i] = vw_flag;
		}
	}
}

/* Report a channel-enable transition to registered callbacks. */
static void espi_it8xxx2_ch_notify_system_state(const struct device *dev,
						enum espi_channel ch, bool en)
{
	struct espi_it8xxx2_data *const data = dev->data;
	struct espi_event evt = {
		.evt_type = ESPI_BUS_EVENT_CHANNEL_READY,
		.evt_details = ch,
		.evt_data = en,
	};

	espi_send_callbacks(&data->callbacks, dev, evt);
}

/*
 * Peripheral channel enable asserted flag.
 * A 0-to-1 or 1-to-0 transition on "Peripheral Channel Enable" bit.
 */
static void espi_it8xxx2_peripheral_ch_en_isr(const struct device *dev,
						bool enable)
{
	espi_it8xxx2_ch_notify_system_state(dev,
					ESPI_CHANNEL_PERIPHERAL, enable);
}

/*
 * VW channel enable asserted flag.
 * A 0-to-1 or 1-to-0 transition on "Virtual Wire Channel Enable" bit.
 */
static void espi_it8xxx2_vw_ch_en_isr(const struct device *dev, bool enable)
{
	espi_it8xxx2_ch_notify_system_state(dev, ESPI_CHANNEL_VWIRE, enable);
}

/*
 * OOB message channel enable asserted flag.
 * A 0-to-1 or 1-to-0 transition on "OOB Message Channel Enable" bit.
 */
static void espi_it8xxx2_oob_ch_en_isr(const struct device *dev, bool enable)
{
	espi_it8xxx2_ch_notify_system_state(dev, ESPI_CHANNEL_OOB, enable);
}

/*
 * Flash channel enable asserted flag.
 * A 0-to-1 or 1-to-0 transition on "Flash Access Channel Enable" bit.
 */
static void espi_it8xxx2_flash_ch_en_isr(const struct device *dev, bool enable)
{
	if (enable) {
		/* Tell the host the target has booted successfully. */
		espi_it8xxx2_send_vwire(dev,
					ESPI_VWIRE_SIGNAL_TARGET_BOOT_STS, 1);
		espi_it8xxx2_send_vwire(dev,
					ESPI_VWIRE_SIGNAL_TARGET_BOOT_DONE, 1);
	}

	espi_it8xxx2_ch_notify_system_state(dev, ESPI_CHANNEL_FLASH, enable);
}

/* Peripheral-channel posted/completion packet received (currently logged only). */
static void espi_it8xxx2_put_pc_status_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;

	/*
	 * TODO: To check cycle type (bit[3-0] at ESPCTRL0) and make
	 * corresponding modification if needed.
	 */
	LOG_INF("isr %s is ignored!", __func__);

	/* write-1-clear to release PC_FREE */
	slave_reg->ESPCTRL0 = IT8XXX2_ESPI_INTERRUPT_PUT_PC;
}

#ifdef CONFIG_ESPI_OOB_CHANNEL
/* Upstream channel was disabled by the host; just clear the status bit. */
static void espi_it8xxx2_upstream_channel_disable_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;

	LOG_INF("isr %s is ignored!", __func__);

	/* write-1 to clear this bit */
	slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_CHANNEL_DISABLE;
}

/*
 * PUT_OOB received: in synchronous mode wake the blocked receive_oob()
 * caller; in async mode notify the application with the payload length.
 */
static void espi_it8xxx2_put_oob_status_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_it8xxx2_data *const data = dev->data;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;
#ifdef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
	struct espi_event evt = { .evt_type = ESPI_BUS_EVENT_OOB_RECEIVED,
				  .evt_details = 0,
				  .evt_data = 0 };
#endif

	/* Write-1 to clear this bit for the next coming posted transaction.
	 */
	slave_reg->ESOCTRL0 |= IT8XXX2_ESPI_PUT_OOB_STATUS;

#ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
	k_sem_give(&data->oob_upstream_go);
#else
	/* Additional detail is length field of PUT_OOB message packet. */
	evt.evt_details = (slave_reg->ESOCTRL4 &
				IT8XXX2_ESPI_PUT_OOB_LEN_MASK);
	espi_send_callbacks(&data->callbacks, dev, evt);
#endif
}
#endif

#if defined(CONFIG_ESPI_OOB_CHANNEL) || defined(CONFIG_ESPI_FLASH_CHANNEL)
/*
 * Upstream transaction complete: route flash completions to the flash
 * handler, then clear the done flag and disable the upstream engine.
 */
static void espi_it8xxx2_upstream_done_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;

#ifdef CONFIG_ESPI_FLASH_CHANNEL
	/* cycle type is flash read, write, or erase */
	if (slave_reg->ESUCTRL1 != IT8XXX2_ESPI_CYCLE_TYPE_OOB) {
		espi_it8xxx2_flash_upstream_done_isr(dev);
	}
#endif

	/* write-1 to clear this bit */
	slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_DONE;
	/* upstream disable */
	slave_reg->ESUCTRL0 &= ~IT8XXX2_ESPI_UPSTREAM_ENABLE;
}
#endif

/*
 * The ISR of espi interrupt event in array need to be matched bit order in
 * IT8XXX2 ESPI ESGCTRL0 register.
*/
static const struct espi_isr_t espi_isr_list[] = {
	[0] = {espi_it8xxx2_peripheral_ch_en_isr, ASSERTED_FLAG},
	[1] = {espi_it8xxx2_vw_ch_en_isr, ASSERTED_FLAG},
	[2] = {espi_it8xxx2_oob_ch_en_isr, ASSERTED_FLAG},
	[3] = {espi_it8xxx2_flash_ch_en_isr, ASSERTED_FLAG},
	[4] = {espi_it8xxx2_peripheral_ch_en_isr, DEASSERTED_FLAG},
	[5] = {espi_it8xxx2_vw_ch_en_isr, DEASSERTED_FLAG},
	[6] = {espi_it8xxx2_oob_ch_en_isr, DEASSERTED_FLAG},
	[7] = {espi_it8xxx2_flash_ch_en_isr, DEASSERTED_FLAG},
};

/* Top-level eSPI ISR: reads and acknowledges ESGCTRL0 events, dispatches
 * each set bit through espi_isr_list, then polls the PUT_PC / OOB /
 * upstream-done status bits that have their own status registers.
 */
static void espi_it8xxx2_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;
	/* get espi interrupt events */
	uint8_t espi_event = slave_reg->ESGCTRL0;
#if defined(CONFIG_ESPI_OOB_CHANNEL) || defined(CONFIG_ESPI_FLASH_CHANNEL)
	uint8_t espi_upstream = slave_reg->ESUCTRL0;
#endif

	/* write-1 to clear */
	slave_reg->ESGCTRL0 = espi_event;

	/* process espi interrupt events */
	for (int i = 0; i < ARRAY_SIZE(espi_isr_list); i++) {
		if (espi_event & BIT(i)) {
			espi_isr_list[i].espi_isr(dev, espi_isr_list[i].isr_type);
		}
	}

	/*
	 * bit7: the peripheral has received a peripheral posted/completion.
	 * This bit indicates the peripheral has received a packet from eSPI
	 * peripheral channel.
	 */
	if (slave_reg->ESPCTRL0 & IT8XXX2_ESPI_INTERRUPT_PUT_PC) {
		espi_it8xxx2_put_pc_status_isr(dev);
	}

#ifdef CONFIG_ESPI_OOB_CHANNEL
	/*
	 * The corresponding channel of the eSPI upstream transaction is
	 * disabled.
	 */
	if (espi_upstream & IT8XXX2_ESPI_UPSTREAM_CHANNEL_DISABLE) {
		espi_it8xxx2_upstream_channel_disable_isr(dev);
	}

	/* The eSPI slave has received a PUT_OOB message. */
	if (slave_reg->ESOCTRL0 & IT8XXX2_ESPI_PUT_OOB_STATUS) {
		espi_it8xxx2_put_oob_status_isr(dev);
	}
#endif

	/* eSPI oob and flash channels use the same interrupt of upstream. */
#if defined(CONFIG_ESPI_OOB_CHANNEL) || defined(CONFIG_ESPI_FLASH_CHANNEL)
	/* The eSPI upstream transaction is done.
	 */
	if (espi_upstream & IT8XXX2_ESPI_UPSTREAM_DONE) {
		espi_it8xxx2_upstream_done_isr(dev);
	}
#endif
}

/* Gate or ungate the eSPI input pad. */
void espi_it8xxx2_enable_pad_ctrl(const struct device *dev, bool enable)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;

	if (enable) {
		/* Enable eSPI pad. */
		slave_reg->ESGCTRL2 &= ~IT8XXX2_ESPI_INPUT_PAD_GATING;
	} else {
		/* Disable eSPI pad. */
		slave_reg->ESGCTRL2 |= IT8XXX2_ESPI_INPUT_PAD_GATING;
	}
}

/* Enable/disable the wake-up (transaction) interrupt; clears any pending
 * WUC status on disable so a stale event cannot re-fire later.
 */
void espi_it8xxx2_enable_trans_irq(const struct device *dev, bool enable)
{
	const struct espi_it8xxx2_config *const config = dev->config;

	if (enable) {
		irq_enable(IT8XXX2_TRANS_IRQ);
	} else {
		irq_disable(IT8XXX2_TRANS_IRQ);
		/* Clear pending interrupt */
		it8xxx2_wuc_clear_status(config->wuc.wucs, config->wuc.mask);
	}
}

static void espi_it8xxx2_trans_isr(const struct device *dev)
{
	/*
	 * This interrupt is only used to wake up CPU, there is no need to do
	 * anything in the isr in addition to disable interrupt.
	 */
	espi_it8xxx2_enable_trans_irq(dev, false);
}

/* GPIO callback on espi_reset# edge: resets the cached virtual-wire index
 * state on assertion and notifies registered eSPI bus-reset callbacks.
 */
void espi_it8xxx2_espi_reset_isr(const struct device *port,
				 struct gpio_callback *cb, uint32_t pins)
{
	struct espi_it8xxx2_data *const data = ESPI_IT8XXX2_SOC_DEV->data;
	struct espi_event evt = {ESPI_BUS_RESET, 0, 0};
	bool espi_reset = gpio_pin_get(port, (find_msb_set(pins) - 1));

	if (!(espi_reset)) {
		/* Reset vwidx_cached_flag[] when espi_reset# asserted. */
		espi_it8xxx2_reset_vwidx_cache(ESPI_IT8XXX2_SOC_DEV);
	}

	evt.evt_data = espi_reset;
	espi_send_callbacks(&data->callbacks, ESPI_IT8XXX2_SOC_DEV, evt);
	LOG_INF("eSPI reset %sasserted", espi_reset ?
"de" : "");
}

/* eSPI reset# is enabled on GPD2 */
#define ESPI_IT8XXX2_ESPI_RESET_PORT DEVICE_DT_GET(DT_NODELABEL(gpiod))
#define ESPI_IT8XXX2_ESPI_RESET_PIN 2

/* Route espi_reset# to GPD2 and hook both-edge GPIO interrupt handling for
 * it (espi_it8xxx2_espi_reset_isr).
 */
static void espi_it8xxx2_enable_reset(void)
{
	struct gpio_it8xxx2_regs *const gpio_regs = GPIO_IT8XXX2_REG_BASE;
	static struct gpio_callback espi_reset_cb;

	/* eSPI reset is enabled on GPD2 */
	gpio_regs->GPIO_GCR = (gpio_regs->GPIO_GCR &
			~IT8XXX2_GPIO_GCR_ESPI_RST_EN_MASK) |
			(IT8XXX2_GPIO_GCR_ESPI_RST_D2 <<
			 IT8XXX2_GPIO_GCR_ESPI_RST_POS);
	/* enable eSPI reset isr */
	gpio_init_callback(&espi_reset_cb, espi_it8xxx2_espi_reset_isr,
			   BIT(ESPI_IT8XXX2_ESPI_RESET_PIN));
	gpio_add_callback(ESPI_IT8XXX2_ESPI_RESET_PORT, &espi_reset_cb);
	gpio_pin_interrupt_configure(ESPI_IT8XXX2_ESPI_RESET_PORT,
				     ESPI_IT8XXX2_ESPI_RESET_PIN,
				     GPIO_INT_MODE_EDGE | GPIO_INT_TRIG_BOTH);
}

static struct espi_it8xxx2_data espi_it8xxx2_data_0;
static const struct espi_it8xxx2_config espi_it8xxx2_config_0 = {
	.base_espi_slave = DT_INST_REG_ADDR_BY_IDX(0, 0),
	.base_espi_vw = DT_INST_REG_ADDR_BY_IDX(0, 1),
	.base_espi_queue0 = DT_INST_REG_ADDR_BY_IDX(0, 2),
	.base_espi_queue1 = DT_INST_REG_ADDR_BY_IDX(0, 3),
	.base_ec2i = DT_INST_REG_ADDR_BY_IDX(0, 4),
	.base_kbc = DT_INST_REG_ADDR_BY_IDX(0, 5),
	.base_pmc = DT_INST_REG_ADDR_BY_IDX(0, 6),
	.base_smfi = DT_INST_REG_ADDR_BY_IDX(0, 7),
	.wuc = IT8XXX2_DT_WUC_ITEMS_FUNC(0, 0),
};

DEVICE_DT_INST_DEFINE(0, &espi_it8xxx2_init, NULL,
		      &espi_it8xxx2_data_0, &espi_it8xxx2_config_0,
		      PRE_KERNEL_2, CONFIG_ESPI_INIT_PRIORITY,
		      &espi_it8xxx2_driver_api);

/* Driver init: configures the VCC detector, brings up the host-visible
 * legacy peripherals selected by Kconfig, enables VW and main eSPI
 * interrupts, and arms the espi_reset# and wake-up (WU42) paths.
 * Returns 0 on success.
 */
static int espi_it8xxx2_init(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_vw_regs *const vw_reg =
		(struct espi_vw_regs *)config->base_espi_vw;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;
	struct gctrl_it8xxx2_regs *const gctrl = ESPI_IT8XXX2_GET_GCTRL_BASE;

	/* configure VCC detector */
	gctrl->GCTRL_RSTS = (gctrl->GCTRL_RSTS &
			~(IT8XXX2_GCTRL_VCCDO_MASK | IT8XXX2_GCTRL_HGRST)) |
			(IT8XXX2_GCTRL_VCCDO_VCC_ON | IT8XXX2_GCTRL_GRST);

	/* enable PNPCFG devices */
	pnpcfg_it8xxx2_init(dev);

#ifdef CONFIG_ESPI_PERIPHERAL_8042_KBC
	/* enable kbc port (60h/64h) */
	kbc_it8xxx2_init(dev);
#endif
#ifdef CONFIG_ESPI_PERIPHERAL_HOST_IO
	/* enable pmc1 for ACPI port (62h/66h) */
	pmc1_it8xxx2_init(dev);
#endif
#ifdef CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80
	/* Accept Port 80h Cycle */
	port80_it8xxx2_init(dev);
#endif
#if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) || \
	defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION)
	smfi_it8xxx2_init(dev);
#endif
#ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD
	/* enable pmc2 for host command port */
	pmc2_it8xxx2_init(dev);
#endif

	/* Reset vwidx_cached_flag[] at initialization */
	espi_it8xxx2_reset_vwidx_cache(dev);

	/* Enable espi vw interrupt */
	vw_reg->VWCTRL0 |= IT8XXX2_ESPI_VW_INTERRUPT_ENABLE;
	IRQ_CONNECT(IT8XXX2_ESPI_VW_IRQ, 0, espi_it8xxx2_vw_isr,
		    DEVICE_DT_INST_GET(0), 0);
	irq_enable(IT8XXX2_ESPI_VW_IRQ);

	/* Reset PLTRST# virtual wire signal during eSPI reset */
	vw_reg->VWCTRL2 |= IT8XXX2_ESPI_VW_RESET_PLTRST;

#ifdef CONFIG_ESPI_OOB_CHANNEL
	espi_it8xxx2_oob_init(dev);
#endif
#ifdef CONFIG_ESPI_FLASH_CHANNEL
	espi_it8xxx2_flash_init(dev);
#endif

	/* Enable espi interrupt */
	slave_reg->ESGCTRL1 |= IT8XXX2_ESPI_INTERRUPT_ENABLE;
	IRQ_CONNECT(IT8XXX2_ESPI_IRQ, 0, espi_it8xxx2_isr,
		    DEVICE_DT_INST_GET(0), 0);
	irq_enable(IT8XXX2_ESPI_IRQ);

	/* enable interrupt and reset from eSPI_reset# */
	espi_it8xxx2_enable_reset();

	/*
	 * Enable eSPI to WUC.
	 * If an eSPI transaction is accepted, WU42 interrupt will be asserted.
	 */
	slave_reg->ESGCTRL2 |= IT8XXX2_ESPI_TO_WUC_ENABLE;

	/* Enable WU42 of WUI */
	it8xxx2_wuc_clear_status(config->wuc.wucs, config->wuc.mask);
	it8xxx2_wuc_enable(config->wuc.wucs, config->wuc.mask);

	/*
	 * Only register isr here, the interrupt only need to be enabled
	 * before CPU and RAM clocks gated in the idle function.
	 */
	IRQ_CONNECT(IT8XXX2_TRANS_IRQ, 0, espi_it8xxx2_trans_isr,
		    DEVICE_DT_INST_GET(0), 0);

	return 0;
}
```
/content/code_sandbox/drivers/espi/espi_it8xxx2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
19,233
```c
/*
 *
 */
#define DT_DRV_COMPAT ambiq_mspi_controller

#include <zephyr/logging/log.h>
#include <zephyr/logging/log_instance.h>
LOG_MODULE_REGISTER(mspi_ambiq_ap3);

#include <zephyr/kernel.h>
#include <zephyr/sys/util.h>
#include <zephyr/pm/device.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/mspi.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/sys_clock.h>
#include <zephyr/irq.h>

#include "mspi_ambiq.h"

#define MSPI_MAX_FREQ 48000000
#define MSPI_MAX_DEVICE 2
#define MSPI_TIMEOUT_US 1000000
#define PWRCTRL_MAX_WAIT_US 5
#define MSPI_BUSY BIT(2)

typedef int (*mspi_ambiq_pwr_func_t)(void);
typedef void (*irq_config_func_t)(void);

/* Per-transfer bookkeeping shared by sync and async transceive paths;
 * 'lock' doubles as the busy indicator (count 0 == transfer in progress).
 */
struct mspi_context {
	const struct mspi_dev_id *owner;
	struct mspi_xfer xfer;
	int packets_left;
	int packets_done;
	mspi_callback_handler_t callback;
	struct mspi_callback_context *callback_ctx;
	bool asynchronous;
	struct k_sem lock;
};

struct mspi_ambiq_config {
	uint32_t reg_base;
	uint32_t reg_size;
	struct mspi_cfg mspicfg;
	const struct pinctrl_dev_config *pcfg;
	irq_config_func_t irq_cfg_func;
	LOG_INSTANCE_PTR_DECLARE(log);
};

struct mspi_ambiq_data {
	void *mspiHandle;
	am_hal_mspi_dev_config_t hal_dev_cfg;
	struct mspi_dev_id *dev_id;
	struct k_mutex lock;
	struct mspi_dev_cfg dev_cfg;
	struct mspi_xip_cfg xip_cfg;
	struct mspi_scramble_cfg scramble_cfg;
	mspi_callback_handler_t cbs[MSPI_BUS_EVENT_MAX];
	struct mspi_callback_context *cb_ctxs[MSPI_BUS_EVENT_MAX];
	struct mspi_context ctx;
};

/* Map a frequency in Hz to the HAL clock-divider enum; returns 0 (and logs)
 * when the resulting divider is not one the hardware supports.
 */
static int mspi_set_freq(const struct mspi_ambiq_config *cfg, uint32_t freq)
{
	uint32_t d = MSPI_MAX_FREQ / freq;

	switch (d) {
	case AM_HAL_MSPI_CLK_48MHZ:
	case AM_HAL_MSPI_CLK_24MHZ:
	case AM_HAL_MSPI_CLK_16MHZ:
	case AM_HAL_MSPI_CLK_12MHZ:
	case AM_HAL_MSPI_CLK_8MHZ:
	case AM_HAL_MSPI_CLK_6MHZ:
	case AM_HAL_MSPI_CLK_4MHZ:
	case AM_HAL_MSPI_CLK_3MHZ:
		break;
	default:
		LOG_INST_ERR(cfg->log, "%u,Frequency not supported!", __LINE__);
		d = 0;
		break;
	}

	return d;
}

/* Translate (io_mode, data_rate, ce_num) into the HAL device enum; only SDR
 * and CE0/CE1 are supported, anything else yields AM_HAL_MSPI_FLASH_MAX.
 */
static am_hal_mspi_device_e mspi_set_line(const struct mspi_ambiq_config *cfg,
					  enum mspi_io_mode io_mode,
					  enum mspi_data_rate data_rate,
					  uint8_t ce_num)
{
	if (data_rate != MSPI_DATA_RATE_SINGLE) {
		LOG_INST_ERR(cfg->log, "%u, incorrect data rate, only SDR is supported.",
			     __LINE__);
		return AM_HAL_MSPI_FLASH_MAX;
	}

	if (ce_num == 0) {
		switch (io_mode) {
		case MSPI_IO_MODE_SINGLE:
			return AM_HAL_MSPI_FLASH_SERIAL_CE0;
		case MSPI_IO_MODE_DUAL:
			return AM_HAL_MSPI_FLASH_DUAL_CE0;
		case MSPI_IO_MODE_DUAL_1_1_2:
			return AM_HAL_MSPI_FLASH_DUAL_CE0_1_1_2;
		case MSPI_IO_MODE_DUAL_1_2_2:
			return AM_HAL_MSPI_FLASH_DUAL_CE0_1_2_2;
		case MSPI_IO_MODE_QUAD:
			return AM_HAL_MSPI_FLASH_QUAD_CE0;
		case MSPI_IO_MODE_QUAD_1_1_4:
			return AM_HAL_MSPI_FLASH_QUAD_CE0_1_1_4;
		case MSPI_IO_MODE_QUAD_1_4_4:
			return AM_HAL_MSPI_FLASH_QUAD_CE0_1_4_4;
		case MSPI_IO_MODE_OCTAL:
			return AM_HAL_MSPI_FLASH_OCTAL_CE0;
		default:
			return AM_HAL_MSPI_FLASH_MAX;
		}
	} else if (ce_num == 1) {
		switch (io_mode) {
		case MSPI_IO_MODE_SINGLE:
			return AM_HAL_MSPI_FLASH_SERIAL_CE1;
		case MSPI_IO_MODE_DUAL:
			return AM_HAL_MSPI_FLASH_DUAL_CE1;
		case MSPI_IO_MODE_DUAL_1_1_2:
			return AM_HAL_MSPI_FLASH_DUAL_CE1_1_1_2;
		case MSPI_IO_MODE_DUAL_1_2_2:
			return AM_HAL_MSPI_FLASH_DUAL_CE1_1_2_2;
		case MSPI_IO_MODE_QUAD:
			return AM_HAL_MSPI_FLASH_QUAD_CE1;
		case MSPI_IO_MODE_QUAD_1_1_4:
			return AM_HAL_MSPI_FLASH_QUAD_CE1_1_1_4;
		case MSPI_IO_MODE_QUAD_1_4_4:
			return AM_HAL_MSPI_FLASH_QUAD_CE1_1_4_4;
		case MSPI_IO_MODE_OCTAL:
			return AM_HAL_MSPI_FLASH_OCTAL_CE1;
		default:
			return AM_HAL_MSPI_FLASH_MAX;
		}
	} else {
		return AM_HAL_MSPI_FLASH_MAX;
	}
}

/* Map a DMA memory boundary in bytes to the HAL boundary enum;
 * AM_HAL_MSPI_BOUNDARY_MAX signals an unsupported value.
 */
static am_hal_mspi_dma_boundary_e mspi_set_mem_boundary(uint32_t mem_boundary)
{
	switch (mem_boundary) {
	case 0:
		return AM_HAL_MSPI_BOUNDARY_NONE;
	case 32:
		return AM_HAL_MSPI_BOUNDARY_BREAK32;
	case 64:
		return AM_HAL_MSPI_BOUNDARY_BREAK64;
	case 128:
		return AM_HAL_MSPI_BOUNDARY_BREAK128;
	case 256:
		return AM_HAL_MSPI_BOUNDARY_BREAK256;
	case 512:
		return AM_HAL_MSPI_BOUNDARY_BREAK512;
	case 1024:
		return AM_HAL_MSPI_BOUNDARY_BREAK1K;
	case 2048:
		return AM_HAL_MSPI_BOUNDARY_BREAK2K;
	case 4096:
		return
AM_HAL_MSPI_BOUNDARY_BREAK4K;
	case 8192:
		return AM_HAL_MSPI_BOUNDARY_BREAK8K;
	case 16384:
		return AM_HAL_MSPI_BOUNDARY_BREAK16K;
	default:
		return AM_HAL_MSPI_BOUNDARY_MAX;
	}
}

/* Drive the software-controlled CE GPIO (with configured delay) around a
 * transfer, but only when the context has an owner and hold_ce is set.
 */
static inline void mspi_context_ce_control(struct mspi_context *ctx, bool on)
{
	if (ctx->owner) {
		if (ctx->xfer.hold_ce &&
		    ctx->xfer.ce_sw_ctrl.gpio.port != NULL) {
			if (on) {
				gpio_pin_set_dt(&ctx->xfer.ce_sw_ctrl.gpio, 1);
				k_busy_wait(ctx->xfer.ce_sw_ctrl.delay);
			} else {
				k_busy_wait(ctx->xfer.ce_sw_ctrl.delay);
				gpio_pin_set_dt(&ctx->xfer.ce_sw_ctrl.gpio, 0);
			}
		}
	}
}

/* Release ownership of the transfer context. */
static inline void mspi_context_release(struct mspi_context *ctx)
{
	ctx->owner = NULL;
	k_sem_give(&ctx->lock);
}

/* Force the context unlocked (deasserting CE first); safe to call even when
 * the semaphore is already available.
 */
static inline void mspi_context_unlock_unconditionally(struct mspi_context *ctx)
{
	mspi_context_ce_control(ctx, false);

	if (!k_sem_count_get(&ctx->lock)) {
		ctx->owner = NULL;
		k_sem_give(&ctx->lock);
	}
}

/*
 * Acquire the transfer context for 'req'.
 * Returns 1 when the caller must (re)configure the transfer, 0 when the
 * previous configuration can be reused, -EBUSY on lock timeout, -EIO when
 * an async transfer with different settings is still in flight.
 */
static inline int mspi_context_lock(struct mspi_context *ctx,
				    const struct mspi_dev_id *req,
				    const struct mspi_xfer *xfer,
				    mspi_callback_handler_t callback,
				    struct mspi_callback_context *callback_ctx,
				    bool lockon)
{
	int ret = 1;

	if ((k_sem_count_get(&ctx->lock) == 0) && !lockon &&
	    (ctx->owner == req)) {
		return 0;
	}

	if (k_sem_take(&ctx->lock, K_MSEC(xfer->timeout))) {
		return -EBUSY;
	}
	if (ctx->xfer.async) {
		if ((xfer->tx_dummy == ctx->xfer.tx_dummy) &&
		    (xfer->rx_dummy == ctx->xfer.rx_dummy) &&
		    (xfer->cmd_length == ctx->xfer.cmd_length) &&
		    (xfer->addr_length == ctx->xfer.addr_length)) {
			ret = 0;
		} else if (ctx->packets_left == 0) {
			if (ctx->callback_ctx) {
				volatile struct mspi_event_data *evt_data;

				evt_data = &ctx->callback_ctx->mspi_evt.evt_data;
				/* spin until the last async completion is consumed */
				while (evt_data->status != 0) {
				}
				ret = 1;
			} else {
				ret = 0;
			}
		} else {
			return -EIO;
		}
	}
	ctx->owner = req;
	ctx->xfer = *xfer;
	ctx->packets_done = 0;
	ctx->packets_left = ctx->xfer.num_packet;
	ctx->callback = callback;
	ctx->callback_ctx = callback_ctx;
	return ret;
}

/* True when a transfer is currently in progress on this controller. */
static inline bool mspi_is_inp(const struct device *controller)
{
	struct mspi_ambiq_data *data = controller->data;

	return (k_sem_count_get(&data->ctx.lock) == 0);
}

/* Validate that dev_id's CE line matches one of the peripherals described
 * in the controller config and that its index is consistent.
 */
static inline int mspi_verify_device(const struct device *controller,
				     const struct mspi_dev_id *dev_id)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	int device_index = cfg->mspicfg.num_periph;
	int ret = 0;

	for (int i = 0; i < cfg->mspicfg.num_periph; i++) {
		if (dev_id->ce.port == cfg->mspicfg.ce_group[i].port &&
		    dev_id->ce.pin == cfg->mspicfg.ce_group[i].pin &&
		    dev_id->ce.dt_flags == cfg->mspicfg.ce_group[i].dt_flags) {
			device_index = i;
		}
	}

	if (device_index >= cfg->mspicfg.num_periph ||
	    device_index != dev_id->dev_idx) {
		LOG_INST_ERR(cfg->log, "%u, invalid device ID.", __LINE__);
		return -ENODEV;
	}

	return ret;
}

/* Tear down the controller: mask and clear interrupts, disable, power down
 * and deinitialize the HAL instance. Holds the controller mutex on the
 * error paths (released at e_deinit_return).
 */
static int mspi_ambiq_deinit(const struct device *controller)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	int ret = 0;

	if (!data->mspiHandle) {
		LOG_INST_ERR(cfg->log, "%u, the mspi not yet initialized.", __LINE__);
		return -ENODEV;
	}

	if (k_mutex_lock(&data->lock, K_MSEC(CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE))) {
		LOG_INST_ERR(cfg->log, "%u, fail to gain controller access.", __LINE__);
		return -EBUSY;
	}

	ret = am_hal_mspi_interrupt_disable(data->mspiHandle, 0xFFFFFFFF);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to disable interrupt, code:%d.",
			     __LINE__, ret);
		ret = -EHOSTDOWN;
		goto e_deinit_return;
	}

	ret = am_hal_mspi_interrupt_clear(data->mspiHandle, 0xFFFFFFFF);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to clear interrupt, code:%d.",
			     __LINE__, ret);
		ret = -EHOSTDOWN;
		goto e_deinit_return;
	}

	ret = am_hal_mspi_disable(data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to disable MSPI, code:%d.",
			     __LINE__, ret);
		ret = -EHOSTDOWN;
		goto e_deinit_return;
	}

	ret = am_hal_mspi_power_control(data->mspiHandle, AM_HAL_SYSCTRL_DEEPSLEEP, false);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to power off MSPI, code:%d.",
			     __LINE__, ret);
		ret = -EHOSTDOWN;
		goto e_deinit_return;
	}

	ret = am_hal_mspi_deinitialize(data->mspiHandle);
	if
(ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to deinit MSPI.", __LINE__);
		ret = -ENODEV;
		goto e_deinit_return;
	}

	return ret;

e_deinit_return:
	k_mutex_unlock(&data->lock);
	return ret;
}

/** DMA specific config */
static int mspi_xfer_config(const struct device *controller,
			    const struct mspi_xfer *xfer)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	am_hal_mspi_dev_config_t hal_dev_cfg = data->hal_dev_cfg;
	am_hal_mspi_request_e eRequest;
	int ret = 0;

	/* re-apply the cached scrambling preference */
	if (data->scramble_cfg.enable) {
		eRequest = AM_HAL_MSPI_REQ_SCRAMB_EN;
	} else {
		eRequest = AM_HAL_MSPI_REQ_SCRAMB_DIS;
	}

	ret = am_hal_mspi_disable(data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to disable MSPI, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	ret = am_hal_mspi_control(data->mspiHandle, eRequest, NULL);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u,Unable to complete scramble config:%d.",
			     __LINE__, data->scramble_cfg.enable);
		return -EHOSTDOWN;
	}

	if (xfer->cmd_length > AM_HAL_MSPI_INSTR_2_BYTE + 1) {
		LOG_INST_ERR(cfg->log, "%u, cmd_length is too large.", __LINE__);
		return -ENOTSUP;
	}
	if (xfer->cmd_length == 0) {
		hal_dev_cfg.bSendInstr = false;
	} else {
		hal_dev_cfg.bSendInstr = true;
		hal_dev_cfg.eInstrCfg = xfer->cmd_length - 1;
	}

	if (xfer->addr_length > AM_HAL_MSPI_ADDR_4_BYTE + 1) {
		LOG_INST_ERR(cfg->log, "%u, addr_length is too large.", __LINE__);
		return -ENOTSUP;
	}
	if (xfer->addr_length == 0) {
		hal_dev_cfg.bSendAddr = false;
	} else {
		hal_dev_cfg.bSendAddr = true;
		hal_dev_cfg.eAddrCfg = xfer->addr_length - 1;
	}

	hal_dev_cfg.bTurnaround = (xfer->rx_dummy != 0);
	hal_dev_cfg.ui8TurnAround = (uint8_t)xfer->rx_dummy;
	hal_dev_cfg.bEnWriteLatency = (xfer->tx_dummy != 0);
	hal_dev_cfg.ui8WriteLatency = (uint8_t)xfer->tx_dummy;

	ret = am_hal_mspi_device_configure(data->mspiHandle, &hal_dev_cfg);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to configure MSPI, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	ret = am_hal_mspi_enable(data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to enable MSPI, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	data->hal_dev_cfg = hal_dev_cfg;
	return ret;
}

/* Controller-level configuration: validates op mode / max_freq / duplex /
 * DQS against hardware limits, optionally re-initializes, then powers up
 * the HAL instance, applies pinctrl and arms CQUPD/ERR interrupts.
 */
static int mspi_ambiq_config(const struct mspi_dt_spec *spec)
{
	const struct mspi_cfg *config = &spec->config;
	const struct mspi_ambiq_config *cfg = spec->bus->config;
	struct mspi_ambiq_data *data = spec->bus->data;
	int ret = 0;

	if (config->op_mode != MSPI_OP_MODE_CONTROLLER) {
		LOG_INST_ERR(cfg->log, "%u, only support MSPI controller mode.", __LINE__);
		return -ENOTSUP;
	}

	if (config->max_freq > MSPI_MAX_FREQ) {
		LOG_INST_ERR(cfg->log, "%u, max_freq too large.", __LINE__);
		return -ENOTSUP;
	}

	if (config->duplex != MSPI_HALF_DUPLEX) {
		LOG_INST_ERR(cfg->log, "%u, only support half duplex mode.", __LINE__);
		return -ENOTSUP;
	}

	if (config->dqs_support) {
		LOG_INST_ERR(cfg->log, "%u, only support non-DQS mode.", __LINE__);
		return -ENOTSUP;
	}

	if (config->re_init) {
		ret = mspi_ambiq_deinit(spec->bus);
		if (ret) {
			return ret;
		}
	}

	ret = am_hal_mspi_initialize(config->channel_num, &data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to initialize MSPI, code:%d.",
			     __LINE__, ret);
		return -EPERM;
	}

	ret = am_hal_mspi_power_control(data->mspiHandle, AM_HAL_SYSCTRL_WAKE, false);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to power on MSPI, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	ret = am_hal_mspi_enable(data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to Enable MSPI, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret) {
		return ret;
	}

	ret = am_hal_mspi_interrupt_clear(data->mspiHandle,
					  AM_HAL_MSPI_INT_CQUPD | AM_HAL_MSPI_INT_ERR);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to clear interrupt, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	ret = am_hal_mspi_interrupt_enable(data->mspiHandle,
					   AM_HAL_MSPI_INT_CQUPD | AM_HAL_MSPI_INT_ERR);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to turn on interrupt, code:%d.",
			     __LINE__, ret);
		return
-EHOSTDOWN;
	}

	cfg->irq_cfg_func();

	mspi_context_unlock_unconditionally(&data->ctx);

	if (config->re_init) {
		k_mutex_unlock(&data->lock);
	}

	return ret;
}

/* Per-device configuration. param_mask selects which fields of dev_cfg to
 * apply: NONE just claims the controller, ALL reprograms everything, and a
 * partial mask updates individual settings on the already-selected device.
 * Takes the controller mutex when switching devices (released on errors at
 * e_return, otherwise held until mspi_ambiq_get_channel_status).
 */
static int mspi_ambiq_dev_config(const struct device *controller,
				 const struct mspi_dev_id *dev_id,
				 const enum mspi_dev_cfg_mask param_mask,
				 const struct mspi_dev_cfg *dev_cfg)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	am_hal_mspi_dev_config_t hal_dev_cfg = data->hal_dev_cfg;
	int ret = 0;

	if (data->dev_id != dev_id) {
		if (k_mutex_lock(&data->lock,
				 K_MSEC(CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE))) {
			LOG_INST_ERR(cfg->log, "%u, fail to gain controller access.", __LINE__);
			return -EBUSY;
		}

		ret = mspi_verify_device(controller, dev_id);
		if (ret) {
			goto e_return;
		}
	}

	if (mspi_is_inp(controller)) {
		ret = -EBUSY;
		goto e_return;
	}

	if (param_mask == MSPI_DEVICE_CONFIG_NONE &&
	    !cfg->mspicfg.sw_multi_periph) {
		/* Do nothing except obtaining the controller lock */
		data->dev_id = (struct mspi_dev_id *)dev_id;
		return ret;
	} else if (param_mask != MSPI_DEVICE_CONFIG_ALL) {
		if (data->dev_id != dev_id) {
			LOG_INST_ERR(cfg->log, "%u, config failed, must be the same device.",
				     __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}

		if ((param_mask & (~(MSPI_DEVICE_CONFIG_FREQUENCY |
				     MSPI_DEVICE_CONFIG_IO_MODE |
				     MSPI_DEVICE_CONFIG_CE_NUM |
				     MSPI_DEVICE_CONFIG_DATA_RATE |
				     MSPI_DEVICE_CONFIG_CMD_LEN |
				     MSPI_DEVICE_CONFIG_ADDR_LEN)))) {
			LOG_INST_ERR(cfg->log, "%u, config type not supported.", __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}

		if (param_mask & MSPI_DEVICE_CONFIG_FREQUENCY) {
			hal_dev_cfg.eClockFreq = mspi_set_freq(cfg, dev_cfg->freq);
			if (hal_dev_cfg.eClockFreq == 0) {
				ret = -ENOTSUP;
				goto e_return;
			}
			ret = am_hal_mspi_control(data->mspiHandle,
						  AM_HAL_MSPI_REQ_CLOCK_CONFIG,
						  &hal_dev_cfg.eClockFreq);
			if (ret) {
				LOG_INST_ERR(cfg->log, "%u, failed to configure eClockFreq.",
					     __LINE__);
				ret = -EHOSTDOWN;
				goto e_return;
			}
			data->dev_cfg.freq = dev_cfg->freq;
		}

		if ((param_mask &
MSPI_DEVICE_CONFIG_IO_MODE) || (param_mask & MSPI_DEVICE_CONFIG_CE_NUM) || (param_mask & MSPI_DEVICE_CONFIG_DATA_RATE)) { hal_dev_cfg.eDeviceConfig = mspi_set_line(cfg, dev_cfg->io_mode, dev_cfg->data_rate, dev_cfg->ce_num); if (hal_dev_cfg.eDeviceConfig == AM_HAL_MSPI_FLASH_MAX) { ret = -ENOTSUP; goto e_return; } ret = am_hal_mspi_control(data->mspiHandle, AM_HAL_MSPI_REQ_DEVICE_CONFIG, &hal_dev_cfg.eDeviceConfig); if (ret) { LOG_INST_ERR(cfg->log, "%u, failed to configure device.", __LINE__); ret = -EHOSTDOWN; goto e_return; } data->dev_cfg.freq = dev_cfg->io_mode; data->dev_cfg.data_rate = dev_cfg->data_rate; data->dev_cfg.ce_num = dev_cfg->ce_num; } if (param_mask & MSPI_DEVICE_CONFIG_CMD_LEN) { if (dev_cfg->cmd_length > AM_HAL_MSPI_INSTR_2_BYTE + 1 || dev_cfg->cmd_length == 0) { LOG_INST_ERR(cfg->log, "%u, invalid cmd_length.", __LINE__); ret = -ENOTSUP; goto e_return; } hal_dev_cfg.eInstrCfg = dev_cfg->cmd_length - 1; ret = am_hal_mspi_control(data->mspiHandle, AM_HAL_MSPI_REQ_ISIZE_SET, &hal_dev_cfg.eInstrCfg); if (ret) { LOG_INST_ERR(cfg->log, "%u, failed to configure cmd_length.", __LINE__); ret = -EHOSTDOWN; goto e_return; } data->dev_cfg.cmd_length = dev_cfg->cmd_length; } if (param_mask & MSPI_DEVICE_CONFIG_ADDR_LEN) { if (dev_cfg->addr_length > AM_HAL_MSPI_ADDR_4_BYTE + 1 || dev_cfg->addr_length == 0) { LOG_INST_ERR(cfg->log, "%u, invalid addr_length.", __LINE__); ret = -ENOTSUP; goto e_return; } hal_dev_cfg.eAddrCfg = dev_cfg->addr_length - 1; ret = am_hal_mspi_control(data->mspiHandle, AM_HAL_MSPI_REQ_ASIZE_SET, &hal_dev_cfg.eAddrCfg); if (ret) { LOG_INST_ERR(cfg->log, "%u, failed to configure addr_length.", __LINE__); ret = -EHOSTDOWN; goto e_return; } data->dev_cfg.addr_length = dev_cfg->addr_length; } } else { if (data->dev_id != dev_id) { ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_PRIV_START + dev_id->dev_idx); if (ret) { goto e_return; } } if (memcmp(&data->dev_cfg, dev_cfg, sizeof(struct mspi_dev_cfg)) == 0) { /** Nothing to config */ 
data->dev_id = (struct mspi_dev_id *)dev_id;
			return ret;
		}

		if (dev_cfg->endian != MSPI_XFER_LITTLE_ENDIAN) {
			LOG_INST_ERR(cfg->log, "%u, only support MSB first.", __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}

		if (dev_cfg->dqs_enable && !cfg->mspicfg.dqs_support) {
			LOG_INST_ERR(cfg->log, "%u, only support non-DQS mode.", __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}

		hal_dev_cfg.eSpiMode = dev_cfg->cpp;
		hal_dev_cfg.bEnWriteLatency = (dev_cfg->tx_dummy != 0);
		hal_dev_cfg.ui8WriteLatency = dev_cfg->tx_dummy;
		hal_dev_cfg.bTurnaround = (dev_cfg->rx_dummy != 0);
		hal_dev_cfg.ui8TurnAround = dev_cfg->rx_dummy;

		hal_dev_cfg.eClockFreq = mspi_set_freq(cfg, dev_cfg->freq);
		if (hal_dev_cfg.eClockFreq == 0) {
			ret = -ENOTSUP;
			goto e_return;
		}

		hal_dev_cfg.eDeviceConfig = mspi_set_line(cfg, dev_cfg->io_mode,
							  dev_cfg->data_rate,
							  dev_cfg->ce_num);
		if (hal_dev_cfg.eDeviceConfig == AM_HAL_MSPI_FLASH_MAX) {
			ret = -ENOTSUP;
			goto e_return;
		}

		if (dev_cfg->cmd_length > AM_HAL_MSPI_INSTR_2_BYTE + 1) {
			LOG_INST_ERR(cfg->log, "%u, cmd_length too large.", __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}
		if (dev_cfg->cmd_length == 0) {
			hal_dev_cfg.bSendInstr = false;
		} else {
			hal_dev_cfg.bSendInstr = true;
			hal_dev_cfg.eInstrCfg = dev_cfg->cmd_length - 1;
		}

		if (dev_cfg->addr_length > AM_HAL_MSPI_ADDR_4_BYTE + 1) {
			LOG_INST_ERR(cfg->log, "%u, addr_length too large.", __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}
		if (dev_cfg->addr_length == 0) {
			hal_dev_cfg.bSendAddr = false;
		} else {
			hal_dev_cfg.bSendAddr = true;
			hal_dev_cfg.eAddrCfg = dev_cfg->addr_length - 1;
		}

		hal_dev_cfg.ui8ReadInstr = (uint8_t)dev_cfg->read_cmd;
		hal_dev_cfg.ui8WriteInstr = (uint8_t)dev_cfg->write_cmd;

		hal_dev_cfg.eDMABoundary = mspi_set_mem_boundary(dev_cfg->mem_boundary);
		if (hal_dev_cfg.eDMABoundary >= AM_HAL_MSPI_BOUNDARY_MAX) {
			LOG_INST_ERR(cfg->log, "%u, mem_boundary too large.", __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}

		/** ui16DMATimeLimit unit is in 0.1us */
		hal_dev_cfg.ui16DMATimeLimit = dev_cfg->time_to_break * 10;

		ret = am_hal_mspi_disable(data->mspiHandle);
		if (ret) {
			LOG_INST_ERR(cfg->log, "%u, fail to disable MSPI, code:%d.",
				     __LINE__, ret);
			ret = -EHOSTDOWN;
			goto e_return;
		}

		ret = am_hal_mspi_device_configure(data->mspiHandle, &hal_dev_cfg);
		if (ret) {
			LOG_INST_ERR(cfg->log, "%u, fail to configure MSPI, code:%d.",
				     __LINE__, ret);
			ret = -EHOSTDOWN;
			goto e_return;
		}

		ret = am_hal_mspi_enable(data->mspiHandle);
		if (ret) {
			LOG_INST_ERR(cfg->log, "%u, fail to enable MSPI, code:%d.",
				     __LINE__, ret);
			ret = -EHOSTDOWN;
			goto e_return;
		}

		data->dev_cfg = *dev_cfg;
		data->dev_id = (struct mspi_dev_id *)dev_id;
	}

	data->hal_dev_cfg = hal_dev_cfg;
	return ret;

e_return:
	k_mutex_unlock(&data->lock);
	return ret;
}

/* Enable/disable XIP (execute-in-place) for the currently-owned device. */
static int mspi_ambiq_xip_config(const struct device *controller,
				 const struct mspi_dev_id *dev_id,
				 const struct mspi_xip_cfg *xip_cfg)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	am_hal_mspi_request_e eRequest;
	int ret = 0;

	if (dev_id != data->dev_id) {
		LOG_INST_ERR(cfg->log, "%u, dev_id don't match.", __LINE__);
		return -ESTALE;
	}

	if (xip_cfg->enable) {
		eRequest = AM_HAL_MSPI_REQ_XIP_EN;
	} else {
		eRequest = AM_HAL_MSPI_REQ_XIP_DIS;
	}

	ret = am_hal_mspi_control(data->mspiHandle, eRequest, NULL);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u,Unable to complete xip config:%d.",
			     __LINE__, xip_cfg->enable);
		return -EHOSTDOWN;
	}

	data->xip_cfg = *xip_cfg;
	return ret;
}

/* Enable/disable scrambling over an address window for the owned device;
 * requires the controller to be idle and reprograms the HAL device config.
 */
static int mspi_ambiq_scramble_config(const struct device *controller,
				      const struct mspi_dev_id *dev_id,
				      const struct mspi_scramble_cfg *scramble_cfg)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	am_hal_mspi_dev_config_t hal_dev_cfg = data->hal_dev_cfg;
	am_hal_mspi_request_e eRequest;
	int ret = 0;

	if (mspi_is_inp(controller)) {
		return -EBUSY;
	}

	if (dev_id != data->dev_id) {
		LOG_INST_ERR(cfg->log, "%u, dev_id don't match.", __LINE__);
		return -ESTALE;
	}

	if (scramble_cfg->enable) {
		eRequest =
AM_HAL_MSPI_REQ_SCRAMB_EN;
	} else {
		eRequest = AM_HAL_MSPI_REQ_SCRAMB_DIS;
	}

	ret = am_hal_mspi_disable(data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to disable MSPI, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	ret = am_hal_mspi_control(data->mspiHandle, eRequest, NULL);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u,Unable to complete scramble config:%d.",
			     __LINE__, scramble_cfg->enable);
		return -EHOSTDOWN;
	}

	hal_dev_cfg.scramblingStartAddr = 0 + scramble_cfg->address_offset;
	hal_dev_cfg.scramblingEndAddr = hal_dev_cfg.scramblingStartAddr + scramble_cfg->size;

	ret = am_hal_mspi_device_configure(data->mspiHandle, &hal_dev_cfg);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to configure MSPI, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	ret = am_hal_mspi_enable(data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to enable MSPI, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	data->scramble_cfg = *scramble_cfg;
	data->hal_dev_cfg = hal_dev_cfg;
	return ret;
}

/* Apply vendor-specific timing (write latency / read turnaround) via the
 * HAL timing-scan request; param_mask selects MSPI_AMBIQ_SET_WLC/RLC.
 */
static int mspi_ambiq_timing_config(const struct device *controller,
				    const struct mspi_dev_id *dev_id,
				    const uint32_t param_mask, void *timing_cfg)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	am_hal_mspi_dev_config_t hal_dev_cfg = data->hal_dev_cfg;
	struct mspi_ambiq_timing_cfg *time_cfg = timing_cfg;
	am_hal_mspi_timing_scan_t timing;
	int ret = 0;

	if (mspi_is_inp(controller)) {
		return -EBUSY;
	}

	if (dev_id != data->dev_id) {
		LOG_INST_ERR(cfg->log, "%u, dev_id don't match.", __LINE__);
		return -ESTALE;
	}

	if (param_mask & (~(MSPI_AMBIQ_SET_WLC | MSPI_AMBIQ_SET_RLC))) {
		LOG_INST_ERR(cfg->log, "%u, config type not supported.", __LINE__);
		return -ENOTSUP;
	}

	if (param_mask & MSPI_AMBIQ_SET_WLC) {
		if (time_cfg->ui8WriteLatency) {
			hal_dev_cfg.bEnWriteLatency = true;
		} else {
			hal_dev_cfg.bEnWriteLatency = false;
		}
		hal_dev_cfg.ui8WriteLatency = time_cfg->ui8WriteLatency;
	}

	if (param_mask & MSPI_AMBIQ_SET_RLC) {
		if (time_cfg->ui8TurnAround) {
			hal_dev_cfg.bTurnaround = true;
		} else {
			hal_dev_cfg.bTurnaround = false;
		}
		hal_dev_cfg.ui8TurnAround = time_cfg->ui8TurnAround;
	}

	timing.ui8Turnaround = hal_dev_cfg.ui8TurnAround;
	timing.ui8WriteLatency = hal_dev_cfg.ui8WriteLatency;

	ret = am_hal_mspi_control(data->mspiHandle, AM_HAL_MSPI_REQ_TIMING_SCAN, &timing);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to configure timing.", __LINE__);
		return -EHOSTDOWN;
	}

	data->hal_dev_cfg = hal_dev_cfg;
	return ret;
}

/* Report channel status: -EBUSY while the hardware busy bit or an active
 * transfer context is set; on success also drops device ownership and
 * releases the controller mutex taken in dev_config.
 */
static int mspi_ambiq_get_channel_status(const struct device *controller, uint8_t ch)
{
	ARG_UNUSED(ch);

	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	int ret = 0;

	if (sys_read32(cfg->reg_base) & MSPI_BUSY) {
		ret = -EBUSY;
	}

	if (mspi_is_inp(controller)) {
		return -EBUSY;
	}

	data->dev_id = NULL;
	k_mutex_unlock(&data->lock);

	return ret;
}

/* MSPI IRQ: read, clear and forward the status to the HAL service routine. */
static void mspi_ambiq_isr(const struct device *dev)
{
	struct mspi_ambiq_data *data = dev->data;
	uint32_t ui32Status;

	am_hal_mspi_interrupt_status_get(data->mspiHandle, &ui32Status, false);
	am_hal_mspi_interrupt_clear(data->mspiHandle, ui32Status);
	am_hal_mspi_interrupt_service(data->mspiHandle, ui32Status);
}

/** Manage sync dma transceive */
static void hal_mspi_callback(void *pCallbackCtxt, uint32_t status)
{
	const struct device *controller = pCallbackCtxt;
	struct mspi_ambiq_data *data = controller->data;

	data->ctx.packets_done++;
}

/* Prepare a PIO transfer descriptor from the current xfer settings and push
 * the instruction/address sizes into the HAL; also refreshes the cached
 * cmd_length/addr_length in data->dev_cfg.
 */
static int mspi_pio_prepare(const struct device *controller, am_hal_mspi_pio_transfer_t *trans)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	const struct mspi_xfer *xfer = &data->ctx.xfer;
	int ret = 0;

	trans->bScrambling = false;
	trans->bSendAddr = (xfer->addr_length != 0);
	trans->bSendInstr = (xfer->cmd_length != 0);
	trans->bTurnaround = (xfer->rx_dummy != 0);
	trans->bEnWRLatency = (xfer->tx_dummy != 0);
	trans->bDCX = false;
	trans->bQuadCmd = false;
	trans->bContinue = false;

	if (xfer->cmd_length > AM_HAL_MSPI_INSTR_2_BYTE +
1) {
		LOG_INST_ERR(cfg->log, "%u, invalid cmd_length.", __LINE__);
		return -ENOTSUP;
	}
	if (xfer->cmd_length != 0) {
		am_hal_mspi_instr_e eInstrCfg = xfer->cmd_length - 1;

		ret = am_hal_mspi_control(data->mspiHandle, AM_HAL_MSPI_REQ_ISIZE_SET,
					  &eInstrCfg);
		if (ret) {
			LOG_INST_ERR(cfg->log, "%u, failed to configure cmd_length.",
				     __LINE__);
			return -EHOSTDOWN;
		}
		data->hal_dev_cfg.eInstrCfg = eInstrCfg;
	}
	data->dev_cfg.cmd_length = xfer->cmd_length;

	if (xfer->addr_length > AM_HAL_MSPI_ADDR_4_BYTE + 1) {
		LOG_INST_ERR(cfg->log, "%u, invalid addr_length.", __LINE__);
		return -ENOTSUP;
	}
	if (xfer->addr_length != 0) {
		am_hal_mspi_addr_e eAddrCfg = xfer->addr_length - 1;

		ret = am_hal_mspi_control(data->mspiHandle, AM_HAL_MSPI_REQ_ASIZE_SET,
					  &eAddrCfg);
		if (ret) {
			LOG_INST_ERR(cfg->log, "%u, failed to configure addr_length.",
				     __LINE__);
			return -EHOSTDOWN;
		}
		data->hal_dev_cfg.eAddrCfg = eAddrCfg;
	}
	data->dev_cfg.addr_length = xfer->addr_length;

	return ret;
}

/* Execute a (possibly multi-packet) transfer in PIO mode: blocking HAL
 * transfers when synchronous, interrupt-driven non-blocking transfers with
 * optional per-packet completion callbacks when asynchronous.
 */
static int mspi_pio_transceive(const struct device *controller,
			       const struct mspi_xfer *xfer,
			       mspi_callback_handler_t cb,
			       struct mspi_callback_context *cb_ctx)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	struct mspi_context *ctx = &data->ctx;
	const struct mspi_xfer_packet *packet;
	uint32_t packet_idx;
	am_hal_mspi_pio_transfer_t trans;
	int ret = 0;
	int cfg_flag = 0;

	if (xfer->num_packet == 0 ||
	    !xfer->packets ||
	    xfer->timeout > CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE) {
		return -EFAULT;
	}

	cfg_flag = mspi_context_lock(ctx, data->dev_id, xfer, cb, cb_ctx, true);
	/** For async, user must make sure when cfg_flag = 0 the dummy and instr addr length
	 * in mspi_xfer of the two calls are the same if the first one has not finished yet.
	 */
	if (cfg_flag) {
		if (cfg_flag == 1) {
			ret = mspi_pio_prepare(controller, &trans);
			if (ret) {
				goto pio_err;
			}
		} else {
			ret = cfg_flag;
			goto pio_err;
		}
	}

	if (!ctx->xfer.async) {
		/* synchronous: blocking HAL transfer per packet */
		while (ctx->packets_left > 0) {
			packet_idx = ctx->xfer.num_packet - ctx->packets_left;
			packet = &ctx->xfer.packets[packet_idx];
			trans.eDirection = packet->dir;
			trans.ui16DeviceInstr = (uint16_t)packet->cmd;
			trans.ui32DeviceAddr = packet->address;
			trans.ui32NumBytes = packet->num_bytes;
			trans.pui32Buffer = (uint32_t *)packet->data_buf;

			ret = am_hal_mspi_blocking_transfer(data->mspiHandle, &trans,
							    MSPI_TIMEOUT_US);
			ctx->packets_left--;
			if (ret) {
				ret = -EIO;
				goto pio_err;
			}
		}
	} else {
		ret = am_hal_mspi_interrupt_enable(data->mspiHandle, AM_HAL_MSPI_INT_DMACMP);
		if (ret) {
			LOG_INST_ERR(cfg->log, "%u, failed to enable interrupt.", __LINE__);
			ret = -EHOSTDOWN;
			goto pio_err;
		}

		while (ctx->packets_left > 0) {
			packet_idx = ctx->xfer.num_packet - ctx->packets_left;
			packet = &ctx->xfer.packets[packet_idx];
			trans.eDirection = packet->dir;
			trans.ui16DeviceInstr = (uint16_t)packet->cmd;
			trans.ui32DeviceAddr = packet->address;
			trans.ui32NumBytes = packet->num_bytes;
			trans.pui32Buffer = (uint32_t *)packet->data_buf;

			/* pre-fill the event the user callback will receive */
			if (ctx->callback && packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) {
				ctx->callback_ctx->mspi_evt.evt_type = MSPI_BUS_XFER_COMPLETE;
				ctx->callback_ctx->mspi_evt.evt_data.controller = controller;
				ctx->callback_ctx->mspi_evt.evt_data.dev_id = data->ctx.owner;
				ctx->callback_ctx->mspi_evt.evt_data.packet = packet;
				ctx->callback_ctx->mspi_evt.evt_data.packet_idx = packet_idx;
				ctx->callback_ctx->mspi_evt.evt_data.status = ~0;
			}

			am_hal_mspi_callback_t callback = NULL;

			if (packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) {
				callback = (am_hal_mspi_callback_t)ctx->callback;
			}

			ret = am_hal_mspi_nonblocking_transfer(data->mspiHandle, &trans,
							       MSPI_PIO, callback,
							       (void *)ctx->callback_ctx);
			ctx->packets_left--;
			if (ret) {
				if (ret == AM_HAL_STATUS_OUT_OF_RANGE) {
					ret = -ENOMEM;
				} else {
					ret = -EIO;
				}
				goto pio_err;
			}
		}
	}

pio_err:
	mspi_context_release(ctx);
	return ret;
}

/* Execute a (possibly multi-packet) transfer in DMA mode. */
static int mspi_dma_transceive(const struct device *controller,
			       const struct mspi_xfer *xfer,
			       mspi_callback_handler_t cb,
			       struct mspi_callback_context *cb_ctx)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	struct mspi_context *ctx = &data->ctx;
	am_hal_mspi_dma_transfer_t trans;
	int ret = 0;
	int cfg_flag = 0;

	if (xfer->num_packet == 0 ||
	    !xfer->packets ||
	    xfer->timeout > CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE) {
		return -EFAULT;
	}

	cfg_flag = mspi_context_lock(ctx, data->dev_id, xfer, cb, cb_ctx, true);
	/** For async, user must make sure when cfg_flag = 0 the dummy and instr addr length
	 * in mspi_xfer of the two calls are the same if the first one has not finished yet.
	 */
	if (cfg_flag) {
		if (cfg_flag == 1) {
			ret = mspi_xfer_config(controller, xfer);
			if (ret) {
				goto dma_err;
			}
		} else {
			ret = cfg_flag;
			goto dma_err;
		}
	}

	ret = am_hal_mspi_interrupt_enable(data->mspiHandle, AM_HAL_MSPI_INT_DMACMP);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, failed to enable interrupt.", __LINE__);
		ret = -EHOSTDOWN;
		goto dma_err;
	}

	while (ctx->packets_left > 0) {
		uint32_t packet_idx = ctx->xfer.num_packet - ctx->packets_left;
		const struct mspi_xfer_packet *packet;

		packet = &ctx->xfer.packets[packet_idx];
		trans.ui8Priority = ctx->xfer.priority;
		trans.eDirection = packet->dir;
		trans.ui32TransferCount = packet->num_bytes;
		trans.ui32DeviceAddress = packet->address;
		trans.ui32SRAMAddress = (uint32_t)packet->data_buf;
		trans.ui32PauseCondition = 0;
		trans.ui32StatusSetClr = 0;

		if (ctx->xfer.async) {
			if (ctx->callback && packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) {
				ctx->callback_ctx->mspi_evt.evt_type = MSPI_BUS_XFER_COMPLETE;
				ctx->callback_ctx->mspi_evt.evt_data.controller = controller;
				ctx->callback_ctx->mspi_evt.evt_data.dev_id = data->ctx.owner;
				ctx->callback_ctx->mspi_evt.evt_data.packet = packet;
				ctx->callback_ctx->mspi_evt.evt_data.packet_idx = packet_idx;
ctx->callback_ctx->mspi_evt.evt_data.status = ~0; } am_hal_mspi_callback_t callback = NULL; if (packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) { callback = (am_hal_mspi_callback_t)ctx->callback; } ret = am_hal_mspi_nonblocking_transfer(data->mspiHandle, &trans, MSPI_DMA, callback, (void *)ctx->callback_ctx); } else { ret = am_hal_mspi_nonblocking_transfer(data->mspiHandle, &trans, MSPI_DMA, hal_mspi_callback, (void *)controller); } ctx->packets_left--; if (ret) { if (ret == AM_HAL_STATUS_OUT_OF_RANGE) { ret = -ENOMEM; } else { ret = -EIO; } goto dma_err; } } if (!ctx->xfer.async) { while (ctx->packets_done < ctx->xfer.num_packet) { k_busy_wait(10); } } dma_err: mspi_context_release(ctx); return ret; } static int mspi_ambiq_transceive(const struct device *controller, const struct mspi_dev_id *dev_id, const struct mspi_xfer *xfer) { const struct mspi_ambiq_config *cfg = controller->config; struct mspi_ambiq_data *data = controller->data; mspi_callback_handler_t cb = NULL; struct mspi_callback_context *cb_ctx = NULL; if (dev_id != data->dev_id) { LOG_INST_ERR(cfg->log, "%u, dev_id don't match.", __LINE__); return -ESTALE; } if (xfer->async) { cb = data->cbs[MSPI_BUS_XFER_COMPLETE]; cb_ctx = data->cb_ctxs[MSPI_BUS_XFER_COMPLETE]; } if (xfer->xfer_mode == MSPI_PIO) { return mspi_pio_transceive(controller, xfer, cb, cb_ctx); } else if (xfer->xfer_mode == MSPI_DMA) { return mspi_dma_transceive(controller, xfer, cb, cb_ctx); } else { return -EIO; } } static int mspi_ambiq_register_callback(const struct device *controller, const struct mspi_dev_id *dev_id, const enum mspi_bus_event evt_type, mspi_callback_handler_t cb, struct mspi_callback_context *ctx) { const struct mspi_ambiq_config *cfg = controller->config; struct mspi_ambiq_data *data = controller->data; if (mspi_is_inp(controller)) { return -EBUSY; } if (dev_id != data->dev_id) { LOG_INST_ERR(cfg->log, "%u, dev_id don't match.", __LINE__); return -ESTALE; } if (evt_type != MSPI_BUS_XFER_COMPLETE) { 
LOG_INST_ERR(cfg->log, "%u, callback types not supported.", __LINE__); return -ENOTSUP; } data->cbs[evt_type] = cb; data->cb_ctxs[evt_type] = ctx; return 0; } #if CONFIG_PM_DEVICE static int mspi_ambiq_pm_action(const struct device *controller, enum pm_device_action action) { const struct mspi_ambiq_config *cfg = controller->config; struct mspi_ambiq_data *data = controller->data; int ret = 0; if (mspi_is_inp(controller)) { return -EBUSY; } switch (action) { case PM_DEVICE_ACTION_TURN_ON: ret = am_hal_mspi_power_control(data->mspiHandle, AM_HAL_SYSCTRL_WAKE, true); if (ret) { LOG_INST_ERR(cfg->log, "%u, fail to power on MSPI, code:%d.", __LINE__, ret); return -EHOSTDOWN; } break; case PM_DEVICE_ACTION_TURN_OFF: ret = am_hal_mspi_power_control(data->mspiHandle, AM_HAL_SYSCTRL_DEEPSLEEP, true); if (ret) { LOG_INST_ERR(cfg->log, "%u, fail to power off MSPI, code:%d.", __LINE__, ret); return -EHOSTDOWN; } break; default: return -ENOTSUP; } return 0; } #endif static int mspi_ambiq_init(const struct device *controller) { const struct mspi_ambiq_config *cfg = controller->config; const struct mspi_dt_spec spec = { .bus = controller, .config = cfg->mspicfg, }; return mspi_ambiq_config(&spec); } static struct mspi_driver_api mspi_ambiq_driver_api = { .config = mspi_ambiq_config, .dev_config = mspi_ambiq_dev_config, .xip_config = mspi_ambiq_xip_config, .scramble_config = mspi_ambiq_scramble_config, .timing_config = mspi_ambiq_timing_config, .get_channel_status = mspi_ambiq_get_channel_status, .register_callback = mspi_ambiq_register_callback, .transceive = mspi_ambiq_transceive, }; #define MSPI_PINCTRL_STATE_INIT(state_idx, node_id) \ COND_CODE_1(Z_PINCTRL_SKIP_STATE(state_idx, node_id), (), \ ({ \ .id = state_idx, \ .pins = Z_PINCTRL_STATE_PINS_NAME(state_idx, node_id), \ .pin_cnt = ARRAY_SIZE(Z_PINCTRL_STATE_PINS_NAME(state_idx, node_id)) \ })) #define MSPI_PINCTRL_STATES_DEFINE(node_id) \ static const struct pinctrl_state \ Z_PINCTRL_STATES_NAME(node_id)[] = { \ 
LISTIFY(DT_NUM_PINCTRL_STATES(node_id), \ MSPI_PINCTRL_STATE_INIT, (,), node_id) \ }; #define MSPI_PINCTRL_DT_DEFINE(node_id) \ LISTIFY(DT_NUM_PINCTRL_STATES(node_id), \ Z_PINCTRL_STATE_PINS_DEFINE, (;), node_id); \ MSPI_PINCTRL_STATES_DEFINE(node_id) \ Z_PINCTRL_DEV_CONFIG_STATIC Z_PINCTRL_DEV_CONFIG_CONST \ struct pinctrl_dev_config Z_PINCTRL_DEV_CONFIG_NAME(node_id) = \ Z_PINCTRL_DEV_CONFIG_INIT(node_id) #define MSPI_CONFIG(n) \ { \ .channel_num = (DT_INST_REG_ADDR(n) - MSPI0_BASE) / \ (DT_INST_REG_SIZE(n) * 4), \ .op_mode = MSPI_OP_MODE_CONTROLLER, \ .duplex = MSPI_HALF_DUPLEX, \ .max_freq = MSPI_MAX_FREQ, \ .dqs_support = false, \ .num_periph = DT_INST_CHILD_NUM(n), \ .sw_multi_periph = DT_INST_PROP(n, software_multiperipheral), \ } #define MSPI_HAL_DEVICE_CONFIG(n, cmdq, cmdq_size) \ { \ .ui8WriteLatency = 0, \ .ui8TurnAround = 0, \ .eAddrCfg = 0, \ .eInstrCfg = 0, \ .ui8ReadInstr = 0, \ .ui8WriteInstr = 0, \ .eDeviceConfig = AM_HAL_MSPI_FLASH_SERIAL_CE0, \ .eSpiMode = AM_HAL_MSPI_SPI_MODE_0, \ .eClockFreq = MSPI_MAX_FREQ / DT_INST_PROP_OR(n, \ clock_frequency, \ MSPI_MAX_FREQ), \ .bEnWriteLatency = false, \ .bSendAddr = false, \ .bSendInstr = false, \ .bTurnaround = false, \ .bEmulateDDR = false, \ .ui16DMATimeLimit = 0, \ .eDMABoundary = AM_HAL_MSPI_BOUNDARY_NONE, \ .ui32TCBSize = cmdq_size, \ .pTCB = cmdq, \ .scramblingStartAddr = 0, \ .scramblingEndAddr = 0, \ } #define AMBIQ_MSPI_DEFINE(n) \ LOG_INSTANCE_REGISTER(DT_DRV_INST(n), mspi##n, CONFIG_MSPI_LOG_LEVEL); \ MSPI_PINCTRL_DT_DEFINE(DT_DRV_INST(n)); \ static void mspi_ambiq_irq_cfg_func_##n(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \ mspi_ambiq_isr, DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } \ static uint32_t mspi_ambiq_cmdq##n[DT_INST_PROP_OR(n, cmdq_buffer_size, 1024) / 4] \ __attribute__((section(DT_INST_PROP_OR(n, cmdq_buffer_location, ".mspi_buff")))); \ static struct gpio_dt_spec ce_gpios##n[] = MSPI_CE_GPIOS_DT_SPEC_INST_GET(n); \ static struct 
mspi_ambiq_data mspi_ambiq_data##n = { \ .mspiHandle = NULL, \ .hal_dev_cfg = MSPI_HAL_DEVICE_CONFIG(n, mspi_ambiq_cmdq##n, \ DT_INST_PROP_OR(n, cmdq_buffer_size, 1024)), \ .dev_id = 0, \ .lock = Z_MUTEX_INITIALIZER(mspi_ambiq_data##n.lock), \ .dev_cfg = {0}, \ .xip_cfg = {0}, \ .scramble_cfg = {0}, \ .cbs = {0}, \ .cb_ctxs = {0}, \ .ctx.lock = Z_SEM_INITIALIZER(mspi_ambiq_data##n.ctx.lock, 0, 1), \ .ctx.callback = 0, \ .ctx.callback_ctx = 0, \ }; \ static const struct mspi_ambiq_config mspi_ambiq_config##n = { \ .reg_base = DT_INST_REG_ADDR(n), \ .reg_size = DT_INST_REG_SIZE(n), \ .mspicfg = MSPI_CONFIG(n), \ .mspicfg.ce_group = (struct gpio_dt_spec *)ce_gpios##n, \ .mspicfg.num_ce_gpios = ARRAY_SIZE(ce_gpios##n), \ .mspicfg.re_init = false, \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .irq_cfg_func = mspi_ambiq_irq_cfg_func_##n, \ LOG_INSTANCE_PTR_INIT(log, DT_DRV_INST(n), mspi##n) \ }; \ PM_DEVICE_DT_INST_DEFINE(n, mspi_ambiq_pm_action); \ DEVICE_DT_INST_DEFINE(n, \ mspi_ambiq_init, \ PM_DEVICE_DT_INST_GET(n), \ &mspi_ambiq_data##n, \ &mspi_ambiq_config##n, \ POST_KERNEL, \ CONFIG_MSPI_INIT_PRIORITY, \ &mspi_ambiq_driver_api); DT_INST_FOREACH_STATUS_OKAY(AMBIQ_MSPI_DEFINE) ```
/content/code_sandbox/drivers/mspi/mspi_ambiq_ap3.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
12,630
```c /* * */ #undef _POSIX_C_SOURCE #define _POSIX_C_SOURCE 200809L /* Required for gmtime_r */ #include <zephyr/drivers/gnss.h> #include <zephyr/drivers/gnss/gnss_publish.h> #include <zephyr/kernel.h> #include <zephyr/pm/device.h> #include <zephyr/pm/device_runtime.h> #include <string.h> #include <time.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(gnss_emul, CONFIG_GNSS_LOG_LEVEL); #define DT_DRV_COMPAT zephyr_gnss_emul #define GNSS_EMUL_DEFAULT_FIX_INTERVAL_MS 1000 #define GNSS_EMUL_MIN_FIX_INTERVAL_MS 100 #define GNSS_EMUL_FIX_ACQUIRE_TIME_MS 5000 #define GNSS_EMUL_DEFAULT_NAV_MODE GNSS_NAVIGATION_MODE_BALANCED_DYNAMICS #define GNSS_EMUL_SUPPORTED_SYSTEMS_MASK 0xFF #define GNSS_EMUL_SUPPORTED_SYSTEMS_COUNT 8 #define GNSS_EMUL_DEFAULT_ENABLED_SYSTEMS_MASK GNSS_EMUL_SUPPORTED_SYSTEMS_MASK struct gnss_emul_data { const struct device *dev; struct k_work_delayable data_dwork; struct k_sem lock; int64_t resume_timestamp_ms; int64_t fix_timestamp_ms; uint32_t fix_interval_ms; enum gnss_navigation_mode nav_mode; gnss_systems_t enabled_systems; struct gnss_data data; #ifdef CONFIG_GNSS_SATELLITES struct gnss_satellite satellites[GNSS_EMUL_SUPPORTED_SYSTEMS_COUNT]; uint8_t satellites_len; #endif }; static void gnss_emul_lock_sem(const struct device *dev) { struct gnss_emul_data *data = dev->data; (void)k_sem_take(&data->lock, K_FOREVER); } static void gnss_emul_unlock_sem(const struct device *dev) { struct gnss_emul_data *data = dev->data; k_sem_give(&data->lock); } static void gnss_emul_update_fix_timestamp(const struct device *dev, bool resuming) { struct gnss_emul_data *data = dev->data; int64_t uptime_ms; uptime_ms = k_uptime_get(); data->fix_timestamp_ms = ((uptime_ms / data->fix_interval_ms) + 1) * data->fix_interval_ms; if (resuming) { data->resume_timestamp_ms = data->fix_timestamp_ms; } } static bool gnss_emul_fix_is_acquired(const struct device *dev) { struct gnss_emul_data *data = dev->data; int64_t time_since_resume; time_since_resume = 
data->fix_timestamp_ms - data->resume_timestamp_ms; return time_since_resume >= GNSS_EMUL_FIX_ACQUIRE_TIME_MS; } #ifdef CONFIG_PM_DEVICE static void gnss_emul_clear_fix_timestamp(const struct device *dev) { struct gnss_emul_data *data = dev->data; data->fix_timestamp_ms = 0; } #endif static void gnss_emul_schedule_work(const struct device *dev) { struct gnss_emul_data *data = dev->data; k_work_schedule(&data->data_dwork, K_TIMEOUT_ABS_MS(data->fix_timestamp_ms)); } static bool gnss_emul_cancel_work(const struct device *dev) { struct gnss_emul_data *data = dev->data; struct k_work_sync sync; return k_work_cancel_delayable_sync(&data->data_dwork, &sync); } static bool gnss_emul_is_resumed(const struct device *dev) { struct gnss_emul_data *data = dev->data; return data->fix_timestamp_ms > 0; } static void gnss_emul_lock(const struct device *dev) { gnss_emul_lock_sem(dev); gnss_emul_cancel_work(dev); } static void gnss_emul_unlock(const struct device *dev) { if (gnss_emul_is_resumed(dev)) { gnss_emul_schedule_work(dev); } gnss_emul_unlock_sem(dev); } static int gnss_emul_set_fix_rate(const struct device *dev, uint32_t fix_interval_ms) { struct gnss_emul_data *data = dev->data; if (fix_interval_ms < GNSS_EMUL_MIN_FIX_INTERVAL_MS) { return -EINVAL; } data->fix_interval_ms = fix_interval_ms; return 0; } static int gnss_emul_get_fix_rate(const struct device *dev, uint32_t *fix_interval_ms) { struct gnss_emul_data *data = dev->data; *fix_interval_ms = data->fix_interval_ms; return 0; } static int gnss_emul_set_navigation_mode(const struct device *dev, enum gnss_navigation_mode mode) { struct gnss_emul_data *data = dev->data; if (mode > GNSS_NAVIGATION_MODE_HIGH_DYNAMICS) { return -EINVAL; } data->nav_mode = mode; return 0; } static int gnss_emul_get_navigation_mode(const struct device *dev, enum gnss_navigation_mode *mode) { struct gnss_emul_data *data = dev->data; *mode = data->nav_mode; return 0; } static int gnss_emul_set_enabled_systems(const struct device *dev, 
gnss_systems_t systems) { struct gnss_emul_data *data = dev->data; if (systems > GNSS_EMUL_SUPPORTED_SYSTEMS_MASK) { return -EINVAL; } data->enabled_systems = systems; return 0; } static int gnss_emul_get_enabled_systems(const struct device *dev, gnss_systems_t *systems) { struct gnss_emul_data *data = dev->data; *systems = data->enabled_systems; return 0; } #ifdef CONFIG_PM_DEVICE static void gnss_emul_resume(const struct device *dev) { gnss_emul_update_fix_timestamp(dev, true); } static void gnss_emul_suspend(const struct device *dev) { gnss_emul_clear_fix_timestamp(dev); } static int gnss_emul_pm_action(const struct device *dev, enum pm_device_action action) { int ret = 0; gnss_emul_lock(dev); switch (action) { case PM_DEVICE_ACTION_SUSPEND: gnss_emul_suspend(dev); break; case PM_DEVICE_ACTION_RESUME: gnss_emul_resume(dev); break; default: ret = -ENOTSUP; break; } gnss_emul_unlock(dev); return ret; } #endif static int gnss_emul_api_set_fix_rate(const struct device *dev, uint32_t fix_interval_ms) { int ret = -ENODEV; gnss_emul_lock(dev); if (!gnss_emul_is_resumed(dev)) { goto unlock_return; } ret = gnss_emul_set_fix_rate(dev, fix_interval_ms); unlock_return: gnss_emul_unlock(dev); return ret; } static int gnss_emul_api_get_fix_rate(const struct device *dev, uint32_t *fix_interval_ms) { int ret = -ENODEV; gnss_emul_lock(dev); if (!gnss_emul_is_resumed(dev)) { goto unlock_return; } ret = gnss_emul_get_fix_rate(dev, fix_interval_ms); unlock_return: gnss_emul_unlock(dev); return ret; } static int gnss_emul_api_set_navigation_mode(const struct device *dev, enum gnss_navigation_mode mode) { int ret = -ENODEV; gnss_emul_lock(dev); if (!gnss_emul_is_resumed(dev)) { goto unlock_return; } ret = gnss_emul_set_navigation_mode(dev, mode); unlock_return: gnss_emul_unlock(dev); return ret; } static int gnss_emul_api_get_navigation_mode(const struct device *dev, enum gnss_navigation_mode *mode) { int ret = -ENODEV; gnss_emul_lock(dev); if (!gnss_emul_is_resumed(dev)) { goto 
unlock_return; } ret = gnss_emul_get_navigation_mode(dev, mode); unlock_return: gnss_emul_unlock(dev); return ret; } static int gnss_emul_api_set_enabled_systems(const struct device *dev, gnss_systems_t systems) { int ret = -ENODEV; gnss_emul_lock(dev); if (!gnss_emul_is_resumed(dev)) { goto unlock_return; } ret = gnss_emul_set_enabled_systems(dev, systems); unlock_return: gnss_emul_unlock(dev); return ret; } static int gnss_emul_api_get_enabled_systems(const struct device *dev, gnss_systems_t *systems) { int ret = -ENODEV; gnss_emul_lock(dev); if (!gnss_emul_is_resumed(dev)) { goto unlock_return; } ret = gnss_emul_get_enabled_systems(dev, systems); unlock_return: gnss_emul_unlock(dev); return ret; } static int gnss_emul_api_get_supported_systems(const struct device *dev, gnss_systems_t *systems) { *systems = GNSS_EMUL_SUPPORTED_SYSTEMS_MASK; return 0; } static const struct gnss_driver_api api = { .set_fix_rate = gnss_emul_api_set_fix_rate, .get_fix_rate = gnss_emul_api_get_fix_rate, .set_navigation_mode = gnss_emul_api_set_navigation_mode, .get_navigation_mode = gnss_emul_api_get_navigation_mode, .set_enabled_systems = gnss_emul_api_set_enabled_systems, .get_enabled_systems = gnss_emul_api_get_enabled_systems, .get_supported_systems = gnss_emul_api_get_supported_systems, }; static void gnss_emul_clear_data(const struct device *dev) { struct gnss_emul_data *data = dev->data; memset(&data->data, 0, sizeof(data->data)); } static void gnss_emul_set_fix(const struct device *dev) { struct gnss_emul_data *data = dev->data; data->data.info.satellites_cnt = 8; data->data.info.hdop = 100; data->data.info.fix_status = GNSS_FIX_STATUS_GNSS_FIX; data->data.info.fix_quality = GNSS_FIX_QUALITY_GNSS_SPS; } static void gnss_emul_set_utc(const struct device *dev) { struct gnss_emul_data *data = dev->data; time_t timestamp; struct tm datetime; uint16_t millisecond; timestamp = (time_t)(data->fix_timestamp_ms / 1000); gmtime_r(&timestamp, &datetime); millisecond = 
(uint16_t)(data->fix_timestamp_ms % 1000) + (uint16_t)(datetime.tm_sec * 1000); data->data.utc.hour = datetime.tm_hour; data->data.utc.millisecond = millisecond; data->data.utc.minute = datetime.tm_min; data->data.utc.month = datetime.tm_mon + 1; data->data.utc.century_year = datetime.tm_year % 100; } static void gnss_emul_set_nav_data(const struct device *dev) { struct gnss_emul_data *data = dev->data; data->data.nav_data.latitude = 10000000000; data->data.nav_data.longitude = -10000000000; data->data.nav_data.bearing = 3000; data->data.nav_data.speed = 0; data->data.nav_data.altitude = 20000; } #ifdef CONFIG_GNSS_SATELLITES static void gnss_emul_clear_satellites(const struct device *dev) { struct gnss_emul_data *data = dev->data; data->satellites_len = 0; } static bool gnss_emul_system_enabled(const struct device *dev, uint8_t system_bit) { struct gnss_emul_data *data = dev->data; return BIT(system_bit) & data->enabled_systems; } static void gnss_emul_add_satellite(const struct device *dev, uint8_t system_bit) { struct gnss_emul_data *data = dev->data; /* Unique values synthesized from GNSS system */ data->satellites[data->satellites_len].prn = system_bit; data->satellites[data->satellites_len].snr = system_bit + 20; data->satellites[data->satellites_len].elevation = system_bit + 40; data->satellites[data->satellites_len].azimuth = system_bit + 60; data->satellites[data->satellites_len].system = BIT(system_bit); data->satellites[data->satellites_len].is_tracked = true; data->satellites_len++; } static void gnss_emul_set_satellites(const struct device *dev) { gnss_emul_clear_satellites(dev); for (uint8_t i = 0; i < GNSS_EMUL_SUPPORTED_SYSTEMS_COUNT; i++) { if (!gnss_emul_system_enabled(dev, i)) { continue; } gnss_emul_add_satellite(dev, i); } } #endif static void gnss_emul_work_handler(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct gnss_emul_data *data = CONTAINER_OF(dwork, struct gnss_emul_data, data_dwork); const 
struct device *dev = data->dev; if (!gnss_emul_fix_is_acquired(dev)) { gnss_emul_clear_data(dev); } else { gnss_emul_set_fix(dev); gnss_emul_set_utc(dev); gnss_emul_set_nav_data(dev); } gnss_publish_data(dev, &data->data); #ifdef CONFIG_GNSS_SATELLITES gnss_emul_set_satellites(dev); gnss_publish_satellites(dev, data->satellites, data->satellites_len); #endif gnss_emul_update_fix_timestamp(dev, false); gnss_emul_schedule_work(dev); } static void gnss_emul_init_data(const struct device *dev) { struct gnss_emul_data *data = dev->data; data->dev = dev; k_sem_init(&data->lock, 1, 1); k_work_init_delayable(&data->data_dwork, gnss_emul_work_handler); } static int gnss_emul_init(const struct device *dev) { gnss_emul_init_data(dev); if (pm_device_is_powered(dev)) { gnss_emul_update_fix_timestamp(dev, true); gnss_emul_schedule_work(dev); } else { pm_device_init_off(dev); } return pm_device_runtime_enable(dev); } #define GNSS_EMUL_NAME(inst, name) _CONCAT(name, inst) #define GNSS_EMUL_DEVICE(inst) \ static struct gnss_emul_data GNSS_EMUL_NAME(inst, data) = { \ .fix_interval_ms = GNSS_EMUL_DEFAULT_FIX_INTERVAL_MS, \ .nav_mode = GNSS_EMUL_DEFAULT_NAV_MODE, \ .enabled_systems = GNSS_EMUL_DEFAULT_ENABLED_SYSTEMS_MASK, \ }; \ \ PM_DEVICE_DT_INST_DEFINE(inst, gnss_emul_pm_action); \ \ DEVICE_DT_INST_DEFINE( \ inst, \ gnss_emul_init, \ PM_DEVICE_DT_INST_GET(inst), \ &GNSS_EMUL_NAME(inst, data), \ NULL, \ POST_KERNEL, \ CONFIG_GNSS_INIT_PRIORITY, \ &api \ ); DT_INST_FOREACH_STATUS_OKAY(GNSS_EMUL_DEVICE) ```
/content/code_sandbox/drivers/gnss/gnss_emul.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,377
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_GNSS_GNSS_PARSE_H_ #define ZEPHYR_DRIVERS_GNSS_GNSS_PARSE_H_ #include <zephyr/types.h> /** * @brief Parse decimal string to nano parts * * @example "-1231.3512" -> -1231351200000 * * @param str The decimal string to be parsed * @param nano Destination for parsed decimal * * @retval -EINVAL if str could not be parsed * @retval 0 if str successfully parsed */ int gnss_parse_dec_to_nano(const char *str, int64_t *nano); /** * @brief Parse decimal string to micro parts * * @example "-1231.3512" -> -1231351200 * * @param str The decimal string to be parsed * @param milli Destination for parsed decimal * * @retval -EINVAL if str could not be parsed * @retval 0 if str successfully parsed */ int gnss_parse_dec_to_micro(const char *str, uint64_t *micro); /** * @brief Parse decimal string to milli parts * * @example "-1231.3512" -> -1231351 * * @param str The decimal string to be parsed * @param milli Destination for parsed decimal * * @retval -EINVAL if str could not be parsed * @retval 0 if str successfully parsed */ int gnss_parse_dec_to_milli(const char *str, int64_t *milli); /** * @brief Parse integer string of configurable base to integer * * @example "-1231" -> -1231 * * @param str Decimal string to be parsed * @param base Base of decimal string to be parsed * @param integer Destination for parsed integer * * @retval -EINVAL if str could not be parsed * @retval 0 if str successfully parsed */ int gnss_parse_atoi(const char *str, uint8_t base, int32_t *integer); #endif /* ZEPHYR_DRIVERS_GNSS_GNSS_PARSE_H_ */ ```
/content/code_sandbox/drivers/gnss/gnss_parse.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
439
```unknown config GNSS_U_BLOX_M10 bool "U-BLOX M10 GNSS Module" default y depends on GNSS depends on DT_HAS_U_BLOX_M10_ENABLED depends on GNSS_REFERENCE_FRAME_WGS84 select MODEM_MODULES select MODEM_BACKEND_UART select MODEM_CHAT select MODEM_UBX select GNSS_PARSE select GNSS_NMEA0183 select GNSS_NMEA0183_MATCH select GNSS_U_BLOX_PROTOCOL select UART_USE_RUNTIME_CONFIGURE help Enable U-BLOX M10 GNSS modem driver. config GNSS_U_BLOX_M10_SATELLITES_COUNT int "Maximum satellite count" depends on GNSS_SATELLITES default 24 help Maximum number of satellite that the driver that can be decoded from the GNSS device. This does not affect the number of devices that the device is actually tracking, just how many of those can be reported in the satellites callback. ```
/content/code_sandbox/drivers/gnss/Kconfig.u_blox_m10
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
222
```unknown config GNSS_LUATOS_AIR530Z bool "Luatos Air530z GNSS device" default y depends on GNSS depends on DT_HAS_LUATOS_AIR530Z_ENABLED depends on GNSS_REFERENCE_FRAME_WGS84 select MODEM_MODULES select MODEM_BACKEND_UART select MODEM_CHAT select GNSS_PARSE select GNSS_NMEA0183 select GNSS_NMEA0183_MATCH help Enable Luatos Air530z GNSS driver. config GNSS_LUATOS_AIR530Z_SATELLITES_COUNT int "Maximum satellite count" depends on GNSS_SATELLITES default 24 help Maximum number of satellites that can be decoded from the GNSS device. This does not affect the number of devices that the device is actually tracking, just how many of those can be reported in the satellites callback. ```
/content/code_sandbox/drivers/gnss/Kconfig.luatos_air530z
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
199
```c /* * */ #include <zephyr/drivers/gnss.h> #include <zephyr/drivers/gnss/gnss_publish.h> #include <zephyr/modem/chat.h> #include <zephyr/modem/ubx.h> #include <zephyr/modem/backend/uart.h> #include <zephyr/kernel.h> #include <zephyr/drivers/gpio.h> #include <string.h> #include <stdlib.h> #include "gnss_nmea0183.h" #include "gnss_nmea0183_match.h" #include "gnss_parse.h" #include "gnss_u_blox_protocol/gnss_u_blox_protocol.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ubx_m10, CONFIG_GNSS_LOG_LEVEL); #define DT_DRV_COMPAT u_blox_m10 #define UART_RECV_BUF_SZ 128 #define UART_TRNF_BUF_SZ 128 #define CHAT_RECV_BUF_SZ 256 #define CHAT_ARGV_SZ 32 #define UBX_RECV_BUF_SZ UBX_FRM_SZ_MAX #define UBX_TRNS_BUF_SZ UBX_FRM_SZ_MAX #define UBX_WORK_BUF_SZ UBX_FRM_SZ_MAX #define UBX_FRM_BUF_SZ UBX_FRM_SZ_MAX #define MODEM_UBX_SCRIPT_TIMEOUT_MS 1000 #define UBX_M10_SCRIPT_RETRY_DEFAULT 10 #define UBX_M10_GNSS_SYS_CNT 8 #define UBX_M10_GNSS_SUPP_SYS_CNT 6 struct ubx_m10_config { const struct device *uart; const uint32_t uart_baudrate; }; struct ubx_m10_data { struct gnss_nmea0183_match_data match_data; #if CONFIG_GNSS_SATELLITES struct gnss_satellite satellites[CONFIG_GNSS_U_BLOX_M10_SATELLITES_COUNT]; #endif /* UART backend */ struct modem_pipe *uart_pipe; struct modem_backend_uart uart_backend; uint8_t uart_backend_receive_buf[UART_RECV_BUF_SZ]; uint8_t uart_backend_transmit_buf[UART_TRNF_BUF_SZ]; /* Modem chat */ struct modem_chat chat; uint8_t chat_receive_buf[CHAT_RECV_BUF_SZ]; uint8_t *chat_argv[CHAT_ARGV_SZ]; /* Modem ubx */ struct modem_ubx ubx; uint8_t ubx_receive_buf[UBX_RECV_BUF_SZ]; uint8_t ubx_work_buf[UBX_WORK_BUF_SZ]; /* Modem ubx script */ struct modem_ubx_script script; uint8_t request_buf[UBX_FRM_BUF_SZ]; uint8_t response_buf[UBX_FRM_BUF_SZ]; uint8_t match_buf[UBX_FRM_BUF_SZ]; struct k_spinlock lock; }; MODEM_CHAT_MATCHES_DEFINE(unsol_matches, MODEM_CHAT_MATCH_WILDCARD("$??GGA,", ",*", gnss_nmea0183_match_gga_callback), 
MODEM_CHAT_MATCH_WILDCARD("$??RMC,", ",*", gnss_nmea0183_match_rmc_callback), #if CONFIG_GNSS_SATELLITES MODEM_CHAT_MATCH_WILDCARD("$??GSV,", ",*", gnss_nmea0183_match_gsv_callback), #endif ); static int ubx_m10_resume(const struct device *dev) { struct ubx_m10_data *data = dev->data; int ret; ret = modem_pipe_open(data->uart_pipe, K_SECONDS(10)); if (ret < 0) { return ret; } ret = modem_chat_attach(&data->chat, data->uart_pipe); if (ret < 0) { (void)modem_pipe_close(data->uart_pipe, K_SECONDS(10)); return ret; } return ret; } static int ubx_m10_turn_off(const struct device *dev) { struct ubx_m10_data *data = dev->data; return modem_pipe_close(data->uart_pipe, K_SECONDS(10)); } static int ubx_m10_init_nmea0183_match(const struct device *dev) { struct ubx_m10_data *data = dev->data; const struct gnss_nmea0183_match_config match_config = { .gnss = dev, #if CONFIG_GNSS_SATELLITES .satellites = data->satellites, .satellites_size = ARRAY_SIZE(data->satellites), #endif }; return gnss_nmea0183_match_init(&data->match_data, &match_config); } static void ubx_m10_init_pipe(const struct device *dev) { const struct ubx_m10_config *cfg = dev->config; struct ubx_m10_data *data = dev->data; const struct modem_backend_uart_config uart_backend_config = { .uart = cfg->uart, .receive_buf = data->uart_backend_receive_buf, .receive_buf_size = sizeof(data->uart_backend_receive_buf), .transmit_buf = data->uart_backend_transmit_buf, .transmit_buf_size = ARRAY_SIZE(data->uart_backend_transmit_buf), }; data->uart_pipe = modem_backend_uart_init(&data->uart_backend, &uart_backend_config); } static uint8_t ubx_m10_char_delimiter[] = {'\r', '\n'}; static int ubx_m10_init_chat(const struct device *dev) { struct ubx_m10_data *data = dev->data; const struct modem_chat_config chat_config = { .user_data = data, .receive_buf = data->chat_receive_buf, .receive_buf_size = sizeof(data->chat_receive_buf), .delimiter = ubx_m10_char_delimiter, .delimiter_size = ARRAY_SIZE(ubx_m10_char_delimiter), .filter = 
NULL, .filter_size = 0, .argv = data->chat_argv, .argv_size = ARRAY_SIZE(data->chat_argv), .unsol_matches = unsol_matches, .unsol_matches_size = ARRAY_SIZE(unsol_matches), }; return modem_chat_init(&data->chat, &chat_config); } static int ubx_m10_init_ubx(const struct device *dev) { struct ubx_m10_data *data = dev->data; const struct modem_ubx_config ubx_config = { .user_data = data, .receive_buf = data->ubx_receive_buf, .receive_buf_size = sizeof(data->ubx_receive_buf), .work_buf = data->ubx_work_buf, .work_buf_size = sizeof(data->ubx_work_buf), }; return modem_ubx_init(&data->ubx, &ubx_config); } /** * @brief Changes modem module (chat or ubx) attached to the uart pipe. * @param dev Dev instance * @param change_from_to 0 for changing from "chat" to "ubx", 1 for changing from "ubx" to "chat" * @returns 0 if successful * @returns negative errno code if failure */ static int ubx_m10_modem_module_change(const struct device *dev, bool change_from_to) { struct ubx_m10_data *data = dev->data; int ret; if (change_from_to == 0) { modem_chat_release(&data->chat); ret = modem_ubx_attach(&data->ubx, data->uart_pipe); } else { /* change_from_to == 1 */ modem_ubx_release(&data->ubx); ret = modem_chat_attach(&data->chat, data->uart_pipe); } if (ret < 0) { (void)modem_pipe_close(data->uart_pipe, K_SECONDS(10)); } return ret; } static int ubx_m10_modem_ubx_run_script(const struct device *dev, struct modem_ubx_script *modem_ubx_script_tx) { struct ubx_m10_data *data = dev->data; int ret; ret = ubx_m10_modem_module_change(dev, 0); if (ret < 0) { goto reset_modem_module; } ret = modem_ubx_run_script(&data->ubx, modem_ubx_script_tx); reset_modem_module: ret |= ubx_m10_modem_module_change(dev, 1); return ret; } static void ubx_m10_modem_ubx_script_fill(const struct device *dev) { struct ubx_m10_data *data = dev->data; data->script.request = (struct ubx_frame *)data->request_buf; data->script.response = (struct ubx_frame *)data->response_buf; data->script.match = (struct ubx_frame 
*)data->match_buf; data->script.retry_count = UBX_M10_SCRIPT_RETRY_DEFAULT; data->script.timeout = K_MSEC(MODEM_UBX_SCRIPT_TIMEOUT_MS); } static int ubx_m10_modem_ubx_script_init(const struct device *dev, void *payload, uint16_t payld_sz, enum ubx_msg_class msg_cls, enum ubx_config_message msg_id) { int ret; struct ubx_m10_data *data = dev->data; struct ubx_cfg_ack_payload match_payload = { .message_class = msg_cls, .message_id = msg_id, }; ubx_m10_modem_ubx_script_fill(dev); ret = ubx_create_and_validate_frame(data->match_buf, sizeof(data->match_buf), UBX_CLASS_ACK, UBX_ACK_ACK, &match_payload, UBX_CFG_ACK_PAYLOAD_SZ); if (ret < 0) { return ret; } ret = ubx_create_and_validate_frame(data->request_buf, sizeof(data->request_buf), msg_cls, msg_id, payload, payld_sz); return ret; } static int ubx_m10_ubx_cfg_rate(const struct device *dev) { int ret; k_spinlock_key_t key; struct ubx_m10_data *data = dev->data; struct ubx_cfg_rate_payload payload; key = k_spin_lock(&data->lock); ubx_cfg_rate_payload_default(&payload); ret = ubx_m10_modem_ubx_script_init(dev, &payload, UBX_CFG_RATE_PAYLOAD_SZ, UBX_CLASS_CFG, UBX_CFG_RATE); if (ret < 0) { goto unlock; } ret = ubx_m10_modem_ubx_run_script(dev, &(data->script)); unlock: k_spin_unlock(&data->lock, key); return ret; } static int ubx_m10_ubx_cfg_prt_set(const struct device *dev, uint32_t target_baudrate, uint8_t retry) { int ret; k_spinlock_key_t key; struct ubx_m10_data *data = dev->data; struct ubx_cfg_prt_set_payload payload; key = k_spin_lock(&data->lock); ubx_cfg_prt_set_payload_default(&payload); payload.baudrate = target_baudrate; ret = ubx_m10_modem_ubx_script_init(dev, &payload, UBX_CFG_PRT_SET_PAYLOAD_SZ, UBX_CLASS_CFG, UBX_CFG_PRT); if (ret < 0) { goto unlock; } data->script.retry_count = retry; /* Returns failure if "target_baudrate" is different than device's currently set baudrate, * because the device will change its baudrate and respond with UBX-ACK with new baudrate, * which we will miss. 
Hence, we need to change uart's baudrate after sending the frame * (in order to receive response as well), which we are not doing right now. */ ret = ubx_m10_modem_ubx_run_script(dev, &(data->script)); unlock: k_spin_unlock(&data->lock, key); return ret; } static int ubx_m10_ubx_cfg_rst(const struct device *dev, uint8_t reset_mode) { int ret; k_spinlock_key_t key; struct ubx_m10_data *data = dev->data; struct ubx_cfg_rst_payload payload; key = k_spin_lock(&data->lock); ubx_cfg_rst_payload_default(&payload); payload.nav_bbr_mask = UBX_CFG_RST_NAV_BBR_MASK_HOT_START; payload.reset_mode = reset_mode; ret = ubx_m10_modem_ubx_script_init(dev, &payload, UBX_CFG_RST_PAYLOAD_SZ, UBX_CLASS_CFG, UBX_CFG_RST); if (ret < 0) { goto unlock; } data->script.match = NULL; ret = ubx_m10_modem_ubx_run_script(dev, &(data->script)); if (ret < 0) { goto unlock; } if (reset_mode == UBX_CFG_RST_RESET_MODE_CONTROLLED_GNSS_STOP) { k_sleep(K_MSEC(UBX_CFG_RST_WAIT_MS)); } unlock: k_spin_unlock(&data->lock, key); return ret; } static int ubx_m10_set_uart_baudrate(const struct device *dev, uint32_t baudrate) { int ret; k_spinlock_key_t key; struct ubx_m10_data *data = dev->data; const struct ubx_m10_config *config = dev->config; struct uart_config uart_cfg; key = k_spin_lock(&data->lock); ret = ubx_m10_turn_off(dev); if (ret < 0) { goto reset_and_unlock; } ret = uart_config_get(config->uart, &uart_cfg); if (ret < 0) { goto reset_and_unlock; } uart_cfg.baudrate = baudrate; ret = uart_configure(config->uart, &uart_cfg); reset_and_unlock: ret |= ubx_m10_resume(dev); k_spin_unlock(&data->lock, key); return ret; } static bool ubx_m10_validate_baudrate(const struct device *dev, uint32_t baudrate) { for (int i = 0; i < UBX_BAUDRATE_COUNT; ++i) { if (baudrate == ubx_baudrate[i]) { return true; } } return false; } /* This function will return failure if "target_baudrate" != device's current baudrate. * Refer the function description of ubx_m10_ubx_cfg_prt_set for a detailed explanation. 
*/ static int ubx_m10_configure_gnss_device_baudrate_prerequisite(const struct device *dev) { /* Retry = 1 should be enough, but setting 2 just to be safe. */ int ret, retry = 2; const struct ubx_m10_config *config = dev->config; uint32_t target_baudrate = config->uart_baudrate; ret = ubx_m10_validate_baudrate(dev, target_baudrate); if (ret < 0) { return ret; } /* Try communication with device with all possible baudrates, because initially we don't * know the currently set baudrate of the device. We will match the baudrate in one of the * following attempts and the device will thus change its baudrate to "target_baudrate". */ for (int i = 0; i < UBX_BAUDRATE_COUNT; ++i) { /* Set baudrate of UART pipe as ubx_baudrate[i]. */ ret = ubx_m10_set_uart_baudrate(dev, ubx_baudrate[i]); if (ret < 0) { return ret; } /* Try setting baudrate of device as target_baudrate. */ ret = ubx_m10_ubx_cfg_prt_set(dev, target_baudrate, retry); if (ret == 0) { break; } } /* Reset baudrate of UART pipe as target_baudrate. */ ret = ubx_m10_set_uart_baudrate(dev, target_baudrate); if (ret < 0) { return ret; } return 0; } static int ubx_m10_configure_gnss_device_baudrate(const struct device *dev) { int ret; const struct ubx_m10_config *config = dev->config; uint32_t target_baudrate = config->uart_baudrate; ret = ubx_m10_validate_baudrate(dev, target_baudrate); if (ret < 0) { return ret; } ret = ubx_m10_ubx_cfg_prt_set(dev, target_baudrate, UBX_M10_SCRIPT_RETRY_DEFAULT); if (ret < 0) { return ret; } return 0; } static int ubx_m10_configure_messages(const struct device *dev) { int ret = 0; k_spinlock_key_t key; struct ubx_m10_data *data = dev->data; struct ubx_cfg_msg_payload payload; key = k_spin_lock(&data->lock); ubx_cfg_msg_payload_default(&payload); /* Enabling GGA, RMC and GSV messages. 
*/ payload.rate = 1; uint8_t message_enable[] = {UBX_NMEA_GGA, UBX_NMEA_RMC, UBX_NMEA_GSV}; for (int i = 0; i < sizeof(message_enable); ++i) { payload.message_id = message_enable[i]; ret = ubx_m10_modem_ubx_script_init(dev, &payload, UBX_CFG_MSG_PAYLOAD_SZ, UBX_CLASS_CFG, UBX_CFG_MSG); if (ret < 0) { goto unlock; } ret = ubx_m10_modem_ubx_run_script(dev, &(data->script)); if (ret < 0) { goto unlock; } } /* Disabling DTM, GBS, GLL, GNS, GRS, GSA, GST, VLW, VTG and ZDA messages. */ payload.rate = 0; uint8_t message_disable[] = {UBX_NMEA_DTM, UBX_NMEA_GBS, UBX_NMEA_GLL, UBX_NMEA_GNS, UBX_NMEA_GRS, UBX_NMEA_GSA, UBX_NMEA_GST, UBX_NMEA_VLW, UBX_NMEA_VTG, UBX_NMEA_ZDA}; for (int i = 0; i < sizeof(message_disable); ++i) { payload.message_id = message_disable[i]; ret = ubx_m10_modem_ubx_script_init(dev, &payload, UBX_CFG_MSG_PAYLOAD_SZ, UBX_CLASS_CFG, UBX_CFG_MSG); if (ret < 0) { goto unlock; } ret = ubx_m10_modem_ubx_run_script(dev, &(data->script)); if (ret < 0) { goto unlock; } } unlock: k_spin_unlock(&data->lock, key); return ret; } static int ubx_m10_navigation_mode_to_ubx_dynamic_model(const struct device *dev, enum gnss_navigation_mode mode) { switch (mode) { case GNSS_NAVIGATION_MODE_ZERO_DYNAMICS: return UBX_DYN_MODEL_STATIONARY; case GNSS_NAVIGATION_MODE_LOW_DYNAMICS: return UBX_DYN_MODEL_PORTABLE; case GNSS_NAVIGATION_MODE_BALANCED_DYNAMICS: return UBX_DYN_MODEL_AIRBORNE1G; case GNSS_NAVIGATION_MODE_HIGH_DYNAMICS: return UBX_DYN_MODEL_AIRBORNE4G; default: return -EINVAL; } } static int ubx_m10_ubx_dynamic_model_to_navigation_mode(const struct device *dev, enum ubx_dynamic_model dynamic_model) { switch (dynamic_model) { case UBX_DYN_MODEL_PORTABLE: return GNSS_NAVIGATION_MODE_LOW_DYNAMICS; case UBX_DYN_MODEL_STATIONARY: return GNSS_NAVIGATION_MODE_ZERO_DYNAMICS; case UBX_DYN_MODEL_PEDESTRIAN: return GNSS_NAVIGATION_MODE_LOW_DYNAMICS; case UBX_DYN_MODEL_AUTOMOTIVE: return GNSS_NAVIGATION_MODE_LOW_DYNAMICS; case UBX_DYN_MODEL_SEA: return 
GNSS_NAVIGATION_MODE_BALANCED_DYNAMICS; case UBX_DYN_MODEL_AIRBORNE1G: return GNSS_NAVIGATION_MODE_BALANCED_DYNAMICS; case UBX_DYN_MODEL_AIRBORNE2G: return GNSS_NAVIGATION_MODE_BALANCED_DYNAMICS; case UBX_DYN_MODEL_AIRBORNE4G: return GNSS_NAVIGATION_MODE_HIGH_DYNAMICS; case UBX_DYN_MODEL_WRIST: return GNSS_NAVIGATION_MODE_BALANCED_DYNAMICS; case UBX_DYN_MODEL_BIKE: return GNSS_NAVIGATION_MODE_HIGH_DYNAMICS; default: return -EINVAL; } } static int ubx_m10_set_navigation_mode(const struct device *dev, enum gnss_navigation_mode mode) { int ret; k_spinlock_key_t key; struct ubx_m10_data *data = dev->data; struct ubx_cfg_nav5_payload payload; key = k_spin_lock(&data->lock); ubx_cfg_nav5_payload_default(&payload); ret = ubx_m10_navigation_mode_to_ubx_dynamic_model(dev, mode); if (ret < 0) { goto unlock; } payload.dyn_model = ret; ret = ubx_m10_modem_ubx_script_init(dev, &payload, UBX_CFG_NAV5_PAYLOAD_SZ, UBX_CLASS_CFG, UBX_CFG_NAV5); if (ret < 0) { goto unlock; } ret = ubx_m10_modem_ubx_run_script(dev, &(data->script)); if (ret < 0) { goto unlock; } k_sleep(K_MSEC(UBX_CFG_NAV5_WAIT_MS)); unlock: k_spin_unlock(&data->lock, key); return ret; } static int ubx_m10_get_navigation_mode(const struct device *dev, enum gnss_navigation_mode *mode) { int ret; k_spinlock_key_t key; struct ubx_m10_data *data = dev->data; enum ubx_dynamic_model dynamic_model; key = k_spin_lock(&data->lock); ret = ubx_m10_modem_ubx_script_init(dev, NULL, UBX_FRM_GET_PAYLOAD_SZ, UBX_CLASS_CFG, UBX_CFG_NAV5); if (ret < 0) { goto unlock; } ret = ubx_m10_modem_ubx_run_script(dev, &(data->script)); if (ret < 0) { goto unlock; } struct ubx_frame *response = data->script.response; dynamic_model = ((struct ubx_cfg_nav5_payload *)response->payload_and_checksum)->dyn_model; ret = ubx_m10_ubx_dynamic_model_to_navigation_mode(dev, dynamic_model); if (ret < 0) { goto unlock; } *mode = ret; unlock: k_spin_unlock(&data->lock, key); return ret; } static int ubx_m10_get_supported_systems(const struct device *dev, 
gnss_systems_t *systems) { *systems = (GNSS_SYSTEM_GPS | GNSS_SYSTEM_GLONASS | GNSS_SYSTEM_GALILEO | GNSS_SYSTEM_BEIDOU | GNSS_SYSTEM_SBAS | GNSS_SYSTEM_QZSS); return 0; } static int ubx_m10_ubx_gnss_id_to_gnss_system(const struct device *dev, enum ubx_gnss_id gnss_id) { switch (gnss_id) { case UBX_GNSS_ID_GPS: return GNSS_SYSTEM_GPS; case UBX_GNSS_ID_SBAS: return GNSS_SYSTEM_SBAS; case UBX_GNSS_ID_GALILEO: return GNSS_SYSTEM_GALILEO; case UBX_GNSS_ID_BEIDOU: return GNSS_SYSTEM_BEIDOU; case UBX_GNSS_ID_QZSS: return GNSS_SYSTEM_QZSS; case UBX_GNSS_ID_GLONASS: return GNSS_SYSTEM_GLONASS; default: return -EINVAL; }; } static int ubx_m10_config_block_fill(const struct device *dev, gnss_systems_t gnss_system, struct ubx_cfg_gnss_payload *payload, uint8_t index, uint32_t enable) { uint32_t signal_config; switch (gnss_system) { case GNSS_SYSTEM_GPS: payload->config_blocks[index].gnss_id = UBX_GNSS_ID_GPS; signal_config = UBX_CFG_GNSS_FLAG_SGN_CNF_GPS_L1C_A; break; case GNSS_SYSTEM_GLONASS: payload->config_blocks[index].gnss_id = UBX_GNSS_ID_GLONASS; signal_config = UBX_CFG_GNSS_FLAG_SGN_CNF_GLONASS_L1; break; case GNSS_SYSTEM_GALILEO: payload->config_blocks[index].gnss_id = UBX_GNSS_ID_GALILEO; signal_config = UBX_CFG_GNSS_FLAG_SGN_CNF_GALILEO_E1; break; case GNSS_SYSTEM_BEIDOU: payload->config_blocks[index].gnss_id = UBX_GNSS_ID_BEIDOU; signal_config = UBX_CFG_GNSS_FLAG_SGN_CNF_BEIDOU_B1I; break; case GNSS_SYSTEM_QZSS: payload->config_blocks[index].gnss_id = UBX_GNSS_ID_QZSS; signal_config = UBX_CFG_GNSS_FLAG_SGN_CNF_QZSS_L1C_A; break; case GNSS_SYSTEM_SBAS: payload->config_blocks[index].gnss_id = UBX_GNSS_ID_SBAS; signal_config = UBX_CFG_GNSS_FLAG_SGN_CNF_SBAS_L1C_A; break; default: return -EINVAL; }; payload->config_blocks[index].flags = enable | signal_config; return 0; } static int ubx_m10_set_enabled_systems(const struct device *dev, gnss_systems_t systems) { int ret; k_spinlock_key_t key; struct ubx_m10_data *data = dev->data; key = k_spin_lock(&data->lock); struct 
ubx_cfg_gnss_payload *payload; /* Get number of tracking channels for each supported gnss system by sending CFG-GNSS. */ ret = ubx_m10_modem_ubx_script_init(dev, NULL, UBX_FRM_GET_PAYLOAD_SZ, UBX_CLASS_CFG, UBX_CFG_GNSS); if (ret < 0) { goto unlock; } ret = ubx_m10_modem_ubx_run_script(dev, &(data->script)); if (ret < 0) { goto unlock; } struct ubx_frame *response = data->script.response; uint16_t res_trk_ch_sum = 0, max_trk_ch_sum = 0; /* Calculate sum of reserved and maximum tracking channels for each supported gnss system, * and assert that the sum is not greater than the number of tracking channels in use. */ payload = (struct ubx_cfg_gnss_payload *) response->payload_and_checksum; for (int i = 0; i < payload->num_config_blocks; ++i) { ret = ubx_m10_ubx_gnss_id_to_gnss_system(dev, payload->config_blocks[i].gnss_id); if (ret < 0) { goto unlock; } if (ret & systems) { res_trk_ch_sum += payload->config_blocks[i].num_res_trk_ch; max_trk_ch_sum += payload->config_blocks[i].max_num_trk_ch; } if (res_trk_ch_sum > payload->num_trk_ch_use || max_trk_ch_sum > payload->num_trk_ch_use) { ret = -EINVAL; goto unlock; } } /* Prepare payload (payload) for sending CFG-GNSS for enabling the gnss systems. */ payload = malloc(sizeof(*payload) + sizeof(struct ubx_cfg_gnss_payload_config_block) * UBX_M10_GNSS_SUPP_SYS_CNT); if (!payload) { ret = -ENOMEM; goto unlock; } payload->num_config_blocks = UBX_M10_GNSS_SUPP_SYS_CNT; ubx_cfg_gnss_payload_default(payload); uint8_t filled_blocks = 0; gnss_systems_t supported_systems; ret = ubx_m10_get_supported_systems(dev, &supported_systems); if (ret < 0) { goto free_and_unlock; } for (int i = 0; i < UBX_M10_GNSS_SYS_CNT; ++i) { gnss_systems_t gnss_system = 1 << i; if (gnss_system & supported_systems) { uint32_t enable = (systems & gnss_system) ? 
UBX_CFG_GNSS_FLAG_ENABLE : UBX_CFG_GNSS_FLAG_DISABLE; ret = ubx_m10_config_block_fill(dev, gnss_system, payload, filled_blocks, enable); if (ret < 0) { goto free_and_unlock; } ++filled_blocks; } } ret = ubx_m10_modem_ubx_script_init(dev, payload, UBX_CFG_GNSS_PAYLOAD_SZ(UBX_M10_GNSS_SUPP_SYS_CNT), UBX_CLASS_CFG, UBX_CFG_GNSS); if (ret < 0) { goto free_and_unlock; } ret = ubx_m10_modem_ubx_run_script(dev, &(data->script)); if (ret < 0) { goto free_and_unlock; } k_sleep(K_MSEC(UBX_CFG_GNSS_WAIT_MS)); free_and_unlock: free(payload); unlock: k_spin_unlock(&data->lock, key); return ret; } static int ubx_m10_get_enabled_systems(const struct device *dev, gnss_systems_t *systems) { int ret; k_spinlock_key_t key; struct ubx_m10_data *data = dev->data; key = k_spin_lock(&data->lock); ret = ubx_m10_modem_ubx_script_init(dev, NULL, UBX_FRM_GET_PAYLOAD_SZ, UBX_CLASS_CFG, UBX_CFG_GNSS); if (ret < 0) { goto unlock; } ret = ubx_m10_modem_ubx_run_script(dev, &(data->script)); if (ret < 0) { goto unlock; } struct ubx_frame *response = data->script.response; struct ubx_cfg_gnss_payload *payload = (struct ubx_cfg_gnss_payload *) response->payload_and_checksum; *systems = 0; for (int i = 0; i < payload->num_config_blocks; ++i) { if (payload->config_blocks[i].flags & UBX_CFG_GNSS_FLAG_ENABLE) { enum ubx_gnss_id gnss_id = payload->config_blocks[i].gnss_id; ret = ubx_m10_ubx_gnss_id_to_gnss_system(dev, gnss_id); if (ret < 0) { goto unlock; } *systems |= ret; } } unlock: k_spin_unlock(&data->lock, key); return ret; } static int ubx_m10_set_fix_rate(const struct device *dev, uint32_t fix_interval_ms) { int ret; k_spinlock_key_t key; struct ubx_m10_data *data = dev->data; struct ubx_cfg_rate_payload payload; if (fix_interval_ms < 50) { return -1; } key = k_spin_lock(&data->lock); ubx_cfg_rate_payload_default(&payload); payload.meas_rate_ms = fix_interval_ms; ret = ubx_m10_modem_ubx_script_init(dev, &payload, UBX_CFG_RATE_PAYLOAD_SZ, UBX_CLASS_CFG, UBX_CFG_RATE); if (ret < 0) { goto unlock; } 
ret = ubx_m10_modem_ubx_run_script(dev, &(data->script)); unlock: k_spin_unlock(&data->lock, key); return ret; } static int ubx_m10_get_fix_rate(const struct device *dev, uint32_t *fix_interval_ms) { int ret; k_spinlock_key_t key; struct ubx_m10_data *data = dev->data; struct ubx_cfg_rate_payload *payload; key = k_spin_lock(&data->lock); ret = ubx_m10_modem_ubx_script_init(dev, NULL, UBX_FRM_GET_PAYLOAD_SZ, UBX_CLASS_CFG, UBX_CFG_RATE); if (ret < 0) { goto unlock; } ret = ubx_m10_modem_ubx_run_script(dev, &(data->script)); if (ret < 0) { goto unlock; } struct ubx_frame *response = data->script.response; payload = (struct ubx_cfg_rate_payload *) response->payload_and_checksum; *fix_interval_ms = payload->meas_rate_ms; unlock: k_spin_unlock(&data->lock, key); return ret; } static const struct gnss_driver_api gnss_api = { .set_fix_rate = ubx_m10_set_fix_rate, .get_fix_rate = ubx_m10_get_fix_rate, .set_navigation_mode = ubx_m10_set_navigation_mode, .get_navigation_mode = ubx_m10_get_navigation_mode, .set_enabled_systems = ubx_m10_set_enabled_systems, .get_enabled_systems = ubx_m10_get_enabled_systems, .get_supported_systems = ubx_m10_get_supported_systems, }; static int ubx_m10_configure(const struct device *dev) { int ret; /* The return value could be ignored. See function description for more details. */ (void)ubx_m10_configure_gnss_device_baudrate_prerequisite(dev); /* Stopping GNSS messages for clearer communication while configuring the device. */ ret = ubx_m10_ubx_cfg_rst(dev, UBX_CFG_RST_RESET_MODE_CONTROLLED_GNSS_STOP); if (ret < 0) { goto reset; } ret = ubx_m10_ubx_cfg_rate(dev); if (ret < 0) { LOG_ERR("Configuring rate failed. Returned %d.", ret); goto reset; } ret = ubx_m10_configure_gnss_device_baudrate(dev); if (ret < 0) { LOG_ERR("Configuring baudrate failed. Returned %d.", ret); goto reset; } ret = ubx_m10_configure_messages(dev); if (ret < 0) { LOG_ERR("Configuring messages failed. 
Returned %d.", ret); } reset: ret = ubx_m10_ubx_cfg_rst(dev, UBX_CFG_RST_RESET_MODE_CONTROLLED_GNSS_START); return ret; } static int ubx_m10_init(const struct device *dev) { int ret; ret = ubx_m10_init_nmea0183_match(dev); if (ret < 0) { return ret; } ubx_m10_init_pipe(dev); ret = ubx_m10_init_chat(dev); if (ret < 0) { return ret; } ret = ubx_m10_init_ubx(dev); if (ret < 0) { return ret; } ret = ubx_m10_resume(dev); if (ret < 0) { return ret; } ret = ubx_m10_configure(dev); if (ret < 0) { return ret; } return 0; } #define UBX_M10(inst) \ static const struct ubx_m10_config ubx_m10_cfg_##inst = { \ .uart = DEVICE_DT_GET(DT_INST_BUS(inst)), \ .uart_baudrate = DT_PROP(DT_DRV_INST(inst), uart_baudrate), \ }; \ \ static struct ubx_m10_data ubx_m10_data_##inst = { \ .script.request = (struct ubx_frame *)ubx_m10_data_##inst.request_buf, \ .script.response = (struct ubx_frame *)ubx_m10_data_##inst.response_buf, \ .script.match = (struct ubx_frame *)ubx_m10_data_##inst.match_buf, \ .script.retry_count = UBX_M10_SCRIPT_RETRY_DEFAULT, \ .script.timeout = K_MSEC(MODEM_UBX_SCRIPT_TIMEOUT_MS), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, \ ubx_m10_init, \ NULL, \ &ubx_m10_data_##inst, \ &ubx_m10_cfg_##inst, \ POST_KERNEL, \ CONFIG_GNSS_INIT_PRIORITY, \ &gnss_api); DT_INST_FOREACH_STATUS_OKAY(UBX_M10) ```
/content/code_sandbox/drivers/gnss/gnss_u_blox_m10.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,906
```objective-c /* * */ /* * The GNSS NMEA0183 match is a set of modem_chat match handlers and a context to be * passed to said handlers, to parse the NMEA0183 messages received from a NMEA0183 * based GNSS device. * * The context struct gnss_nmea0183_match_data *data is placed as the first member * of the data structure which is passed to the modem_chat instance through the * user_data member. * * struct my_gnss_nmea0183_driver { * gnss_nmea0183_match_data match_data; * ... * }; * * The struct gnss_nmea0183_match_data context must be initialized using * gnss_nmea0183_match_init(). * * When initializing the modem_chat instance, the three match callbacks must be added * as part of the unsolicited matches. * * MODEM_CHAT_MATCHES_DEFINE(unsol_matches, * MODEM_CHAT_MATCH_WILDCARD("$??GGA,", ",*", gnss_nmea0183_match_gga_callback), * MODEM_CHAT_MATCH_WILDCARD("$??RMC,", ",*", gnss_nmea0183_match_rmc_callback), * #if CONFIG_GNSS_SATELLITES * MODEM_CHAT_MATCH_WILDCARD("$??GSV,", ",*", gnss_nmea0183_match_gsv_callback), * #endif * */ #ifndef ZEPHYR_DRIVERS_GNSS_GNSS_NMEA0183_MATCH_H_ #define ZEPHYR_DRIVERS_GNSS_GNSS_NMEA0183_MATCH_H_ #include <zephyr/types.h> #include <zephyr/device.h> #include <zephyr/drivers/gnss.h> #include <zephyr/modem/chat.h> struct gnss_nmea0183_match_data { const struct device *gnss; struct gnss_data data; #if CONFIG_GNSS_SATELLITES struct gnss_satellite *satellites; uint16_t satellites_size; uint16_t satellites_length; #endif uint32_t gga_utc; uint32_t rmc_utc; uint8_t gsv_message_number; }; /** GNSS NMEA0183 match configuration structure */ struct gnss_nmea0183_match_config { /** The GNSS device from which the data is published */ const struct device *gnss; #if CONFIG_GNSS_SATELLITES /** Buffer for parsed satellites */ struct gnss_satellite *satellites; /** Number of elements in buffer for parsed satellites */ uint16_t satellites_size; #endif }; /** * @brief Match callback for the NMEA GGA NMEA0183 message * * @details Should be used as the 
callback of a modem_chat match which matches "$??GGA," */ void gnss_nmea0183_match_gga_callback(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data); /** * @brief Match callback for the NMEA RMC NMEA0183 message * * @details Should be used as the callback of a modem_chat match which matches "$??RMC," */ void gnss_nmea0183_match_rmc_callback(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data); /** * @brief Match callback for the NMEA GSV NMEA0183 message * * @details Should be used as the callback of a modem_chat match which matches "$??GSV," */ void gnss_nmea0183_match_gsv_callback(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data); /** * @brief Initialize a GNSS NMEA0183 match instance * * @param data GNSS NMEA0183 match instance to initialize * @param config Configuration to apply to GNSS NMEA0183 match instance */ int gnss_nmea0183_match_init(struct gnss_nmea0183_match_data *data, const struct gnss_nmea0183_match_config *config); #endif /* ZEPHYR_DRIVERS_GNSS_GNSS_NMEA0183_MATCH_H_ */ ```
/content/code_sandbox/drivers/gnss/gnss_nmea0183_match.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
879
```unknown config GNSS_NMEA_GENERIC bool "Generic GNSS NMEA device" default y depends on GNSS depends on DT_HAS_GNSS_NMEA_GENERIC_ENABLED depends on GNSS_REFERENCE_FRAME_WGS84 select MODEM_MODULES select MODEM_BACKEND_UART select MODEM_CHAT select GNSS_PARSE select GNSS_NMEA0183 select GNSS_NMEA0183_MATCH help Generic NMEA based GNSS device. if GNSS_NMEA_GENERIC config GNSS_NMEA_GENERIC_SATELLITES_COUNT int "Maximum satellite count" depends on GNSS_SATELLITES default 24 help Maximum number of satellite that the driver that can be decoded from the GNSS device. This does not affect the number of devices that the device is actually tracking, just how many of those can be reported in the satellites callback. endif ```
/content/code_sandbox/drivers/gnss/Kconfig.generic
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
194
```c /* * */ #include "gnss_dump.h" #include <stdlib.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <string.h> #if CONFIG_GNSS_DUMP_TO_LOG static char dump_buf[CONFIG_GNSS_DUMP_TO_LOG_BUF_SIZE]; #endif /* CONFIG_GNSS_DUMP_TO_LOG */ static const char *gnss_fix_status_to_str(enum gnss_fix_status fix_status) { switch (fix_status) { case GNSS_FIX_STATUS_NO_FIX: return "NO_FIX"; case GNSS_FIX_STATUS_GNSS_FIX: return "GNSS_FIX"; case GNSS_FIX_STATUS_DGNSS_FIX: return "DGNSS_FIX"; case GNSS_FIX_STATUS_ESTIMATED_FIX: return "ESTIMATED_FIX"; } return "unknown"; } static const char *gnss_fix_quality_to_str(enum gnss_fix_quality fix_quality) { switch (fix_quality) { case GNSS_FIX_QUALITY_INVALID: return "INVALID"; case GNSS_FIX_QUALITY_GNSS_SPS: return "GNSS_SPS"; case GNSS_FIX_QUALITY_DGNSS: return "DGNSS"; case GNSS_FIX_QUALITY_GNSS_PPS: return "GNSS_PPS"; case GNSS_FIX_QUALITY_RTK: return "RTK"; case GNSS_FIX_QUALITY_FLOAT_RTK: return "FLOAT_RTK"; case GNSS_FIX_QUALITY_ESTIMATED: return "ESTIMATED"; } return "unknown"; } #if CONFIG_GNSS_SATELLITES static const char *gnss_system_to_str(enum gnss_system system) { switch (system) { case GNSS_SYSTEM_GPS: return "GPS"; case GNSS_SYSTEM_GLONASS: return "GLONASS"; case GNSS_SYSTEM_GALILEO: return "GALILEO"; case GNSS_SYSTEM_BEIDOU: return "BEIDOU"; case GNSS_SYSTEM_QZSS: return "QZSS"; case GNSS_SYSTEM_IRNSS: return "IRNSS"; case GNSS_SYSTEM_SBAS: return "SBAS"; case GNSS_SYSTEM_IMES: return "IMES"; } return "unknown"; } #endif int gnss_dump_info(char *str, uint16_t strsize, const struct gnss_info *info) { int ret; const char *fmt = "gnss_info: {satellites_cnt: %u, hdop: %u.%u, fix_status: %s, " "fix_quality: %s}"; ret = snprintk(str, strsize, fmt, info->satellites_cnt, info->hdop / 1000, info->hdop % 1000, gnss_fix_status_to_str(info->fix_status), gnss_fix_quality_to_str(info->fix_quality)); return (strsize < ret) ? 
-ENOMEM : 0; } int gnss_dump_nav_data(char *str, uint16_t strsize, const struct navigation_data *nav_data) { int ret; const char *fmt = "navigation_data: {latitude: %s%lli.%09lli, longitude : %s%lli.%09lli, " "bearing %u.%03u, speed %u.%03u, altitude: %s%i.%03i}"; char *lat_sign = nav_data->latitude < 0 ? "-" : ""; char *lon_sign = nav_data->longitude < 0 ? "-" : ""; char *alt_sign = nav_data->altitude < 0 ? "-" : ""; ret = snprintk(str, strsize, fmt, lat_sign, llabs(nav_data->latitude) / 1000000000, llabs(nav_data->latitude) % 1000000000, lon_sign, llabs(nav_data->longitude) / 1000000000, llabs(nav_data->longitude) % 1000000000, nav_data->bearing / 1000, nav_data->bearing % 1000, nav_data->speed / 1000, nav_data->speed % 1000, alt_sign, abs(nav_data->altitude) / 1000, abs(nav_data->altitude) % 1000); return (strsize < ret) ? -ENOMEM : 0; } int gnss_dump_time(char *str, uint16_t strsize, const struct gnss_time *utc) { int ret; const char *fmt = "gnss_time: {hour: %u, minute: %u, millisecond %u, month_day %u, " "month: %u, century_year: %u}"; ret = snprintk(str, strsize, fmt, utc->hour, utc->minute, utc->millisecond, utc->month_day, utc->month, utc->century_year); return (strsize < ret) ? -ENOMEM : 0; } #if CONFIG_GNSS_SATELLITES int gnss_dump_satellite(char *str, uint16_t strsize, const struct gnss_satellite *satellite) { int ret; const char *fmt = "gnss_satellite: {prn: %u, snr: %u, elevation %u, azimuth %u, " "system: %s, is_tracked: %u}"; ret = snprintk(str, strsize, fmt, satellite->prn, satellite->snr, satellite->elevation, satellite->azimuth, gnss_system_to_str(satellite->system), satellite->is_tracked); return (strsize < ret) ? 
-ENOMEM : 0; } #endif #if CONFIG_GNSS_DUMP_TO_LOG static void gnss_dump_data_to_log(const struct device *dev, const struct gnss_data *data) { if (gnss_dump_info(dump_buf, sizeof(dump_buf), &data->info) < 0) { return; } LOG_PRINTK("%s: %s\r\n", dev->name, dump_buf); if (gnss_dump_nav_data(dump_buf, sizeof(dump_buf), &data->nav_data) < 0) { return; } LOG_PRINTK("%s: %s\r\n", dev->name, dump_buf); if (gnss_dump_time(dump_buf, sizeof(dump_buf), &data->utc) < 0) { return; } LOG_PRINTK("%s: %s\r\n", dev->name, dump_buf); } GNSS_DATA_CALLBACK_DEFINE(NULL, gnss_dump_data_to_log); #endif #if defined(CONFIG_GNSS_DUMP_TO_LOG) && defined(CONFIG_GNSS_SATELLITES) static void gnss_dump_satellites_to_log(const struct device *dev, const struct gnss_satellite *satellites, uint16_t size) { for (uint16_t i = 0; i < size; i++) { if (gnss_dump_satellite(dump_buf, sizeof(dump_buf), &satellites[i]) < 0) { return; } LOG_PRINTK("%s: %s\r\n", dev->name, dump_buf); } } GNSS_SATELLITES_CALLBACK_DEFINE(NULL, gnss_dump_satellites_to_log); #endif ```
/content/code_sandbox/drivers/gnss/gnss_dump.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,520
```c /* * */ #include <zephyr/drivers/gnss/gnss_publish.h> #include <zephyr/kernel.h> #include <zephyr/modem/chat.h> #include <string.h> #include "gnss_parse.h" #include "gnss_nmea0183.h" #include "gnss_nmea0183_match.h" static int gnss_nmea0183_match_parse_utc(char **argv, uint16_t argc, uint32_t *utc) { int64_t i64; if ((gnss_parse_dec_to_milli(argv[1], &i64) < 0) || (i64 < 0) || (i64 > UINT32_MAX)) { return -EINVAL; } *utc = (uint32_t)i64; return 0; } #if CONFIG_GNSS_SATELLITES static void gnss_nmea0183_match_reset_gsv(struct gnss_nmea0183_match_data *data) { data->satellites_length = 0; data->gsv_message_number = 1; } #endif static void gnss_nmea0183_match_publish(struct gnss_nmea0183_match_data *data) { if ((data->gga_utc == 0) || (data->rmc_utc == 0)) { return; } if (data->gga_utc == data->rmc_utc) { gnss_publish_data(data->gnss, &data->data); } } void gnss_nmea0183_match_gga_callback(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct gnss_nmea0183_match_data *data = user_data; if (gnss_nmea0183_parse_gga((const char **)argv, argc, &data->data) < 0) { return; } if (gnss_nmea0183_match_parse_utc(argv, argc, &data->gga_utc) < 0) { return; } gnss_nmea0183_match_publish(data); } void gnss_nmea0183_match_rmc_callback(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct gnss_nmea0183_match_data *data = user_data; if (gnss_nmea0183_parse_rmc((const char **)argv, argc, &data->data) < 0) { return; } if (gnss_nmea0183_match_parse_utc(argv, argc, &data->rmc_utc) < 0) { return; } gnss_nmea0183_match_publish(data); } #if CONFIG_GNSS_SATELLITES void gnss_nmea0183_match_gsv_callback(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct gnss_nmea0183_match_data *data = user_data; struct gnss_nmea0183_gsv_header header; int ret; if (gnss_nmea0183_parse_gsv_header((const char **)argv, argc, &header) < 0) { return; } if (header.number_of_svs == 0) { return; } if (header.message_number != 
data->gsv_message_number) { gnss_nmea0183_match_reset_gsv(data); return; } data->gsv_message_number++; ret = gnss_nmea0183_parse_gsv_svs((const char **)argv, argc, &data->satellites[data->satellites_length], data->satellites_size - data->satellites_length); if (ret < 0) { gnss_nmea0183_match_reset_gsv(data); return; } data->satellites_length += (uint16_t)ret; if (data->satellites_length == header.number_of_svs) { gnss_publish_satellites(data->gnss, data->satellites, data->satellites_length); gnss_nmea0183_match_reset_gsv(data); } } #endif int gnss_nmea0183_match_init(struct gnss_nmea0183_match_data *data, const struct gnss_nmea0183_match_config *config) { __ASSERT(data != NULL, "data argument must be provided"); __ASSERT(config != NULL, "config argument must be provided"); memset(data, 0, sizeof(struct gnss_nmea0183_match_data)); data->gnss = config->gnss; #if CONFIG_GNSS_SATELLITES data->satellites = config->satellites; data->satellites_size = config->satellites_size; #endif return 0; } ```
/content/code_sandbox/drivers/gnss/gnss_nmea0183_match.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,023
```unknown config GNSS_EMUL bool "Emulated GNSS driver" default y depends on DT_HAS_ZEPHYR_GNSS_EMUL_ENABLED depends on GNSS_REFERENCE_FRAME_WGS84 select TIMEOUT_64BIT help Enable emulated GNSS driver. ```
/content/code_sandbox/drivers/gnss/Kconfig.emul
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
61
/*
 * GNSS decimal-string parsing helpers.
 *
 * Converts NMEA-style decimal strings (e.g. "112.54") into fixed-point
 * integer representations in nano, micro and milli units.
 *
 * The Zephyr-specific headers are guarded so that the pure-C parsing logic
 * can also be compiled host-side for unit testing.
 */

#ifdef __ZEPHYR__
#include <zephyr/kernel.h>
#include "gnss_parse.h"
#endif

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#ifndef __ASSERT
/* Host-build fallback; under Zephyr __ASSERT comes from <zephyr/kernel.h>. */
#define __ASSERT(test, msg)
#endif

#define GNSS_PARSE_NANO_KNOTS_IN_MMS (1943840LL)
#define GNSS_PARSE_NANO (1000000000LL)
#define GNSS_PARSE_MICRO (1000000LL)
#define GNSS_PARSE_MILLI (1000LL)

/**
 * Parse a signed decimal string into nano units.
 *
 * "10.5" -> 10500000000, "-1.5" -> -1500000000.
 *
 * @param str  NUL-terminated decimal string, optional leading '-',
 *             optional single '.' separator
 * @param nano Destination for the parsed value in nano units
 *
 * @retval 0 on success
 * @retval -EINVAL if str contains a non-digit character
 */
int gnss_parse_dec_to_nano(const char *str, int64_t *nano)
{
	int64_t sum = 0;
	/* int (not int8_t) so positions cannot overflow on long strings */
	int decimal = -1;
	int pos = 0;
	int start;
	int64_t increment;

	__ASSERT(str != NULL, "str argument must be provided");
	__ASSERT(nano != NULL, "nano argument must be provided");

	/* Locate the decimal point, if any */
	while (str[pos] != '\0') {
		if (str[pos] == '.') {
			decimal = pos;
			break;
		}

		pos++;
	}

	/* Index of the least significant digit of the whole part */
	pos = (decimal < 0) ? pos - 1 : decimal - 1;

	/* Skip sign if it exists */
	start = (str[0] == '-') ? 1 : 0;

	/* Accumulate whole part, least significant digit first */
	increment = GNSS_PARSE_NANO;

	while (start <= pos) {
		/* Verify char is a digit */
		if (str[pos] < '0' || str[pos] > '9') {
			return -EINVAL;
		}

		sum += (int64_t)(str[pos] - '0') * increment;

		increment *= 10;

		/* Decrement position */
		pos--;
	}

	/* No fractional part: apply sign and return */
	if (decimal < 0) {
		*nano = (start == 1) ? -sum : sum;
		return 0;
	}

	/* Accumulate fractional part as nano fractions */
	pos = decimal + 1;
	increment = GNSS_PARSE_NANO / 10LL;

	while (str[pos] != '\0') {
		/* Verify char is a digit */
		if (str[pos] < '0' || str[pos] > '9') {
			return -EINVAL;
		}

		sum += (int64_t)(str[pos] - '0') * increment;

		increment /= 10;

		pos++;
	}

	/* Apply sign */
	*nano = (start == 1) ? -sum : sum;
	return 0;
}

/**
 * Parse a decimal string into micro units.
 *
 * Callers are expected to pass non-negative values; a negative input
 * produces a wrapped unsigned result, as *micro is unsigned.
 *
 * @param str   NUL-terminated decimal string
 * @param micro Destination for the parsed value in micro units
 *
 * @retval 0 on success
 * @retval -EINVAL if str contains a non-digit character
 */
int gnss_parse_dec_to_micro(const char *str, uint64_t *micro)
{
	/* Parse into a signed intermediate: gnss_parse_dec_to_nano() takes an
	 * int64_t *, so passing micro directly would be a pointer type mismatch.
	 */
	int64_t nano;
	int ret;

	__ASSERT(str != NULL, "str argument must be provided");
	__ASSERT(micro != NULL, "micro argument must be provided");

	ret = gnss_parse_dec_to_nano(str, &nano);
	if (ret < 0) {
		return ret;
	}

	*micro = (uint64_t)(nano / GNSS_PARSE_MILLI);
	return 0;
}

/**
 * Parse a signed decimal string into milli units.
 *
 * @param str   NUL-terminated decimal string
 * @param milli Destination for the parsed value in milli units
 *
 * @retval 0 on success
 * @retval -EINVAL if str contains a non-digit character
 */
int gnss_parse_dec_to_milli(const char *str, int64_t *milli)
{
	int ret;

	__ASSERT(str != NULL, "str argument must be provided");
	__ASSERT(milli != NULL, "milli argument must be provided");

	ret = gnss_parse_dec_to_nano(str, milli);
	if (ret < 0) {
		return ret;
	}

	(*milli) = (*milli) / GNSS_PARSE_MICRO;
	return 0;
}

/**
 * Parse an integer string in the given base.
 *
 * @param str     NUL-terminated integer string
 * @param base    Numeral base passed to strtol (e.g. 10, 16)
 * @param integer Destination for the parsed value
 *
 * @retval 0 on success
 * @retval -EINVAL if str is empty, contains trailing garbage, or is out of range
 */
int gnss_parse_atoi(const char *str, uint8_t base, int32_t *integer)
{
	char *end;
	long value;

	__ASSERT(str != NULL, "str argument must be provided");
	__ASSERT(integer != NULL, "integer argument must be provided");

	errno = 0;
	value = strtol(str, &end, (int)base);

	/* end == str rejects empty/no-digit input; *end rejects trailing garbage */
	if (end == str || *end != '\0' || errno == ERANGE) {
		return -EINVAL;
	}

	*integer = (int32_t)value;
	return 0;
}
/content/code_sandbox/drivers/gnss/gnss_parse.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
897
```objective-c
/*
 * NMEA0183 protocol helpers: checksum/encapsulation and sentence parsers
 * for RMC, GGA and GSV messages.
 */

#ifndef ZEPHYR_DRIVERS_GNSS_GNSS_NMEA0183_H_
#define ZEPHYR_DRIVERS_GNSS_GNSS_NMEA0183_H_

#include <zephyr/drivers/gnss.h>

/**
 * @brief Compute NMEA0183 checksum
 *
 * @example "PAIR002" -> 0x38
 *
 * @param str String from which checksum is computed
 *
 * @retval checksum
 */
uint8_t gnss_nmea0183_checksum(const char *str);

/**
 * @brief Encapsulate str in NMEA0183 message format
 *
 * @example "PAIR%03u", 2 -> "$PAIR002*38"
 *
 * @param str Destination for encapsulated string
 * @param size Size of destination for encapsulated string
 * @param fmt Format of string to encapsulate
 * @param ... Arguments
 *
 * @retval Length of encapsulated string if successful
 * @retval negative errno code if destination is too small or formatting failed
 */
int gnss_nmea0183_snprintk(char *str, size_t size, const char *fmt, ...);

/**
 * @brief Computes and validates checksum
 *
 * @param argv Array of arguments split by ',' including message id and checksum
 * @param argc Number of arguments in argv
 *
 * @retval true if message is intact
 * @retval false if message is corrupted
 */
bool gnss_nmea0183_validate_message(char **argv, uint16_t argc);

/**
 * @brief Parse a ddmm.mmmm formatted angle to nano degrees
 *
 * @example "5610.9928" -> 56183214000
 *
 * @param ddmm_mmmm String representation of angle in ddmm.mmmm format
 * @param ndeg Result in nano degrees
 *
 * @retval -EINVAL if ddmm_mmmm argument is invalid
 * @retval 0 if parsed successfully
 */
int gnss_nmea0183_ddmm_mmmm_to_ndeg(const char *ddmm_mmmm, int64_t *ndeg);

/**
 * @brief Parse knots to millimeters per second
 *
 * @example "15.231" -> 7835
 *
 * @param str String representation of speed in knots
 * @param mms Destination for speed in millimeters per second
 *
 * @retval -EINVAL if str could not be parsed or if speed is negative
 * @retval 0 if parsed successfully
 */
int gnss_nmea0183_knots_to_mms(const char *str, int64_t *mms);

/**
 * @brief Parse hhmmss.sss to struct gnss_time
 *
 * @example "133243.012" -> { .hour = 13, .minute = 32, .ms = 43012 }
 * @example "133243" -> { .hour = 13, .minute = 32, .ms = 43000 }
 *
 * @param hhmmss String representation of hours, minutes, seconds and subseconds
 * @param utc Destination for parsed time
 *
 * @retval -EINVAL if hhmmss could not be parsed
 * @retval 0 if parsed successfully
 */
int gnss_nmea0183_parse_hhmmss(const char *hhmmss, struct gnss_time *utc);

/**
 * @brief Parse ddmmyy to unsigned integers
 *
 * @example "041122" -> { .mday = 4, .month = 11, .year = 22 }
 *
 * @param ddmmyy String representation of the date in ddmmyy format
 * @param utc Destination for parsed time
 *
 * @retval -EINVAL if ddmmyy could not be parsed
 * @retval 0 if parsed successfully
 */
int gnss_nmea0183_parse_ddmmyy(const char *ddmmyy, struct gnss_time *utc);

/**
 * @brief Parses NMEA0183 RMC message
 *
 * @details Parses the time, date, latitude, longitude, speed, and bearing
 * from the NMEA0183 RMC message provided as an array of strings split by ','
 *
 * @param argv Array of arguments split by ',' including message id and checksum
 * @param argc Number of arguments in argv
 * @param data Destination for data parsed from NMEA0183 RMC message
 *
 * @retval 0 if successful
 * @retval -EINVAL if input is invalid
 */
int gnss_nmea0183_parse_rmc(const char **argv, uint16_t argc, struct gnss_data *data);

/**
 * @brief Parses NMEA0183 GGA message
 *
 * @details Parses the GNSS fix quality and status, number of satellites used for
 * fix, HDOP, and altitude (geoid separation) from the NMEA0183 GGA message provided
 * as an array of strings split by ','
 *
 * @param argv Array of arguments split by ',' including message id and checksum
 * @param argc Number of arguments in argv
 * @param data Destination for data parsed from NMEA0183 GGA message
 *
 * @retval 0 if successful
 * @retval -EINVAL if input is invalid
 */
int gnss_nmea0183_parse_gga(const char **argv, uint16_t argc, struct gnss_data *data);

/** GSV header structure */
struct gnss_nmea0183_gsv_header {
	/** Indicates the system of the space-vehicles contained in the message */
	enum gnss_system system;
	/** Number of GSV messages in total */
	uint16_t number_of_messages;
	/** Number of this GSV message */
	uint16_t message_number;
	/** Number of visible space-vehicles */
	uint16_t number_of_svs;
};

/**
 * @brief Parses header of NMEA0183 GSV message
 *
 * @details The GSV messages are part of a list of messages sent in ascending
 * order, split by GNSS system.
 *
 * @param argv Array of arguments split by ',' including message id and checksum
 * @param argc Number of arguments in argv
 * @param header Destination for parsed NMEA0183 GSV message header
 *
 * @retval 0 if successful
 * @retval -EINVAL if input is invalid
 */
int gnss_nmea0183_parse_gsv_header(const char **argv, uint16_t argc,
				   struct gnss_nmea0183_gsv_header *header);

/**
 * @brief Parses space-vehicles in NMEA0183 GSV message
 *
 * @details The NMEA0183 GSV message contains up to 4 space-vehicles which follow
 * the header.
 *
 * @param argv Array of arguments split by ',' including message id and checksum
 * @param argc Number of arguments in argv
 * @param satellites Destination for parsed satellites from NMEA0183 GSV message
 * @param size Size of destination for parsed satellites from NMEA0183 GSV message
 *
 * @retval Number of parsed space-vehicles stored at destination if successful
 * @retval -ENOMEM if all space-vehicles in message could not be stored at destination
 * @retval -EINVAL if input is invalid
 */
int gnss_nmea0183_parse_gsv_svs(const char **argv, uint16_t argc,
				struct gnss_satellite *satellites, uint16_t size);

#endif /* ZEPHYR_DRIVERS_GNSS_GNSS_NMEA0183_H_ */
```
/content/code_sandbox/drivers/gnss/gnss_nmea0183.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,525
```c /* * */ #include <zephyr/drivers/gnss.h> #include <zephyr/drivers/gnss/gnss_publish.h> #include <zephyr/modem/chat.h> #include <zephyr/modem/backend/uart.h> #include <zephyr/kernel.h> #include <zephyr/pm/device.h> #include <zephyr/drivers/gpio.h> #include <string.h> #include "gnss_nmea0183.h" #include "gnss_nmea0183_match.h" #include "gnss_parse.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(luatos_air530z, CONFIG_GNSS_LOG_LEVEL); #define DT_DRV_COMPAT luatos_air530z #define UART_RECV_BUF_SZ 128 #define UART_TRANS_BUF_SZ 64 #define CHAT_RECV_BUF_SZ 256 #define CHAT_ARGV_SZ 32 MODEM_CHAT_SCRIPT_CMDS_DEFINE(init_script_cmds, #if CONFIG_GNSS_SATELLITES /* receive only GGA, RMC and GSV NMEA messages */ MODEM_CHAT_SCRIPT_CMD_RESP_NONE("$PCAS03,1,0,0,1,1,0,0,0,0,0,0,0,0*1F", 10), #else /* receive only GGA and RMC NMEA messages */ MODEM_CHAT_SCRIPT_CMD_RESP_NONE("$PCAS03,1,0,0,0,1,0,0,0,0,0,0,0,0*1E", 10), #endif ); MODEM_CHAT_SCRIPT_NO_ABORT_DEFINE(init_script, init_script_cmds, NULL, 5); struct gnss_luatos_air530z_config { const struct device *uart; const struct gpio_dt_spec on_off_gpio; const int uart_baudrate; }; struct gnss_luatos_air530z_data { struct gnss_nmea0183_match_data match_data; #if CONFIG_GNSS_SATELLITES struct gnss_satellite satellites[CONFIG_GNSS_LUATOS_AIR530Z_SATELLITES_COUNT]; #endif /* UART backend */ struct modem_pipe *uart_pipe; struct modem_backend_uart uart_backend; uint8_t uart_backend_receive_buf[UART_RECV_BUF_SZ]; uint8_t uart_backend_transmit_buf[UART_TRANS_BUF_SZ]; /* Modem chat */ struct modem_chat chat; uint8_t chat_receive_buf[CHAT_RECV_BUF_SZ]; uint8_t chat_delimiter[2]; uint8_t *chat_argv[CHAT_ARGV_SZ]; /* Dynamic chat script */ uint8_t dynamic_separators_buf[2]; uint8_t dynamic_request_buf[32]; struct modem_chat_script_chat dynamic_script_chat; struct modem_chat_script dynamic_script; struct k_sem lock; }; MODEM_CHAT_MATCHES_DEFINE(unsol_matches, MODEM_CHAT_MATCH_WILDCARD("$??GGA,", ",*", gnss_nmea0183_match_gga_callback), 
MODEM_CHAT_MATCH_WILDCARD("$??RMC,", ",*", gnss_nmea0183_match_rmc_callback), #if CONFIG_GNSS_SATELLITES MODEM_CHAT_MATCH_WILDCARD("$??GSV,", ",*", gnss_nmea0183_match_gsv_callback), #endif ); static void luatos_air530z_lock(const struct device *dev) { struct gnss_luatos_air530z_data *data = dev->data; (void)k_sem_take(&data->lock, K_FOREVER); } static void luatos_air530z_unlock(const struct device *dev) { struct gnss_luatos_air530z_data *data = dev->data; k_sem_give(&data->lock); } static int gnss_luatos_air530z_init_nmea0183_match(const struct device *dev) { struct gnss_luatos_air530z_data *data = dev->data; const struct gnss_nmea0183_match_config match_config = { .gnss = dev, #if CONFIG_GNSS_SATELLITES .satellites = data->satellites, .satellites_size = ARRAY_SIZE(data->satellites), #endif }; return gnss_nmea0183_match_init(&data->match_data, &match_config); } static void gnss_luatos_air530z_init_pipe(const struct device *dev) { const struct gnss_luatos_air530z_config *config = dev->config; struct gnss_luatos_air530z_data *data = dev->data; const struct modem_backend_uart_config uart_backend_config = { .uart = config->uart, .receive_buf = data->uart_backend_receive_buf, .receive_buf_size = sizeof(data->uart_backend_receive_buf), .transmit_buf = data->uart_backend_transmit_buf, .transmit_buf_size = ARRAY_SIZE(data->uart_backend_transmit_buf), }; data->uart_pipe = modem_backend_uart_init(&data->uart_backend, &uart_backend_config); } static int gnss_luatos_air530z_init_chat(const struct device *dev) { struct gnss_luatos_air530z_data *data = dev->data; const struct modem_chat_config chat_config = { .user_data = data, .receive_buf = data->chat_receive_buf, .receive_buf_size = sizeof(data->chat_receive_buf), .delimiter = data->chat_delimiter, .delimiter_size = ARRAY_SIZE(data->chat_delimiter), .filter = NULL, .filter_size = 0, .argv = data->chat_argv, .argv_size = ARRAY_SIZE(data->chat_argv), .unsol_matches = unsol_matches, .unsol_matches_size = 
ARRAY_SIZE(unsol_matches), }; return modem_chat_init(&data->chat, &chat_config); } static void luatos_air530z_init_dynamic_script(const struct device *dev) { struct gnss_luatos_air530z_data *data = dev->data; /* Air530z doesn't respond to commands. Thus, response_matches_size = 0; */ data->dynamic_script_chat.request = data->dynamic_request_buf; data->dynamic_script_chat.response_matches = NULL; data->dynamic_script_chat.response_matches_size = 0; data->dynamic_script_chat.timeout = 0; data->dynamic_script.name = "PCAS"; data->dynamic_script.script_chats = &data->dynamic_script_chat; data->dynamic_script.script_chats_size = 1; data->dynamic_script.abort_matches = NULL; data->dynamic_script.abort_matches_size = 0; data->dynamic_script.callback = NULL; data->dynamic_script.timeout = 5; } static int gnss_luatos_air530z_init(const struct device *dev) { struct gnss_luatos_air530z_data *data = dev->data; const struct gnss_luatos_air530z_config *config = dev->config; int ret; k_sem_init(&data->lock, 1, 1); ret = gnss_luatos_air530z_init_nmea0183_match(dev); if (ret < 0) { return ret; } gnss_luatos_air530z_init_pipe(dev); ret = gnss_luatos_air530z_init_chat(dev); if (ret < 0) { return ret; } luatos_air530z_init_dynamic_script(dev); ret = modem_pipe_open(data->uart_pipe, K_SECONDS(10)); if (ret < 0) { return ret; } ret = modem_chat_attach(&data->chat, data->uart_pipe); if (ret < 0) { modem_pipe_close(data->uart_pipe, K_SECONDS(10)); return ret; } ret = modem_chat_run_script(&data->chat, &init_script); if (ret < 0) { LOG_ERR("Failed to run init_script"); modem_pipe_close(data->uart_pipe, K_SECONDS(10)); return ret; } /* setup on-off gpio for power management */ if (!gpio_is_ready_dt(&config->on_off_gpio)) { LOG_ERR("on-off GPIO device not ready"); return -ENODEV; } gpio_pin_configure_dt(&config->on_off_gpio, GPIO_OUTPUT_HIGH); return 0; } static int luatos_air530z_pm_resume(const struct device *dev) { struct gnss_luatos_air530z_data *data = dev->data; int ret; ret = 
modem_pipe_open(data->uart_pipe, K_SECONDS(10)); if (ret < 0) { return ret; } ret = modem_chat_attach(&data->chat, data->uart_pipe); if (ret < 0) { modem_pipe_close(data->uart_pipe, K_SECONDS(10)); return ret; } ret = modem_chat_run_script(&data->chat, &init_script); if (ret < 0) { modem_pipe_close(data->uart_pipe, K_SECONDS(10)); return ret; } return 0; } static int luatos_air530z_pm_action(const struct device *dev, enum pm_device_action action) { struct gnss_luatos_air530z_data *data = dev->data; const struct gnss_luatos_air530z_config *config = dev->config; int ret = -ENOTSUP; switch (action) { case PM_DEVICE_ACTION_SUSPEND: gpio_pin_set_dt(&config->on_off_gpio, 0); ret = modem_pipe_close(data->uart_pipe, K_SECONDS(10)); break; case PM_DEVICE_ACTION_RESUME: gpio_pin_set_dt(&config->on_off_gpio, 1); ret = luatos_air530z_pm_resume(dev); break; default: break; } return ret; } static int luatos_air530z_set_fix_rate(const struct device *dev, uint32_t fix_interval_ms) { struct gnss_luatos_air530z_data *data = dev->data; int ret; if (fix_interval_ms < 100 || fix_interval_ms > 1000) { return -EINVAL; } luatos_air530z_lock(dev); ret = gnss_nmea0183_snprintk(data->dynamic_request_buf, sizeof(data->dynamic_request_buf), "PCAS02,%u", fix_interval_ms); data->dynamic_script_chat.request_size = ret; ret = modem_chat_run_script(&data->chat, &data->dynamic_script); if (ret < 0) { goto unlock_return; } unlock_return: luatos_air530z_unlock(dev); return ret; } static int luatos_air530z_set_enabled_systems(const struct device *dev, gnss_systems_t systems) { struct gnss_luatos_air530z_data *data = dev->data; gnss_systems_t supported_systems; uint8_t encoded_systems = 0; int ret; supported_systems = (GNSS_SYSTEM_GPS | GNSS_SYSTEM_GLONASS | GNSS_SYSTEM_BEIDOU); if ((~supported_systems) & systems) { return -EINVAL; } luatos_air530z_lock(dev); WRITE_BIT(encoded_systems, 0, systems & GNSS_SYSTEM_GPS); WRITE_BIT(encoded_systems, 1, systems & GNSS_SYSTEM_GLONASS); WRITE_BIT(encoded_systems, 
2, systems & GNSS_SYSTEM_BEIDOU); ret = gnss_nmea0183_snprintk(data->dynamic_request_buf, sizeof(data->dynamic_request_buf), "PCAS04,%u", encoded_systems); if (ret < 0) { goto unlock_return; } data->dynamic_script_chat.request_size = ret; ret = modem_chat_run_script(&data->chat, &data->dynamic_script); if (ret < 0) { goto unlock_return; } unlock_return: luatos_air530z_unlock(dev); return ret; } static int luatos_air530z_get_supported_systems(const struct device *dev, gnss_systems_t *systems) { *systems = (GNSS_SYSTEM_GPS | GNSS_SYSTEM_GLONASS | GNSS_SYSTEM_BEIDOU); return 0; } static const struct gnss_driver_api gnss_api = { .set_fix_rate = luatos_air530z_set_fix_rate, .set_enabled_systems = luatos_air530z_set_enabled_systems, .get_supported_systems = luatos_air530z_get_supported_systems, }; #define LUATOS_AIR530Z(inst) \ static const struct gnss_luatos_air530z_config gnss_luatos_air530z_cfg_##inst = { \ .uart = DEVICE_DT_GET(DT_INST_BUS(inst)), \ .on_off_gpio = GPIO_DT_SPEC_INST_GET_OR(inst, on_off_gpios, { 0 }), \ }; \ \ static struct gnss_luatos_air530z_data gnss_luatos_air530z_data_##inst = { \ .chat_delimiter = {'\r', '\n'}, \ .dynamic_separators_buf = {',', '*'}, \ }; \ \ PM_DEVICE_DT_INST_DEFINE(inst, luatos_air530z_pm_action); \ \ DEVICE_DT_INST_DEFINE(inst, gnss_luatos_air530z_init, \ PM_DEVICE_DT_INST_GET(inst), \ &gnss_luatos_air530z_data_##inst, \ &gnss_luatos_air530z_cfg_##inst, \ POST_KERNEL, CONFIG_GNSS_INIT_PRIORITY, &gnss_api); DT_INST_FOREACH_STATUS_OKAY(LUATOS_AIR530Z) ```
/content/code_sandbox/drivers/gnss/gnss_luatos_air530z.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,910
```c
/*
 * Generic driver for GNSS modems that stream standard NMEA0183 sentences
 * (GGA, RMC and optionally GSV) over UART with no device-specific setup.
 */

#include <zephyr/drivers/gnss.h>
#include <zephyr/drivers/gnss/gnss_publish.h>
#include <zephyr/modem/chat.h>
#include <zephyr/modem/backend/uart.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/pm/device.h>
#include <string.h>

#include "gnss_nmea0183.h"
#include "gnss_nmea0183_match.h"
#include "gnss_parse.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(gnss_nmea_generic, CONFIG_GNSS_LOG_LEVEL);

/* Larger RX buffer when satellite (GSV) sentences are also received */
#define UART_RX_BUF_SZ (256 + IS_ENABLED(CONFIG_GNSS_SATELLITES) * 512)
#define UART_TX_BUF_SZ 64
#define CHAT_RECV_BUF_SZ 256
#define CHAT_ARGV_SZ 32

struct gnss_nmea_generic_config {
	/* UART bus the modem is attached to (from devicetree) */
	const struct device *uart;
	/* Script run on resume; empty for the plain gnss-nmea-generic compatible */
	const struct modem_chat_script *const init_chat_script;
};

struct gnss_nmea_generic_data {
	struct gnss_nmea0183_match_data match_data;
#if CONFIG_GNSS_SATELLITES
	struct gnss_satellite satellites[CONFIG_GNSS_NMEA_GENERIC_SATELLITES_COUNT];
#endif

	/* UART backend */
	struct modem_pipe *uart_pipe;
	struct modem_backend_uart uart_backend;
	uint8_t uart_backend_receive_buf[UART_RX_BUF_SZ];
	uint8_t uart_backend_transmit_buf[UART_TX_BUF_SZ];

	/* Modem chat */
	struct modem_chat chat;
	uint8_t chat_receive_buf[CHAT_RECV_BUF_SZ];
	uint8_t *chat_argv[CHAT_ARGV_SZ];
};

MODEM_CHAT_MATCHES_DEFINE(unsol_matches,
	MODEM_CHAT_MATCH_WILDCARD("$??GGA,", ",*", gnss_nmea0183_match_gga_callback),
	MODEM_CHAT_MATCH_WILDCARD("$??RMC,", ",*", gnss_nmea0183_match_rmc_callback),
#if CONFIG_GNSS_SATELLITES
	MODEM_CHAT_MATCH_WILDCARD("$??GSV,", ",*", gnss_nmea0183_match_gsv_callback),
#endif
);

/* Open the pipe, attach the chat parser and run the init script; on any
 * failure the pipe is closed again before returning.
 */
static int gnss_nmea_generic_resume(const struct device *dev)
{
	const struct gnss_nmea_generic_config *cfg = dev->config;
	struct gnss_nmea_generic_data *data = dev->data;
	int ret;

	ret = modem_pipe_open(data->uart_pipe, K_SECONDS(10));
	if (ret < 0) {
		return ret;
	}

	ret = modem_chat_attach(&data->chat, data->uart_pipe);
	if (ret == 0) {
		ret = modem_chat_run_script(&data->chat, cfg->init_chat_script);
	}

	if (ret < 0) {
		modem_pipe_close(data->uart_pipe, K_SECONDS(10));
	}
	return ret;
}

/* No optional driver operations are implemented; data is published
 * exclusively through the unsolicited NMEA match callbacks.
 */
static const struct gnss_driver_api gnss_api = {
};

/* Bind the NMEA0183 match helper to this device (and satellite storage) */
static int gnss_nmea_generic_init_nmea0183_match(const struct device *dev)
{
	struct gnss_nmea_generic_data *data = dev->data;

	const struct gnss_nmea0183_match_config match_config = {
		.gnss = dev,
#if CONFIG_GNSS_SATELLITES
		.satellites = data->satellites,
		.satellites_size = ARRAY_SIZE(data->satellites),
#endif
	};

	return gnss_nmea0183_match_init(&data->match_data, &match_config);
}

/* Set up the modem pipe backed by the devicetree UART */
static void gnss_nmea_generic_init_pipe(const struct device *dev)
{
	const struct gnss_nmea_generic_config *cfg = dev->config;
	struct gnss_nmea_generic_data *data = dev->data;

	const struct modem_backend_uart_config uart_backend_config = {
		.uart = cfg->uart,
		.receive_buf = data->uart_backend_receive_buf,
		.receive_buf_size = sizeof(data->uart_backend_receive_buf),
		.transmit_buf = data->uart_backend_transmit_buf,
		.transmit_buf_size = sizeof(data->uart_backend_transmit_buf),
	};

	data->uart_pipe = modem_backend_uart_init(&data->uart_backend, &uart_backend_config);
}

/* NMEA sentences are terminated by CRLF */
static uint8_t gnss_nmea_generic_char_delimiter[] = {'\r', '\n'};

/* Configure the chat instance that parses incoming NMEA sentences */
static int gnss_nmea_generic_init_chat(const struct device *dev)
{
	struct gnss_nmea_generic_data *data = dev->data;

	const struct modem_chat_config chat_config = {
		.user_data = data,
		.receive_buf = data->chat_receive_buf,
		.receive_buf_size = sizeof(data->chat_receive_buf),
		.delimiter = gnss_nmea_generic_char_delimiter,
		.delimiter_size = ARRAY_SIZE(gnss_nmea_generic_char_delimiter),
		.filter = NULL,
		.filter_size = 0,
		.argv = data->chat_argv,
		.argv_size = ARRAY_SIZE(data->chat_argv),
		.unsol_matches = unsol_matches,
		.unsol_matches_size = ARRAY_SIZE(unsol_matches),
	};

	return modem_chat_init(&data->chat, &chat_config);
}

static int gnss_nmea_generic_init(const struct device *dev)
{
	int ret;

	ret = gnss_nmea_generic_init_nmea0183_match(dev);
	if (ret < 0) {
		return ret;
	}

	gnss_nmea_generic_init_pipe(dev);

	ret = gnss_nmea_generic_init_chat(dev);
	if (ret < 0) {
		return ret;
	}

	/* With PM enabled the device starts suspended and is brought up by a
	 * PM resume action; otherwise it is resumed immediately.
	 */
#if CONFIG_PM_DEVICE
	pm_device_init_suspended(dev);
#else
	ret = gnss_nmea_generic_resume(dev);
	if (ret < 0) {
		return ret;
	}
#endif

	return 0;
}

#if CONFIG_PM_DEVICE
static int gnss_nmea_generic_pm_action(const struct device *dev, enum pm_device_action action)
{
	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		return gnss_nmea_generic_resume(dev);

	default:
		return -ENOTSUP;
	}
}
#endif

/* The plain compatible needs no init commands; device-specific variants can
 * supply their own <compat>_init_chat_script.
 */
#if DT_HAS_COMPAT_STATUS_OKAY(gnss_nmea_generic)
MODEM_CHAT_SCRIPT_EMPTY_DEFINE(gnss_nmea_generic_init_chat_script);
#endif

#define GNSS_NMEA_GENERIC(inst)                                                 \
	static const struct gnss_nmea_generic_config gnss_nmea_generic_cfg_##inst = { \
		.uart = DEVICE_DT_GET(DT_INST_BUS(inst)),                       \
		.init_chat_script = &_CONCAT(DT_DRV_COMPAT, _init_chat_script), \
	};                                                                      \
                                                                                \
	static struct gnss_nmea_generic_data gnss_nmea_generic_data_##inst;     \
                                                                                \
	PM_DEVICE_DT_INST_DEFINE(inst, gnss_nmea_generic_pm_action);            \
                                                                                \
	DEVICE_DT_INST_DEFINE(inst, gnss_nmea_generic_init, PM_DEVICE_DT_INST_GET(inst),\
			      &gnss_nmea_generic_data_##inst,                   \
			      &gnss_nmea_generic_cfg_##inst,                    \
			      POST_KERNEL, CONFIG_GNSS_INIT_PRIORITY, &gnss_api);

#define DT_DRV_COMPAT gnss_nmea_generic
DT_INST_FOREACH_STATUS_OKAY(GNSS_NMEA_GENERIC)
#undef DT_DRV_COMPAT
```
/content/code_sandbox/drivers/gnss/gnss_nmea_generic.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,541
```unknown config GNSS_QUECTEL_LCX6G bool "Quectel LCX6G GNSS modem driver" default y depends on GNSS depends on DT_HAS_QUECTEL_LC26G_ENABLED || DT_HAS_QUECTEL_LC76G_ENABLED || DT_HAS_QUECTEL_LC86G_ENABLED depends on GNSS_REFERENCE_FRAME_WGS84 select MODEM_MODULES select MODEM_BACKEND_UART select MODEM_CHAT select GNSS_PARSE select GNSS_NMEA0183 select GNSS_NMEA0183_MATCH help Enable quectel LCX6G series GNSS modem driver. if GNSS_QUECTEL_LCX6G config GNSS_QUECTEL_LCX6G_UART_RX_BUF_SIZE int "Size of UART backend receive buffer" default 256 config GNSS_QUECTEL_LCX6G_UART_TX_BUF_SIZE int "Size of UART backend transmit buffer" default 64 if GNSS_SATELLITES config GNSS_QUECTEL_LCX6G_SAT_ARRAY_SIZE int "Size of GNSS satellites array" default 24 endif # GNSS_SATELLITES endif # GNSS_QUECTEL_LCX6G ```
/content/code_sandbox/drivers/gnss/Kconfig.quectel_lcx6g
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
271
```c /* * */ #include <zephyr/drivers/gnss.h> #include <zephyr/drivers/gnss/gnss_publish.h> #include <zephyr/modem/chat.h> #include <zephyr/modem/backend/uart.h> #include <zephyr/kernel.h> #include <zephyr/pm/device.h> #include <zephyr/drivers/gpio.h> #include <zephyr/pm/device_runtime.h> #include <string.h> #include "gnss_nmea0183.h" #include "gnss_nmea0183_match.h" #include "gnss_parse.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(quectel_lcx6g, CONFIG_GNSS_LOG_LEVEL); #define QUECTEL_LCX6G_PM_TIMEOUT_MS 500U #define QUECTEL_LCX6G_SCRIPT_TIMEOUT_S 10U #define QUECTEL_LCX6G_PAIR_NAV_MODE_STATIONARY 4 #define QUECTEL_LCX6G_PAIR_NAV_MODE_FITNESS 1 #define QUECTEL_LCX6G_PAIR_NAV_MODE_NORMAL 0 #define QUECTEL_LCX6G_PAIR_NAV_MODE_DRONE 5 #define QUECTEL_LCX6G_PAIR_PPS_MODE_DISABLED 0 #define QUECTEL_LCX6G_PAIR_PPS_MODE_ENABLED 4 #define QUECTEL_LCX6G_PAIR_PPS_MODE_ENABLED_AFTER_LOCK 1 #define QUECTEL_LCX6G_PAIR_PPS_MODE_ENABLED_WHILE_LOCKED 2 struct quectel_lcx6g_config { const struct device *uart; const enum gnss_pps_mode pps_mode; const uint16_t pps_pulse_width; }; struct quectel_lcx6g_data { struct gnss_nmea0183_match_data match_data; #if CONFIG_GNSS_SATELLITES struct gnss_satellite satellites[CONFIG_GNSS_QUECTEL_LCX6G_SAT_ARRAY_SIZE]; #endif /* UART backend */ struct modem_pipe *uart_pipe; struct modem_backend_uart uart_backend; uint8_t uart_backend_receive_buf[CONFIG_GNSS_QUECTEL_LCX6G_UART_RX_BUF_SIZE]; uint8_t uart_backend_transmit_buf[CONFIG_GNSS_QUECTEL_LCX6G_UART_TX_BUF_SIZE]; /* Modem chat */ struct modem_chat chat; uint8_t chat_receive_buf[256]; uint8_t chat_delimiter[2]; uint8_t *chat_argv[32]; /* Pair chat script */ uint8_t pair_request_buf[32]; uint8_t pair_match_buf[32]; struct modem_chat_match pair_match; struct modem_chat_script_chat pair_script_chat; struct modem_chat_script pair_script; /* Allocation for responses from GNSS modem */ union { uint16_t fix_rate_response; gnss_systems_t enabled_systems_response; enum gnss_navigation_mode 
navigation_mode_response; }; struct k_sem lock; k_timeout_t pm_timeout; }; #ifdef CONFIG_PM_DEVICE MODEM_CHAT_MATCH_DEFINE(pair003_success_match, "$PAIR001,003,0*38", "", NULL); MODEM_CHAT_SCRIPT_CMDS_DEFINE( suspend_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("$PAIR003*39", pair003_success_match) ); MODEM_CHAT_SCRIPT_NO_ABORT_DEFINE(suspend_script, suspend_script_cmds, NULL, QUECTEL_LCX6G_SCRIPT_TIMEOUT_S); #endif /* CONFIG_PM_DEVICE */ MODEM_CHAT_MATCH_DEFINE(pair062_ack_match, "$PAIR001,062,0*3F", "", NULL); MODEM_CHAT_SCRIPT_CMDS_DEFINE( resume_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("$PAIR002*38", modem_chat_any_match), MODEM_CHAT_SCRIPT_CMD_RESP("$PAIR062,0,1*3F", pair062_ack_match), MODEM_CHAT_SCRIPT_CMD_RESP("$PAIR062,1,0*3F", pair062_ack_match), MODEM_CHAT_SCRIPT_CMD_RESP("$PAIR062,2,0*3C", pair062_ack_match), #if CONFIG_GNSS_SATELLITES MODEM_CHAT_SCRIPT_CMD_RESP("$PAIR062,3,5*38", pair062_ack_match), #else MODEM_CHAT_SCRIPT_CMD_RESP("$PAIR062,3,0*3D", pair062_ack_match), #endif MODEM_CHAT_SCRIPT_CMD_RESP("$PAIR062,4,1*3B", pair062_ack_match), MODEM_CHAT_SCRIPT_CMD_RESP("$PAIR062,5,0*3B", pair062_ack_match), ); MODEM_CHAT_SCRIPT_NO_ABORT_DEFINE(resume_script, resume_script_cmds, NULL, QUECTEL_LCX6G_SCRIPT_TIMEOUT_S); MODEM_CHAT_MATCHES_DEFINE(unsol_matches, MODEM_CHAT_MATCH_WILDCARD("$??GGA,", ",*", gnss_nmea0183_match_gga_callback), MODEM_CHAT_MATCH_WILDCARD("$??RMC,", ",*", gnss_nmea0183_match_rmc_callback), #if CONFIG_GNSS_SATELLITES MODEM_CHAT_MATCH_WILDCARD("$??GSV,", ",*", gnss_nmea0183_match_gsv_callback), #endif ); static int quectel_lcx6g_configure_pps(const struct device *dev) { const struct quectel_lcx6g_config *config = dev->config; struct quectel_lcx6g_data *data = dev->data; uint8_t pps_mode = 0; int ret; switch (config->pps_mode) { case GNSS_PPS_MODE_DISABLED: pps_mode = QUECTEL_LCX6G_PAIR_PPS_MODE_DISABLED; break; case GNSS_PPS_MODE_ENABLED: pps_mode = QUECTEL_LCX6G_PAIR_PPS_MODE_ENABLED; break; case GNSS_PPS_MODE_ENABLED_AFTER_LOCK: pps_mode = 
QUECTEL_LCX6G_PAIR_PPS_MODE_ENABLED_AFTER_LOCK; break; case GNSS_PPS_MODE_ENABLED_WHILE_LOCKED: pps_mode = QUECTEL_LCX6G_PAIR_PPS_MODE_ENABLED_WHILE_LOCKED; break; } ret = gnss_nmea0183_snprintk(data->pair_request_buf, sizeof(data->pair_request_buf), "PAIR752,%u,%u", pps_mode, config->pps_pulse_width); if (ret < 0) { return ret; } ret = modem_chat_script_chat_set_request(&data->pair_script_chat, data->pair_request_buf); if (ret < 0) { return ret; } ret = gnss_nmea0183_snprintk(data->pair_match_buf, sizeof(data->pair_match_buf), "PAIR001,752,0"); if (ret < 0) { return ret; } ret = modem_chat_match_set_match(&data->pair_match, data->pair_match_buf); if (ret < 0) { return ret; } return modem_chat_run_script(&data->chat, &data->pair_script); } static void quectel_lcx6g_lock(const struct device *dev) { struct quectel_lcx6g_data *data = dev->data; (void)k_sem_take(&data->lock, K_FOREVER); } static void quectel_lcx6g_unlock(const struct device *dev) { struct quectel_lcx6g_data *data = dev->data; k_sem_give(&data->lock); } static void quectel_lcx6g_pm_changed(const struct device *dev) { struct quectel_lcx6g_data *data = dev->data; uint32_t pm_ready_at_ms; pm_ready_at_ms = k_uptime_get() + QUECTEL_LCX6G_PM_TIMEOUT_MS; data->pm_timeout = K_TIMEOUT_ABS_MS(pm_ready_at_ms); } static void quectel_lcx6g_await_pm_ready(const struct device *dev) { struct quectel_lcx6g_data *data = dev->data; LOG_INF("Waiting until PM ready"); k_sleep(data->pm_timeout); } static int quectel_lcx6g_resume(const struct device *dev) { struct quectel_lcx6g_data *data = dev->data; int ret; LOG_INF("Resuming"); quectel_lcx6g_await_pm_ready(dev); ret = modem_pipe_open(data->uart_pipe, K_SECONDS(10)); if (ret < 0) { LOG_ERR("Failed to open pipe"); return ret; } ret = modem_chat_attach(&data->chat, data->uart_pipe); if (ret < 0) { LOG_ERR("Failed to attach chat"); modem_pipe_close(data->uart_pipe, K_SECONDS(10)); return ret; } ret = modem_chat_run_script(&data->chat, &resume_script); if (ret < 0) { 
LOG_ERR("Failed to initialize GNSS"); modem_pipe_close(data->uart_pipe, K_SECONDS(10)); return ret; } ret = quectel_lcx6g_configure_pps(dev); if (ret < 0) { LOG_ERR("Failed to configure PPS"); modem_pipe_close(data->uart_pipe, K_SECONDS(10)); return ret; } LOG_INF("Resumed"); return ret; } #ifdef CONFIG_PM_DEVICE static int quectel_lcx6g_suspend(const struct device *dev) { struct quectel_lcx6g_data *data = dev->data; int ret; LOG_INF("Suspending"); quectel_lcx6g_await_pm_ready(dev); ret = modem_chat_run_script(&data->chat, &suspend_script); if (ret < 0) { LOG_ERR("Failed to suspend GNSS"); } else { LOG_INF("Suspended"); } modem_pipe_close(data->uart_pipe, K_SECONDS(10)); return ret; } static void quectel_lcx6g_turn_on(const struct device *dev) { LOG_INF("Powered on"); } static int quectel_lcx6g_turn_off(const struct device *dev) { struct quectel_lcx6g_data *data = dev->data; LOG_INF("Powered off"); return modem_pipe_close(data->uart_pipe, K_SECONDS(10)); } static int quectel_lcx6g_pm_action(const struct device *dev, enum pm_device_action action) { int ret = -ENOTSUP; quectel_lcx6g_lock(dev); switch (action) { case PM_DEVICE_ACTION_SUSPEND: ret = quectel_lcx6g_suspend(dev); break; case PM_DEVICE_ACTION_RESUME: ret = quectel_lcx6g_resume(dev); break; case PM_DEVICE_ACTION_TURN_ON: quectel_lcx6g_turn_on(dev); ret = 0; break; case PM_DEVICE_ACTION_TURN_OFF: ret = quectel_lcx6g_turn_off(dev); break; default: break; } quectel_lcx6g_pm_changed(dev); quectel_lcx6g_unlock(dev); return ret; } #endif /* CONFIG_PM_DEVICE */ static int quectel_lcx6g_set_fix_rate(const struct device *dev, uint32_t fix_interval_ms) { struct quectel_lcx6g_data *data = dev->data; int ret; if (fix_interval_ms < 100 || fix_interval_ms > 1000) { return -EINVAL; } quectel_lcx6g_lock(dev); ret = gnss_nmea0183_snprintk(data->pair_request_buf, sizeof(data->pair_request_buf), "PAIR050,%u", fix_interval_ms); if (ret < 0) { goto unlock_return; } ret = 
modem_chat_script_chat_set_request(&data->pair_script_chat, data->pair_request_buf); if (ret < 0) { goto unlock_return; } ret = gnss_nmea0183_snprintk(data->pair_match_buf, sizeof(data->pair_match_buf), "PAIR001,050,0"); if (ret < 0) { goto unlock_return; } ret = modem_chat_match_set_match(&data->pair_match, data->pair_match_buf); if (ret < 0) { goto unlock_return; } ret = modem_chat_run_script(&data->chat, &data->pair_script); if (ret < 0) { goto unlock_return; } unlock_return: quectel_lcx6g_unlock(dev); return ret; } static void quectel_lcx6g_get_fix_rate_callback(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct quectel_lcx6g_data *data = user_data; int32_t tmp; if (argc != 3) { return; } if ((gnss_parse_atoi(argv[1], 10, &tmp) < 0) || (tmp < 0) || (tmp > 1000)) { return; } data->fix_rate_response = (uint16_t)tmp; } static int quectel_lcx6g_get_fix_rate(const struct device *dev, uint32_t *fix_interval_ms) { struct quectel_lcx6g_data *data = dev->data; int ret; quectel_lcx6g_lock(dev); ret = gnss_nmea0183_snprintk(data->pair_request_buf, sizeof(data->pair_request_buf), "PAIR051"); if (ret < 0) { goto unlock_return; } ret = modem_chat_script_chat_set_request(&data->pair_script_chat, data->pair_request_buf); if (ret < 0) { goto unlock_return; } strncpy(data->pair_match_buf, "$PAIR051,", sizeof(data->pair_match_buf)); ret = modem_chat_match_set_match(&data->pair_match, data->pair_match_buf); if (ret < 0) { goto unlock_return; } modem_chat_match_set_callback(&data->pair_match, quectel_lcx6g_get_fix_rate_callback); ret = modem_chat_run_script(&data->chat, &data->pair_script); modem_chat_match_set_callback(&data->pair_match, NULL); if (ret < 0) { goto unlock_return; } *fix_interval_ms = data->fix_rate_response; unlock_return: quectel_lcx6g_unlock(dev); return 0; } static int quectel_lcx6g_set_navigation_mode(const struct device *dev, enum gnss_navigation_mode mode) { struct quectel_lcx6g_data *data = dev->data; uint8_t navigation_mode = 0; 
int ret; switch (mode) { case GNSS_NAVIGATION_MODE_ZERO_DYNAMICS: navigation_mode = QUECTEL_LCX6G_PAIR_NAV_MODE_STATIONARY; break; case GNSS_NAVIGATION_MODE_LOW_DYNAMICS: navigation_mode = QUECTEL_LCX6G_PAIR_NAV_MODE_FITNESS; break; case GNSS_NAVIGATION_MODE_BALANCED_DYNAMICS: navigation_mode = QUECTEL_LCX6G_PAIR_NAV_MODE_NORMAL; break; case GNSS_NAVIGATION_MODE_HIGH_DYNAMICS: navigation_mode = QUECTEL_LCX6G_PAIR_NAV_MODE_DRONE; break; } quectel_lcx6g_lock(dev); ret = gnss_nmea0183_snprintk(data->pair_request_buf, sizeof(data->pair_request_buf), "PAIR080,%u", navigation_mode); if (ret < 0) { goto unlock_return; } ret = modem_chat_script_chat_set_request(&data->pair_script_chat, data->pair_request_buf); if (ret < 0) { goto unlock_return; } ret = gnss_nmea0183_snprintk(data->pair_match_buf, sizeof(data->pair_match_buf), "PAIR001,080,0"); if (ret < 0) { goto unlock_return; } ret = modem_chat_match_set_match(&data->pair_match, data->pair_match_buf); if (ret < 0) { goto unlock_return; } ret = modem_chat_run_script(&data->chat, &data->pair_script); if (ret < 0) { goto unlock_return; } unlock_return: quectel_lcx6g_unlock(dev); return ret; } static void quectel_lcx6g_get_nav_mode_callback(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct quectel_lcx6g_data *data = user_data; int32_t tmp; if (argc != 3) { return; } if ((gnss_parse_atoi(argv[1], 10, &tmp) < 0) || (tmp < 0) || (tmp > 7)) { return; } switch (tmp) { case QUECTEL_LCX6G_PAIR_NAV_MODE_FITNESS: data->navigation_mode_response = GNSS_NAVIGATION_MODE_LOW_DYNAMICS; break; case QUECTEL_LCX6G_PAIR_NAV_MODE_STATIONARY: data->navigation_mode_response = GNSS_NAVIGATION_MODE_ZERO_DYNAMICS; break; case QUECTEL_LCX6G_PAIR_NAV_MODE_DRONE: data->navigation_mode_response = GNSS_NAVIGATION_MODE_HIGH_DYNAMICS; break; default: data->navigation_mode_response = GNSS_NAVIGATION_MODE_BALANCED_DYNAMICS; break; } } static int quectel_lcx6g_get_navigation_mode(const struct device *dev, enum gnss_navigation_mode 
*mode) { struct quectel_lcx6g_data *data = dev->data; int ret; quectel_lcx6g_lock(dev); ret = gnss_nmea0183_snprintk(data->pair_request_buf, sizeof(data->pair_request_buf), "PAIR081"); if (ret < 0) { goto unlock_return; } ret = modem_chat_script_chat_set_request(&data->pair_script_chat, data->pair_request_buf); if (ret < 0) { goto unlock_return; } strncpy(data->pair_match_buf, "$PAIR081,", sizeof(data->pair_match_buf)); ret = modem_chat_match_set_match(&data->pair_match, data->pair_match_buf); if (ret < 0) { goto unlock_return; } modem_chat_match_set_callback(&data->pair_match, quectel_lcx6g_get_nav_mode_callback); ret = modem_chat_run_script(&data->chat, &data->pair_script); modem_chat_match_set_callback(&data->pair_match, NULL); if (ret < 0) { goto unlock_return; } *mode = data->navigation_mode_response; unlock_return: quectel_lcx6g_unlock(dev); return ret; } static int quectel_lcx6g_set_enabled_systems(const struct device *dev, gnss_systems_t systems) { struct quectel_lcx6g_data *data = dev->data; gnss_systems_t supported_systems; int ret; supported_systems = (GNSS_SYSTEM_GPS | GNSS_SYSTEM_GLONASS | GNSS_SYSTEM_GALILEO | GNSS_SYSTEM_BEIDOU | GNSS_SYSTEM_QZSS | GNSS_SYSTEM_SBAS); if ((~supported_systems) & systems) { return -EINVAL; } quectel_lcx6g_lock(dev); ret = gnss_nmea0183_snprintk(data->pair_request_buf, sizeof(data->pair_request_buf), "PAIR066,%u,%u,%u,%u,%u,0", (0 < (systems & GNSS_SYSTEM_GPS)), (0 < (systems & GNSS_SYSTEM_GLONASS)), (0 < (systems & GNSS_SYSTEM_GALILEO)), (0 < (systems & GNSS_SYSTEM_BEIDOU)), (0 < (systems & GNSS_SYSTEM_QZSS))); if (ret < 0) { goto unlock_return; } ret = modem_chat_script_chat_set_request(&data->pair_script_chat, data->pair_request_buf); if (ret < 0) { goto unlock_return; } ret = gnss_nmea0183_snprintk(data->pair_match_buf, sizeof(data->pair_match_buf), "PAIR001,066,0"); if (ret < 0) { goto unlock_return; } ret = modem_chat_match_set_match(&data->pair_match, data->pair_match_buf); if (ret < 0) { goto unlock_return; } ret 
= modem_chat_run_script(&data->chat, &data->pair_script); if (ret < 0) { goto unlock_return; } ret = gnss_nmea0183_snprintk(data->pair_request_buf, sizeof(data->pair_request_buf), "PAIR410,%u", (0 < (systems & GNSS_SYSTEM_SBAS))); if (ret < 0) { goto unlock_return; } ret = modem_chat_script_chat_set_request(&data->pair_script_chat, data->pair_request_buf); if (ret < 0) { goto unlock_return; } ret = gnss_nmea0183_snprintk(data->pair_match_buf, sizeof(data->pair_match_buf), "PAIR001,410,0"); if (ret < 0) { goto unlock_return; } ret = modem_chat_match_set_match(&data->pair_match, data->pair_match_buf); if (ret < 0) { goto unlock_return; } ret = modem_chat_run_script(&data->chat, &data->pair_script); if (ret < 0) { goto unlock_return; } unlock_return: quectel_lcx6g_unlock(dev); return ret; } static inline bool search_mode_enabled(const char *arg) { return arg[0] == '1'; } static void quectel_lcx6g_get_search_mode_callback(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct quectel_lcx6g_data *data = user_data; if (argc != 8) { return; } data->enabled_systems_response = search_mode_enabled(argv[1]) ? GNSS_SYSTEM_GPS : 0; data->enabled_systems_response |= search_mode_enabled(argv[2]) ? GNSS_SYSTEM_GLONASS : 0; data->enabled_systems_response |= search_mode_enabled(argv[3]) ? GNSS_SYSTEM_GALILEO : 0; data->enabled_systems_response |= search_mode_enabled(argv[4]) ? GNSS_SYSTEM_BEIDOU : 0; data->enabled_systems_response |= search_mode_enabled(argv[5]) ? GNSS_SYSTEM_QZSS : 0; } static void quectel_lcx6g_get_sbas_status_callback(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct quectel_lcx6g_data *data = user_data; if (argc != 3) { return; } data->enabled_systems_response |= ('1' == argv[1][0]) ? 
GNSS_SYSTEM_SBAS : 0; } static int quectel_lcx6g_get_enabled_systems(const struct device *dev, gnss_systems_t *systems) { struct quectel_lcx6g_data *data = dev->data; int ret; quectel_lcx6g_lock(dev); ret = gnss_nmea0183_snprintk(data->pair_request_buf, sizeof(data->pair_request_buf), "PAIR067"); if (ret < 0) { goto unlock_return; } ret = modem_chat_script_chat_set_request(&data->pair_script_chat, data->pair_request_buf); if (ret < 0) { goto unlock_return; } strncpy(data->pair_match_buf, "$PAIR067,", sizeof(data->pair_match_buf)); ret = modem_chat_match_set_match(&data->pair_match, data->pair_match_buf); if (ret < 0) { goto unlock_return; } modem_chat_match_set_callback(&data->pair_match, quectel_lcx6g_get_search_mode_callback); ret = modem_chat_run_script(&data->chat, &data->pair_script); modem_chat_match_set_callback(&data->pair_match, NULL); if (ret < 0) { goto unlock_return; } ret = gnss_nmea0183_snprintk(data->pair_request_buf, sizeof(data->pair_request_buf), "PAIR411"); if (ret < 0) { goto unlock_return; } ret = modem_chat_script_chat_set_request(&data->pair_script_chat, data->pair_request_buf); if (ret < 0) { goto unlock_return; } strncpy(data->pair_match_buf, "$PAIR411,", sizeof(data->pair_match_buf)); ret = modem_chat_match_set_match(&data->pair_match, data->pair_match_buf); if (ret < 0) { goto unlock_return; } modem_chat_match_set_callback(&data->pair_match, quectel_lcx6g_get_sbas_status_callback); ret = modem_chat_run_script(&data->chat, &data->pair_script); modem_chat_match_set_callback(&data->pair_match, NULL); if (ret < 0) { goto unlock_return; } *systems = data->enabled_systems_response; unlock_return: quectel_lcx6g_unlock(dev); return ret; } static int quectel_lcx6g_get_supported_systems(const struct device *dev, gnss_systems_t *systems) { *systems = (GNSS_SYSTEM_GPS | GNSS_SYSTEM_GLONASS | GNSS_SYSTEM_GALILEO | GNSS_SYSTEM_BEIDOU | GNSS_SYSTEM_QZSS | GNSS_SYSTEM_SBAS); return 0; } static const struct gnss_driver_api gnss_api = { .set_fix_rate = 
quectel_lcx6g_set_fix_rate, .get_fix_rate = quectel_lcx6g_get_fix_rate, .set_navigation_mode = quectel_lcx6g_set_navigation_mode, .get_navigation_mode = quectel_lcx6g_get_navigation_mode, .set_enabled_systems = quectel_lcx6g_set_enabled_systems, .get_enabled_systems = quectel_lcx6g_get_enabled_systems, .get_supported_systems = quectel_lcx6g_get_supported_systems, }; static int quectel_lcx6g_init_nmea0183_match(const struct device *dev) { struct quectel_lcx6g_data *data = dev->data; const struct gnss_nmea0183_match_config config = { .gnss = dev, #if CONFIG_GNSS_SATELLITES .satellites = data->satellites, .satellites_size = ARRAY_SIZE(data->satellites), #endif }; return gnss_nmea0183_match_init(&data->match_data, &config); } static void quectel_lcx6g_init_pipe(const struct device *dev) { const struct quectel_lcx6g_config *config = dev->config; struct quectel_lcx6g_data *data = dev->data; const struct modem_backend_uart_config uart_backend_config = { .uart = config->uart, .receive_buf = data->uart_backend_receive_buf, .receive_buf_size = ARRAY_SIZE(data->uart_backend_receive_buf), .transmit_buf = data->uart_backend_transmit_buf, .transmit_buf_size = ARRAY_SIZE(data->uart_backend_transmit_buf), }; data->uart_pipe = modem_backend_uart_init(&data->uart_backend, &uart_backend_config); } static int quectel_lcx6g_init_chat(const struct device *dev) { struct quectel_lcx6g_data *data = dev->data; const struct modem_chat_config chat_config = { .user_data = data, .receive_buf = data->chat_receive_buf, .receive_buf_size = ARRAY_SIZE(data->chat_receive_buf), .delimiter = data->chat_delimiter, .delimiter_size = ARRAY_SIZE(data->chat_delimiter), .filter = NULL, .filter_size = 0, .argv = data->chat_argv, .argv_size = ARRAY_SIZE(data->chat_argv), .unsol_matches = unsol_matches, .unsol_matches_size = ARRAY_SIZE(unsol_matches), }; return modem_chat_init(&data->chat, &chat_config); } static void quectel_lcx6g_init_pair_script(const struct device *dev) { struct quectel_lcx6g_data *data = 
dev->data; modem_chat_match_init(&data->pair_match); modem_chat_match_set_separators(&data->pair_match, ",*"); modem_chat_script_chat_init(&data->pair_script_chat); modem_chat_script_chat_set_response_matches(&data->pair_script_chat, &data->pair_match, 1); modem_chat_script_init(&data->pair_script); modem_chat_script_set_name(&data->pair_script, "pair"); modem_chat_script_set_script_chats(&data->pair_script, &data->pair_script_chat, 1); modem_chat_script_set_abort_matches(&data->pair_script, NULL, 0); modem_chat_script_set_timeout(&data->pair_script, 10); } static int quectel_lcx6g_init(const struct device *dev) { struct quectel_lcx6g_data *data = dev->data; int ret; k_sem_init(&data->lock, 1, 1); ret = quectel_lcx6g_init_nmea0183_match(dev); if (ret < 0) { return ret; } quectel_lcx6g_init_pipe(dev); ret = quectel_lcx6g_init_chat(dev); if (ret < 0) { return ret; } quectel_lcx6g_init_pair_script(dev); quectel_lcx6g_pm_changed(dev); if (pm_device_is_powered(dev)) { ret = quectel_lcx6g_resume(dev); if (ret < 0) { return ret; } quectel_lcx6g_pm_changed(dev); } else { pm_device_init_off(dev); } return pm_device_runtime_enable(dev); } #define LCX6G_INST_NAME(inst, name) \ _CONCAT(_CONCAT(_CONCAT(name, _), DT_DRV_COMPAT), inst) #define LCX6G_DEVICE(inst) \ static const struct quectel_lcx6g_config LCX6G_INST_NAME(inst, config) = { \ .uart = DEVICE_DT_GET(DT_INST_BUS(inst)), \ .pps_mode = DT_INST_STRING_UPPER_TOKEN(inst, pps_mode), \ .pps_pulse_width = DT_INST_PROP(inst, pps_pulse_width), \ }; \ \ static struct quectel_lcx6g_data LCX6G_INST_NAME(inst, data) = { \ .chat_delimiter = {'\r', '\n'}, \ }; \ \ PM_DEVICE_DT_INST_DEFINE(inst, quectel_lcx6g_pm_action); \ \ DEVICE_DT_INST_DEFINE(inst, quectel_lcx6g_init, PM_DEVICE_DT_INST_GET(inst), \ &LCX6G_INST_NAME(inst, data), &LCX6G_INST_NAME(inst, config), \ POST_KERNEL, CONFIG_GNSS_INIT_PRIORITY, &gnss_api); #define DT_DRV_COMPAT quectel_lc26g DT_INST_FOREACH_STATUS_OKAY(LCX6G_DEVICE) #undef DT_DRV_COMPAT #define DT_DRV_COMPAT 
quectel_lc76g DT_INST_FOREACH_STATUS_OKAY(LCX6G_DEVICE) #undef DT_DRV_COMPAT #define DT_DRV_COMPAT quectel_lc86g DT_INST_FOREACH_STATUS_OKAY(LCX6G_DEVICE) #undef DT_DRV_COMPAT ```
/content/code_sandbox/drivers/gnss/gnss_quectel_lcx6g.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,874
```unknown
menuconfig GNSS
	bool "GNSS drivers"
	select EXPERIMENTAL
	help
	  Enable GNSS drivers and configuration.

if GNSS

config GNSS_SATELLITES
	bool "GNSS satellites support"
	help
	  Enable GNSS satellites callback.

config GNSS_DUMP
	bool "GNSS dump support"
	depends on LOG
	help
	  Enable GNSS dump library

config GNSS_DUMP_TO_LOG
	bool "Dump GNSS events to log"
	select GNSS_DUMP
	help
	  Enable GNSS dump to log.

if GNSS_DUMP_TO_LOG

config GNSS_DUMP_TO_LOG_BUF_SIZE
	int "GNSS log dump buffer size"
	default 128
	help
	  Size of GNSS log dump buffer

endif

config GNSS_PARSE
	bool "GNSS parsing utilities"
	help
	  Enable GNSS parsing utilities.

config GNSS_NMEA0183
	bool "NMEA0183 parsing utilities"
	select GNSS_PARSE
	help
	  Enable NMEA0183 parsing utilities.

config GNSS_NMEA0183_MATCH
	bool "GNSS NMEA0183 match utilities"
	select GNSS_NMEA0183
	help
	  Enable NMEA0183 match utilities.

config GNSS_INIT_PRIORITY
	int "GNSS driver initialization priority"
	default 80
	range 0 99
	help
	  Driver initialization priority for GNSS drivers.

config GNSS_U_BLOX_PROTOCOL
	bool "GNSS U-BLOX protocol"
	select MODEM_UBX
	help
	  Enable gnss u-blox protocol.

choice GNSS_REFERENCE_FRAME
	bool "GNSS reference frame datum"
	default GNSS_REFERENCE_FRAME_WGS84

config GNSS_REFERENCE_FRAME_WGS84
	bool "Use the WGS84 ellipsoid as reference frame datum"

endchoice

module = GNSS
module-str = gnss
source "subsys/logging/Kconfig.template.log_config"

rsource "Kconfig.emul"
rsource "Kconfig.generic"
rsource "Kconfig.quectel_lcx6g"
rsource "Kconfig.u_blox_m10"
rsource "Kconfig.luatos_air530z"

endif
```
/content/code_sandbox/drivers/gnss/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
447
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_GNSS_GNSS_DUMP_H_ #define ZEPHYR_DRIVERS_GNSS_GNSS_DUMP_H_ #include <zephyr/drivers/gnss.h> /** * @brief Dump struct gnss_info as string * * @param str Destination for dumped GNSS info * @param strsize Size of str * @param info GNSS info to dump * * @retval 0 if GNSS info successfully dumped * @retval -ENOMEM if strsize too small */ int gnss_dump_info(char *str, uint16_t strsize, const struct gnss_info *info); /** * @brief Dump struct navigation_data as string * * @param str Destination for dumped navigation data * @param strsize Size of str * @param nav_data Navigation data to dump * * @retval 0 if navigation data successfully dumped * @retval -ENOMEM if strsize too small */ int gnss_dump_nav_data(char *str, uint16_t strsize, const struct navigation_data *nav_data); /** * @brief Dump struct gnss_time as string * * @param str Destination for dumped GNSS time * @param strsize Size of str * @param utc GNSS time to dump * * @retval 0 if GNSS time successfully dumped * @retval -ENOMEM if strsize too small */ int gnss_dump_time(char *str, uint16_t strsize, const struct gnss_time *utc); /** * @brief Dump struct gnss_satellite as string * * @param str Destination for dumped GNSS satellite * @param strsize Size of str * @param utc GNSS satellite to dump * * @retval 0 if GNSS satellite successfully dumped * @retval -ENOMEM if strsize too small */ int gnss_dump_satellite(char *str, uint16_t strsize, const struct gnss_satellite *satellite); #endif /* ZEPHYR_DRIVERS_GNSS_GNSS_DUMP_H_ */ ```
/content/code_sandbox/drivers/gnss/gnss_dump.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
425
```c /* * */ #include <zephyr/drivers/gnss/gnss_publish.h> #include <zephyr/kernel.h> #include <zephyr/sys/iterable_sections.h> static K_SEM_DEFINE(semlock, 1, 1); void gnss_publish_data(const struct device *dev, const struct gnss_data *data) { (void)k_sem_take(&semlock, K_FOREVER); STRUCT_SECTION_FOREACH(gnss_data_callback, callback) { if (callback->dev == NULL || callback->dev == dev) { callback->callback(dev, data); } } k_sem_give(&semlock); } #if CONFIG_GNSS_SATELLITES void gnss_publish_satellites(const struct device *dev, const struct gnss_satellite *satellites, uint16_t size) { (void)k_sem_take(&semlock, K_FOREVER); STRUCT_SECTION_FOREACH(gnss_satellites_callback, callback) { if (callback->dev == NULL || callback->dev == dev) { callback->callback(dev, satellites, size); } } k_sem_give(&semlock); } #endif ```
/content/code_sandbox/drivers/gnss/gnss_publish.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
246
```c /* * */ #include "gnss_u_blox_protocol.h" const uint32_t ubx_baudrate[UBX_BAUDRATE_COUNT] = { 4800, 9600, 19200, 38400, 57600, 115200, 230400, 460800, 921600, }; static inline int ubx_validate_payload_size_ack(uint8_t msg_id, uint16_t payload_size) { switch (msg_id) { case UBX_ACK_ACK: return payload_size == UBX_CFG_ACK_PAYLOAD_SZ ? 0 : -1; case UBX_ACK_NAK: return payload_size == UBX_CFG_NAK_PAYLOAD_SZ ? 0 : -1; default: return -1; } } static inline int ubx_validate_payload_size_cfg(uint8_t msg_id, uint16_t payload_size) { switch (msg_id) { case UBX_CFG_RATE: return payload_size == UBX_CFG_RATE_PAYLOAD_SZ ? 0 : -1; case UBX_CFG_PRT: return (payload_size == UBX_CFG_PRT_POLL_PAYLOAD_SZ || payload_size == UBX_CFG_PRT_SET_PAYLOAD_SZ) ? 0 : -1; case UBX_CFG_RST: return payload_size == UBX_CFG_RST_PAYLOAD_SZ ? 0 : -1; case UBX_CFG_NAV5: return payload_size == UBX_CFG_NAV5_PAYLOAD_SZ ? 0 : -1; case UBX_CFG_GNSS: return ((payload_size - UBX_CFG_GNSS_PAYLOAD_INIT_SZ) % UBX_CFG_GNSS_PAYLOAD_CFG_BLK_SZ == 0) ? 0 : -1; case UBX_CFG_MSG: return payload_size == UBX_CFG_MSG_PAYLOAD_SZ ? 
0 : -1; default: return -1; } } static inline int ubx_validate_payload_size(uint8_t msg_cls, uint8_t msg_id, uint16_t payload_size) { if (payload_size == 0) { return 0; } if (payload_size > UBX_PAYLOAD_SZ_MAX) { return -1; } switch (msg_cls) { case UBX_CLASS_ACK: return ubx_validate_payload_size_ack(msg_id, payload_size); case UBX_CLASS_CFG: return ubx_validate_payload_size_cfg(msg_id, payload_size); default: return -1; } } int ubx_create_and_validate_frame(uint8_t *ubx_frame, uint16_t ubx_frame_size, uint8_t msg_cls, uint8_t msg_id, const void *payload, uint16_t payload_size) { if (ubx_validate_payload_size(msg_cls, msg_id, payload_size)) { return -1; } return modem_ubx_create_frame(ubx_frame, ubx_frame_size, msg_cls, msg_id, payload, payload_size); } void ubx_cfg_ack_payload_default(struct ubx_cfg_ack_payload *payload) { payload->message_class = UBX_CLASS_CFG; payload->message_id = UBX_CFG_PRT; } void ubx_cfg_rate_payload_default(struct ubx_cfg_rate_payload *payload) { payload->meas_rate_ms = 1000; payload->nav_rate = 1; payload->time_ref = UBX_CFG_RATE_TIME_REF_UTC; } void ubx_cfg_prt_poll_payload_default(struct ubx_cfg_prt_poll_payload *payload) { payload->port_id = UBX_PORT_NUMBER_UART; } void ubx_cfg_prt_set_payload_default(struct ubx_cfg_prt_set_payload *payload) { payload->port_id = UBX_PORT_NUMBER_UART; payload->reserved0 = UBX_CFG_PRT_RESERVED0; payload->tx_ready_pin_conf = UBX_CFG_PRT_TX_READY_PIN_CONF_POL_HIGH; payload->port_mode = UBX_CFG_PRT_PORT_MODE_CHAR_LEN_8 | UBX_CFG_PRT_PORT_MODE_PARITY_NONE | UBX_CFG_PRT_PORT_MODE_STOP_BITS_1; payload->baudrate = ubx_baudrate[3]; payload->in_proto_mask = UBX_CFG_PRT_IN_PROTO_UBX | UBX_CFG_PRT_IN_PROTO_NMEA | UBX_CFG_PRT_IN_PROTO_RTCM; payload->out_proto_mask = UBX_CFG_PRT_OUT_PROTO_UBX | UBX_CFG_PRT_OUT_PROTO_NMEA | UBX_CFG_PRT_OUT_PROTO_RTCM3; payload->flags = UBX_CFG_PRT_FLAGS_DEFAULT; payload->reserved1 = UBX_CFG_PRT_RESERVED1; } void ubx_cfg_rst_payload_default(struct ubx_cfg_rst_payload *payload) { 
payload->nav_bbr_mask = UBX_CFG_RST_NAV_BBR_MASK_HOT_START; payload->reset_mode = UBX_CFG_RST_RESET_MODE_CONTROLLED_SOFT_RESET; payload->reserved0 = UBX_CFG_RST_RESERVED0; } void ubx_cfg_nav5_payload_default(struct ubx_cfg_nav5_payload *payload) { payload->mask = UBX_CFG_NAV5_MASK_ALL; payload->dyn_model = UBX_DYN_MODEL_PORTABLE; payload->fix_mode = UBX_FIX_AUTO_FIX; payload->fixed_alt = UBX_CFG_NAV5_FIXED_ALT_DEFAULT; payload->fixed_alt_var = UBX_CFG_NAV5_FIXED_ALT_VAR_DEFAULT; payload->min_elev = UBX_CFG_NAV5_MIN_ELEV_DEFAULT; payload->dr_limit = UBX_CFG_NAV5_DR_LIMIT_DEFAULT; payload->p_dop = UBX_CFG_NAV5_P_DOP_DEFAULT; payload->t_dop = UBX_CFG_NAV5_T_DOP_DEFAULT; payload->p_acc = UBX_CFG_NAV5_P_ACC_DEFAULT; payload->t_acc = UBX_CFG_NAV5_T_ACC_DEFAULT; payload->static_hold_threshold = UBX_CFG_NAV5_STATIC_HOLD_THRESHOLD_DEFAULT; payload->dgnss_timeout = UBX_CFG_NAV5_DGNSS_TIMEOUT_DEFAULT; payload->cno_threshold_num_svs = UBX_CFG_NAV5_CNO_THRESHOLD_NUM_SVS_DEFAULT; payload->cno_threshold = UBX_CFG_NAV5_CNO_THRESHOLD_DEFAULT; payload->reserved0 = UBX_CFG_NAV5_RESERVED0; payload->static_hold_dist_threshold = UBX_CFG_NAV5_STATIC_HOLD_DIST_THRESHOLD; payload->utc_standard = UBX_CFG_NAV5_UTC_STANDARD_DEFAULT; } static struct ubx_cfg_gnss_payload_config_block ubx_cfg_gnss_payload_config_block_default = { .gnss_id = UBX_GNSS_ID_GPS, .num_res_trk_ch = 0x00, .max_num_trk_ch = 0x00, .reserved0 = UBX_CFG_GNSS_RESERVED0, .flags = UBX_CFG_GNSS_FLAG_ENABLE | UBX_CFG_GNSS_FLAG_SGN_CNF_GPS_L1C_A, }; void ubx_cfg_gnss_payload_default(struct ubx_cfg_gnss_payload *payload) { payload->msg_ver = UBX_CFG_GNSS_MSG_VER; payload->num_trk_ch_hw = UBX_CFG_GNSS_NUM_TRK_CH_HW_DEFAULT; payload->num_trk_ch_use = UBX_CFG_GNSS_NUM_TRK_CH_USE_DEFAULT; for (int i = 0; i < payload->num_config_blocks; ++i) { payload->config_blocks[i] = ubx_cfg_gnss_payload_config_block_default; } } void ubx_cfg_msg_payload_default(struct ubx_cfg_msg_payload *payload) { payload->message_class = UBX_CLASS_NMEA; 
payload->message_id = UBX_NMEA_GGA; payload->rate = UBX_CFG_MSG_RATE_DEFAULT; } ```
/content/code_sandbox/drivers/gnss/gnss_u_blox_protocol/gnss_u_blox_protocol.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,572
```c /* * */ #include <zephyr/kernel.h> #include <string.h> #include <stdarg.h> #include <stdarg.h> #include "gnss_nmea0183.h" #include "gnss_parse.h" #define GNSS_NMEA0183_PICO_DEGREES_IN_DEGREE (1000000000000ULL) #define GNSS_NMEA0183_PICO_DEGREES_IN_MINUTE (GNSS_NMEA0183_PICO_DEGREES_IN_DEGREE / 60ULL) #define GNSS_NMEA0183_PICO_DEGREES_IN_NANO_DEGREE (1000ULL) #define GNSS_NMEA0183_NANO_KNOTS_IN_MMS (1943861LL) #define GNSS_NMEA0183_MESSAGE_SIZE_MIN (6) #define GNSS_NMEA0183_MESSAGE_CHECKSUM_SIZE (3) #define GNSS_NMEA0183_GSV_HDR_ARG_CNT (4) #define GNSS_NMEA0183_GSV_SV_ARG_CNT (4) #define GNSS_NMEA0183_GSV_PRN_GPS_RANGE (32) #define GNSS_NMEA0183_GSV_PRN_SBAS_OFFSET (87) #define GNSS_NMEA0183_GSV_PRN_GLONASS_OFFSET (64) #define GNSS_NMEA0183_GSV_PRN_BEIDOU_OFFSET (100) struct gsv_header_args { const char *message_id; const char *number_of_messages; const char *message_number; const char *numver_of_svs; }; struct gsv_sv_args { const char *prn; const char *elevation; const char *azimuth; const char *snr; }; static int gnss_system_from_gsv_header_args(const struct gsv_header_args *args, enum gnss_system *sv_system) { switch (args->message_id[2]) { case 'A': *sv_system = GNSS_SYSTEM_GALILEO; break; case 'B': *sv_system = GNSS_SYSTEM_BEIDOU; break; case 'P': *sv_system = GNSS_SYSTEM_GPS; break; case 'L': *sv_system = GNSS_SYSTEM_GLONASS; break; case 'Q': *sv_system = GNSS_SYSTEM_QZSS; break; default: return -EINVAL; } return 0; } static void align_satellite_with_gnss_system(enum gnss_system sv_system, struct gnss_satellite *satellite) { switch (sv_system) { case GNSS_SYSTEM_GPS: if (satellite->prn > GNSS_NMEA0183_GSV_PRN_GPS_RANGE) { satellite->system = GNSS_SYSTEM_SBAS; satellite->prn += GNSS_NMEA0183_GSV_PRN_SBAS_OFFSET; break; } satellite->system = GNSS_SYSTEM_GPS; break; case GNSS_SYSTEM_GLONASS: satellite->system = GNSS_SYSTEM_GLONASS; satellite->prn -= GNSS_NMEA0183_GSV_PRN_GLONASS_OFFSET; break; case GNSS_SYSTEM_GALILEO: satellite->system = 
GNSS_SYSTEM_GALILEO; break; case GNSS_SYSTEM_BEIDOU: satellite->system = GNSS_SYSTEM_BEIDOU; satellite->prn -= GNSS_NMEA0183_GSV_PRN_BEIDOU_OFFSET; break; case GNSS_SYSTEM_QZSS: satellite->system = GNSS_SYSTEM_QZSS; break; case GNSS_SYSTEM_IRNSS: case GNSS_SYSTEM_IMES: case GNSS_SYSTEM_SBAS: break; } } uint8_t gnss_nmea0183_checksum(const char *str) { uint8_t checksum = 0; size_t end; __ASSERT(str != NULL, "str argument must be provided"); end = strlen(str); for (size_t i = 0; i < end; i++) { checksum = checksum ^ str[i]; } return checksum; } int gnss_nmea0183_snprintk(char *str, size_t size, const char *fmt, ...) { va_list ap; uint8_t checksum; int pos; int len; __ASSERT(str != NULL, "str argument must be provided"); __ASSERT(fmt != NULL, "fmt argument must be provided"); if (size < GNSS_NMEA0183_MESSAGE_SIZE_MIN) { return -ENOMEM; } str[0] = '$'; va_start(ap, fmt); pos = vsnprintk(&str[1], size - 1, fmt, ap) + 1; va_end(ap); if (pos < 0) { return -EINVAL; } len = pos + GNSS_NMEA0183_MESSAGE_CHECKSUM_SIZE; if ((size - 1) < len) { return -ENOMEM; } checksum = gnss_nmea0183_checksum(&str[1]); pos = snprintk(&str[pos], size - pos, "*%02X", checksum); if (pos != 3) { return -EINVAL; } str[len] = '\0'; return len; } int gnss_nmea0183_ddmm_mmmm_to_ndeg(const char *ddmm_mmmm, int64_t *ndeg) { uint64_t pico_degrees = 0; int8_t decimal = -1; int8_t pos = 0; uint64_t increment; __ASSERT(ddmm_mmmm != NULL, "ddmm_mmmm argument must be provided"); __ASSERT(ndeg != NULL, "ndeg argument must be provided"); /* Find decimal */ while (ddmm_mmmm[pos] != '\0') { /* Verify if char is decimal */ if (ddmm_mmmm[pos] == '.') { decimal = pos; break; } /* Advance position */ pos++; } /* Verify decimal was found and placed correctly */ if (decimal < 1) { return -EINVAL; } /* Validate potential degree fraction is within bounds */ if (decimal > 1 && ddmm_mmmm[decimal - 2] > '5') { return -EINVAL; } /* Convert minute fraction to pico degrees and add it to pico_degrees */ pos = decimal + 1; 
increment = (GNSS_NMEA0183_PICO_DEGREES_IN_MINUTE / 10); while (ddmm_mmmm[pos] != '\0') { /* Verify char is decimal */ if (ddmm_mmmm[pos] < '0' || ddmm_mmmm[pos] > '9') { return -EINVAL; } /* Add increment to pico_degrees */ pico_degrees += (ddmm_mmmm[pos] - '0') * increment; /* Update unit */ increment /= 10; /* Increment position */ pos++; } /* Convert minutes and degrees to pico_degrees */ pos = decimal - 1; increment = GNSS_NMEA0183_PICO_DEGREES_IN_MINUTE; while (pos >= 0) { /* Check if digit switched from minutes to degrees */ if ((decimal - pos) == 3) { /* Reset increment to degrees */ increment = GNSS_NMEA0183_PICO_DEGREES_IN_DEGREE; } /* Verify char is decimal */ if (ddmm_mmmm[pos] < '0' || ddmm_mmmm[pos] > '9') { return -EINVAL; } /* Add increment to pico_degrees */ pico_degrees += (ddmm_mmmm[pos] - '0') * increment; /* Update unit */ increment *= 10; /* Decrement position */ pos--; } /* Convert to nano degrees */ *ndeg = (int64_t)(pico_degrees / GNSS_NMEA0183_PICO_DEGREES_IN_NANO_DEGREE); return 0; } bool gnss_nmea0183_validate_message(char **argv, uint16_t argc) { int32_t tmp = 0; uint8_t checksum = 0; size_t len; __ASSERT(argv != NULL, "argv argument must be provided"); /* Message must contain message id and checksum */ if (argc < 2) { return false; } /* First argument should start with '$' which is not covered by checksum */ if ((argc < 1) || (argv[0][0] != '$')) { return false; } len = strlen(argv[0]); for (uint16_t u = 1; u < len; u++) { checksum ^= argv[0][u]; } checksum ^= ','; /* Cover all except last argument which contains the checksum*/ for (uint16_t i = 1; i < (argc - 1); i++) { len = strlen(argv[i]); for (uint16_t u = 0; u < len; u++) { checksum ^= argv[i][u]; } checksum ^= ','; } if ((gnss_parse_atoi(argv[argc - 1], 16, &tmp) < 0) || (tmp > UINT8_MAX) || (tmp < 0)) { return false; } return checksum == (uint8_t)tmp; } int gnss_nmea0183_knots_to_mms(const char *str, int64_t *mms) { int ret; __ASSERT(str != NULL, "str argument must be 
provided"); __ASSERT(mms != NULL, "mms argument must be provided"); ret = gnss_parse_dec_to_nano(str, mms); if (ret < 0) { return ret; } *mms = (*mms) / GNSS_NMEA0183_NANO_KNOTS_IN_MMS; return 0; } int gnss_nmea0183_parse_hhmmss(const char *hhmmss, struct gnss_time *utc) { int64_t i64; int32_t i32; char part[3] = {0}; __ASSERT(hhmmss != NULL, "hhmmss argument must be provided"); __ASSERT(utc != NULL, "utc argument must be provided"); if (strlen(hhmmss) < 6) { return -EINVAL; } memcpy(part, hhmmss, 2); if ((gnss_parse_atoi(part, 10, &i32) < 0) || (i32 < 0) || (i32 > 23)) { return -EINVAL; } utc->hour = (uint8_t)i32; memcpy(part, &hhmmss[2], 2); if ((gnss_parse_atoi(part, 10, &i32) < 0) || (i32 < 0) || (i32 > 59)) { return -EINVAL; } utc->minute = (uint8_t)i32; if ((gnss_parse_dec_to_milli(&hhmmss[4], &i64) < 0) || (i64 < 0) || (i64 > 59999)) { return -EINVAL; } utc->millisecond = (uint16_t)i64; return 0; } int gnss_nmea0183_parse_ddmmyy(const char *ddmmyy, struct gnss_time *utc) { int32_t i32; char part[3] = {0}; __ASSERT(ddmmyy != NULL, "ddmmyy argument must be provided"); __ASSERT(utc != NULL, "utc argument must be provided"); if (strlen(ddmmyy) != 6) { return -EINVAL; } memcpy(part, ddmmyy, 2); if ((gnss_parse_atoi(part, 10, &i32) < 0) || (i32 < 1) || (i32 > 31)) { return -EINVAL; } utc->month_day = (uint8_t)i32; memcpy(part, &ddmmyy[2], 2); if ((gnss_parse_atoi(part, 10, &i32) < 0) || (i32 < 1) || (i32 > 12)) { return -EINVAL; } utc->month = (uint8_t)i32; memcpy(part, &ddmmyy[4], 2); if ((gnss_parse_atoi(part, 10, &i32) < 0) || (i32 < 0) || (i32 > 99)) { return -EINVAL; } utc->century_year = (uint8_t)i32; return 0; } int gnss_nmea0183_parse_rmc(const char **argv, uint16_t argc, struct gnss_data *data) { int64_t tmp; __ASSERT(argv != NULL, "argv argument must be provided"); __ASSERT(data != NULL, "data argument must be provided"); if (argc < 10) { return -EINVAL; } /* Validate GNSS has fix */ if (argv[2][0] == 'V') { return 0; } if (argv[2][0] != 'A') { return 
-EINVAL; } /* Parse UTC time */ if ((gnss_nmea0183_parse_hhmmss(argv[1], &data->utc) < 0)) { return -EINVAL; } /* Validate cardinal directions */ if (((argv[4][0] != 'N') && (argv[4][0] != 'S')) || ((argv[6][0] != 'E') && (argv[6][0] != 'W'))) { return -EINVAL; } /* Parse coordinates */ if ((gnss_nmea0183_ddmm_mmmm_to_ndeg(argv[3], &data->nav_data.latitude) < 0) || (gnss_nmea0183_ddmm_mmmm_to_ndeg(argv[5], &data->nav_data.longitude) < 0)) { return -EINVAL; } /* Align sign of coordinates with cardinal directions */ data->nav_data.latitude = argv[4][0] == 'N' ? data->nav_data.latitude : -data->nav_data.latitude; data->nav_data.longitude = argv[6][0] == 'E' ? data->nav_data.longitude : -data->nav_data.longitude; /* Parse speed */ if ((gnss_nmea0183_knots_to_mms(argv[7], &tmp) < 0) || (tmp > UINT32_MAX)) { return -EINVAL; } data->nav_data.speed = (uint32_t)tmp; /* Parse bearing */ if ((gnss_parse_dec_to_milli(argv[8], &tmp) < 0) || (tmp > 359999) || (tmp < 0)) { return -EINVAL; } data->nav_data.bearing = (uint32_t)tmp; /* Parse UTC date */ if ((gnss_nmea0183_parse_ddmmyy(argv[9], &data->utc) < 0)) { return -EINVAL; } return 0; } static int parse_gga_fix_quality(const char *str, enum gnss_fix_quality *fix_quality) { __ASSERT(str != NULL, "str argument must be provided"); __ASSERT(fix_quality != NULL, "fix_quality argument must be provided"); if ((str[1] != ((char)'\0')) || (str[0] < ((char)'0')) || (((char)'6') < str[0])) { return -EINVAL; } (*fix_quality) = (enum gnss_fix_quality)(str[0] - ((char)'0')); return 0; } static enum gnss_fix_status fix_status_from_fix_quality(enum gnss_fix_quality fix_quality) { enum gnss_fix_status fix_status = GNSS_FIX_STATUS_NO_FIX; switch (fix_quality) { case GNSS_FIX_QUALITY_GNSS_SPS: case GNSS_FIX_QUALITY_GNSS_PPS: fix_status = GNSS_FIX_STATUS_GNSS_FIX; break; case GNSS_FIX_QUALITY_DGNSS: case GNSS_FIX_QUALITY_RTK: case GNSS_FIX_QUALITY_FLOAT_RTK: fix_status = GNSS_FIX_STATUS_DGNSS_FIX; break; case GNSS_FIX_QUALITY_ESTIMATED: 
fix_status = GNSS_FIX_STATUS_ESTIMATED_FIX; break; default: break; } return fix_status; } int gnss_nmea0183_parse_gga(const char **argv, uint16_t argc, struct gnss_data *data) { int32_t tmp32; int64_t tmp64; __ASSERT(argv != NULL, "argv argument must be provided"); __ASSERT(data != NULL, "data argument must be provided"); if (argc < 12) { return -EINVAL; } /* Parse fix quality and status */ if (parse_gga_fix_quality(argv[6], &data->info.fix_quality) < 0) { return -EINVAL; } data->info.fix_status = fix_status_from_fix_quality(data->info.fix_quality); /* Validate GNSS has fix */ if (data->info.fix_status == GNSS_FIX_STATUS_NO_FIX) { return 0; } /* Parse number of satellites */ if ((gnss_parse_atoi(argv[7], 10, &tmp32) < 0) || (tmp32 > UINT16_MAX) || (tmp32 < 0)) { return -EINVAL; } data->info.satellites_cnt = (uint16_t)tmp32; /* Parse HDOP */ if ((gnss_parse_dec_to_milli(argv[8], &tmp64) < 0) || (tmp64 > UINT32_MAX) || (tmp64 < 0)) { return -EINVAL; } data->info.hdop = (uint16_t)tmp64; /* Parse altitude */ if ((gnss_parse_dec_to_milli(argv[9], &tmp64) < 0) || (tmp64 > INT32_MAX) || (tmp64 < INT32_MIN)) { return -EINVAL; } data->nav_data.altitude = (int32_t)tmp64; return 0; } static int parse_gsv_svs(struct gnss_satellite *satellites, const struct gsv_sv_args *svs, uint16_t svs_size) { int32_t i32; for (uint16_t i = 0; i < svs_size; i++) { /* Parse PRN */ if ((gnss_parse_atoi(svs[i].prn, 10, &i32) < 0) || (i32 < 0) || (i32 > UINT16_MAX)) { return -EINVAL; } satellites[i].prn = (uint16_t)i32; /* Parse elevation */ if ((gnss_parse_atoi(svs[i].elevation, 10, &i32) < 0) || (i32 < 0) || (i32 > 90)) { return -EINVAL; } satellites[i].elevation = (uint8_t)i32; /* Parse azimuth */ if ((gnss_parse_atoi(svs[i].azimuth, 10, &i32) < 0) || (i32 < 0) || (i32 > 359)) { return -EINVAL; } satellites[i].azimuth = (uint16_t)i32; /* Parse SNR */ if (strlen(svs[i].snr) == 0) { satellites[i].snr = 0; satellites[i].is_tracked = false; continue; } if ((gnss_parse_atoi(svs[i].snr, 10, &i32) < 
0) || (i32 < 0) || (i32 > 99)) { return -EINVAL; } satellites[i].snr = (uint16_t)i32; satellites[i].is_tracked = true; } return 0; } int gnss_nmea0183_parse_gsv_header(const char **argv, uint16_t argc, struct gnss_nmea0183_gsv_header *header) { const struct gsv_header_args *args = (const struct gsv_header_args *)argv; int i32; __ASSERT(argv != NULL, "argv argument must be provided"); __ASSERT(header != NULL, "header argument must be provided"); if (argc < 4) { return -EINVAL; } /* Parse GNSS sv_system */ if (gnss_system_from_gsv_header_args(args, &header->system) < 0) { return -EINVAL; } /* Parse number of messages */ if ((gnss_parse_atoi(args->number_of_messages, 10, &i32) < 0) || (i32 < 0) || (i32 > UINT16_MAX)) { return -EINVAL; } header->number_of_messages = (uint16_t)i32; /* Parse message number */ if ((gnss_parse_atoi(args->message_number, 10, &i32) < 0) || (i32 < 0) || (i32 > UINT16_MAX)) { return -EINVAL; } header->message_number = (uint16_t)i32; /* Parse message number */ if ((gnss_parse_atoi(args->numver_of_svs, 10, &i32) < 0) || (i32 < 0) || (i32 > UINT16_MAX)) { return -EINVAL; } header->number_of_svs = (uint16_t)i32; return 0; } int gnss_nmea0183_parse_gsv_svs(const char **argv, uint16_t argc, struct gnss_satellite *satellites, uint16_t size) { const struct gsv_header_args *header_args = (const struct gsv_header_args *)argv; const struct gsv_sv_args *sv_args = (const struct gsv_sv_args *)(argv + 4); uint16_t sv_args_size; enum gnss_system sv_system; __ASSERT(argv != NULL, "argv argument must be provided"); __ASSERT(satellites != NULL, "satellites argument must be provided"); if (argc < 9) { return 0; } sv_args_size = (argc - GNSS_NMEA0183_GSV_HDR_ARG_CNT) / GNSS_NMEA0183_GSV_SV_ARG_CNT; if (size < sv_args_size) { return -ENOMEM; } if (parse_gsv_svs(satellites, sv_args, sv_args_size) < 0) { return -EINVAL; } if (gnss_system_from_gsv_header_args(header_args, &sv_system) < 0) { return -EINVAL; } for (uint16_t i = 0; i < sv_args_size; i++) { 
align_satellite_with_gnss_system(sv_system, &satellites[i]); } return (int)sv_args_size; } ```
/content/code_sandbox/drivers/gnss/gnss_nmea0183.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,083
```objective-c
/*
 *
 */

/*
 * Some enum definitions were adapted from
 * "include/zephyr/drivers/gnss/ublox_neo_m8_defines.h" in pull request
 * #46447 (link - path_to_url).
 */

#ifndef ZEPHYR_U_BLOX_PROTOCOL_DEFINES_
#define ZEPHYR_U_BLOX_PROTOCOL_DEFINES_

/* GNSS constellation identifiers used in UBX messages. */
enum ubx_gnss_id {
	UBX_GNSS_ID_GPS = 0,
	UBX_GNSS_ID_SBAS = 1,
	UBX_GNSS_ID_GALILEO = 2,
	UBX_GNSS_ID_BEIDOU = 3,
	UBX_GNSS_ID_IMES = 4,
	UBX_GNSS_ID_QZSS = 5,
	UBX_GNSS_ID_GLONASS = 6,
};

/* Receiver I/O port numbers as used in UBX-CFG-PRT. */
enum ubx_port_number {
	UBX_PORT_NUMBER_DDC = 0,
	UBX_PORT_NUMBER_UART,
	UBX_PORT_NUMBER_USB,
	UBX_PORT_NUMBER_SPI,
};

/* Platform/dynamic model settings (UBX-CFG-NAV5). Note value 1 is unused. */
enum ubx_dynamic_model {
	UBX_DYN_MODEL_PORTABLE = 0,
	UBX_DYN_MODEL_STATIONARY = 2,
	UBX_DYN_MODEL_PEDESTRIAN = 3,
	UBX_DYN_MODEL_AUTOMOTIVE = 4,
	UBX_DYN_MODEL_SEA = 5,
	UBX_DYN_MODEL_AIRBORNE1G = 6,
	UBX_DYN_MODEL_AIRBORNE2G = 7,
	UBX_DYN_MODEL_AIRBORNE4G = 8,
	UBX_DYN_MODEL_WRIST = 9,
	UBX_DYN_MODEL_BIKE = 10,
};

/* Position fixing mode (UBX-CFG-NAV5). */
enum ubx_fix_mode {
	UBX_FIX_P_2D = 1,
	UBX_FIX_P_3D,
	UBX_FIX_AUTO_FIX,
};

/* UTC standard selection (UBX-CFG-NAV5). */
enum ubx_utc_standard {
	UBX_UTC_AUTOUTC = 0,
	UBX_UTC_GPS = 3,
	UBX_UTC_GALILEO = 5,
	UBX_UTC_GLONASS,
	UBX_UTC_BEIDOU,
	UBX_UTC_NAVIC,
};

/* UBX message class byte (first byte after the sync characters). */
enum ubx_msg_class {
	UBX_CLASS_NAV = 0x01,
	UBX_CLASS_RXM = 0x02,
	UBX_CLASS_INF = 0x04,
	UBX_CLASS_ACK = 0x05,
	UBX_CLASS_CFG = 0x06,
	UBX_CLASS_UPD = 0x09,
	UBX_CLASS_MON = 0x0A,
	UBX_CLASS_AID = 0x0B,
	UBX_CLASS_TIM = 0x0D,
	UBX_CLASS_ESF = 0x10,
	UBX_CLASS_MGA = 0x13,
	UBX_CLASS_LOG = 0x21,
	UBX_CLASS_SEC = 0x27,
	UBX_CLASS_HNR = 0x28,
	UBX_CLASS_NMEA = 0xF0,
};

/* Message IDs within the ACK class. */
enum ubx_ack_message {
	UBX_ACK_ACK = 0x01,
	UBX_ACK_NAK = 0x00,
};

/* Message IDs within the CFG (configuration) class. */
enum ubx_config_message {
	UBX_CFG_ANT = 0x13,
	UBX_CFG_BATCH = 0x93,
	UBX_CFG_CFG = 0x09,
	UBX_CFG_DAT = 0x06,
	UBX_CFG_DGNSS = 0x70,
	UBX_CFG_DOSC = 0x61,
	UBX_CFG_ESFALG = 0x56,
	UBX_CFG_ESFAE = 0x4C,
	UBX_CFG_ESFGE = 0x4D,
	UBX_CFG_ESFWTE = 0x82,
	UBX_CFG_ESRCE = 0x60,
	UBX_CFG_GEOFENCE = 0x69,
	UBX_CFG_GNSS = 0x3E,
	UBX_CFG_HNR = 0x5C,
	UBX_CFG_INF = 0x02,
	UBX_CFG_ITFM = 0x39,
	UBX_CFG_LOGFILTER = 0x47,
	UBX_CFG_MSG = 0x01,
	UBX_CFG_NAV5 = 0x24,
	UBX_CFG_NAVX5 = 0x23,
	UBX_CFG_NMEA = 0x17,
	UBX_CFG_ODO = 0x1E,
	UBX_CFG_PM2 = 0x3B,
	UBX_CFG_PMS = 0x86,
	UBX_CFG_PRT = 0x00,
	UBX_CFG_PWR = 0x57,
	UBX_CFG_RATE = 0x08,
	UBX_CFG_RINV = 0x34,
	UBX_CFG_RST = 0x04,
	UBX_CFG_RXM = 0x11,
	UBX_CFG_SBAS = 0x16,
	UBX_CFG_SENIF = 0x88,
	UBX_CFG_SLAS = 0x8D,
	UBX_CFG_SMGR = 0x62,
	UBX_CFG_SPT = 0x64,
	UBX_CFG_TMODE2 = 0x3D,
	UBX_CFG_TMODE3 = 0x71,
	UBX_CFG_TP5 = 0x31,
	UBX_CFG_TXSLOT = 0x53,
	UBX_CFG_USB = 0x1B,
};

/* Message IDs within the INF (information) class. */
enum ubx_information_message {
	UBX_INF_DEBUG = 0x04,
	UBX_INF_ERROR = 0x00,
	UBX_INF_NOTICE = 0x02,
	UBX_INF_TEST = 0x03,
	UBX_INF_WARNING = 0x01,
};

/* Message IDs within the LOG (logging) class. */
enum ubx_logging_message {
	UBX_LOG_BATCH = 0x11,
	UBX_LOG_CREATE = 0x07,
	UBX_LOG_ERASE = 0x03,
	UBX_LOG_FINDTIME = 0x0E,
	UBX_LOG_INFO = 0x08,
	UBX_LOG_RETRIEVEBATCH = 0x10,
	UBX_LOG_RETRIEVEPOSEXTRA = 0x0f,
	UBX_LOG_RETRIEVEPOS = 0x0b,
	UBX_LOG_RETRIEVESTRING = 0x0d,
	UBX_LOG_RETRIEVE = 0x09,
	UBX_LOG_STRING = 0x04,
};

/* Message IDs within the MGA (multiple GNSS assistance) class. */
enum ubx_multiple_gnss_assistance_message {
	UBX_MGA_ACK = 0x60,
	UBX_MGA_ANO = 0x20,
	UBX_MGA_BDS = 0x03,
	UBX_MGA_DBD = 0x80,
	UBX_MGA_FLASH = 0x21,
	UBX_MGA_GAL = 0x02,
	UBX_MGA_GLO = 0x06,
	UBX_MGA_GPS = 0x00,
	UBX_MGA_INI = 0x40,
	UBX_MGA_QZSS = 0x05,
};

/* Message IDs within the MON (monitoring) class. */
enum ubx_monitoring_message {
	UBX_MON_BATCH = 0x32,
	UBX_MON_GNSS = 0x28,
	UBX_MON_HW2 = 0x0B,
	UBX_MON_HW = 0x09,
	UBX_MON_IO = 0x02,
	UBX_MON_MSGPP = 0x06,
	UBX_MON_PATCH = 0x27,
	UBX_MON_RXBUF = 0x07,
	UBX_MON_RXR = 0x21,
	UBX_MON_SMGR = 0x2E,
	UBX_MON_SPT = 0x2F,
	UBX_MON_TXBUF = 0x08,
	UBX_MON_VER = 0x04,
};

/* Message IDs within the NAV (navigation results) class. */
enum ubx_nagivation_results_message {
	UBX_NAV_AOPSTATUS = 0x60,
	UBX_NAV_ATT = 0x05,
	UBX_NAV_CLOCK = 0x22,
	UBX_NAV_COV = 0x36,
	UBX_NAV_DGPS = 0x31,
	UBX_NAV_DOP = 0x04,
	UBX_NAV_EELL = 0x3d,
	UBX_NAV_EOE = 0x61,
	UBX_NAV_GEOFENCE = 0x39,
	UBX_NAV_HPPOSECEF = 0x13,
	UBX_NAV_HPPOSLLH = 0x14,
	UBX_NAV_NMI = 0x28,
	UBX_NAV_ODO = 0x09,
	UBX_NAV_ORB = 0x34,
	UBX_NAV_POSECEF = 0x01,
	UBX_NAV_POSLLH = 0x02,
	UBX_NAV_PVT = 0x07,
	UBX_NAV_RELPOSNED = 0x3C,
	UBX_NAV_RESETODO = 0x10,
	UBX_NAV_SAT = 0x35,
	UBX_NAV_SBAS = 0x32,
	UBX_NAV_SLAS = 0x42,
	UBX_NAV_SOL = 0x06,
	UBX_NAV_STATUS = 0x03,
	UBX_NAV_SVINFO = 0x30,
	UBX_NAV_SVIN = 0x3B,
	UBX_NAV_TIMEBDS = 0x24,
	UBX_NAV_TIMEGAL = 0x25,
	UBX_NAV_TIMEGLO = 0x23,
	UBX_NAV_TIMEGPS = 0x20,
	UBX_NAV_TIMELS = 0x26,
	UBX_NAV_TIMEUTC = 0x21,
	UBX_NAV_VELECEF = 0x11,
	UBX_NAV_VELNED = 0x12,
};

/* Message IDs within the RXM (receiver manager) class. */
enum ubx_receiver_manager_message {
	UBX_RXM_IMES = 0x61,
	UBX_RXM_MEASX = 0x14,
	UBX_RXM_PMREQ = 0x41,
	UBX_RXM_RAWX = 0x15,
	UBX_RXM_RLM = 0x59,
	UBX_RXM_RTCM = 0x32,
	UBX_RXM_SFRBX = 0x13,
};

/* Message IDs within the TIM (timing) class. */
enum ubx_timing_message {
	UBX_TIM_DOSC = 0x11,
	UBX_TIM_FCHG = 0x16,
	UBX_TIM_HOC = 0x17,
	UBX_TIM_SMEAS = 0x13,
	UBX_TIM_SVIN = 0x04,
	UBX_TIM_TM2 = 0x03,
	UBX_TIM_TOS = 0x12,
	UBX_TIM_TP = 0x01,
	UBX_TIM_VCOCAL = 0x15,
	UBX_TIM_VRFY = 0x06,
};

/* Message IDs within the NMEA class (standard NMEA sentence set). */
enum ubx_nmea_message_id {
	UBX_NMEA_DTM = 0x0A,
	UBX_NMEA_GBQ = 0x44,
	UBX_NMEA_GBS = 0x09,
	UBX_NMEA_GGA = 0x00,
	UBX_NMEA_GLL = 0x01,
	UBX_NMEA_GLQ = 0x43,
	UBX_NMEA_GNQ = 0x42,
	UBX_NMEA_GNS = 0x0D,
	UBX_NMEA_GPQ = 0x40,
	UBX_NMEA_GRS = 0x06,
	UBX_NMEA_GSA = 0x02,
	UBX_NMEA_GST = 0x07,
	UBX_NMEA_GSV = 0x03,
	UBX_NMEA_RMC = 0x04,
	UBX_NMEA_THS = 0x0E,
	UBX_NMEA_TXT = 0x41,
	UBX_NMEA_VLW = 0x0F,
	UBX_NMEA_VTG = 0x05,
	UBX_NMEA_ZDA = 0x08,
};

#endif /* ZEPHYR_U_BLOX_PROTOCOL_DEFINES_ */
```
/content/code_sandbox/drivers/gnss/gnss_u_blox_protocol/gnss_u_blox_protocol_defines.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,629
```unknown
# Loopback disk driver: exposes the contents of a regular file on an existing
# filesystem as a disk device. Requires a filesystem to host the backing file.
menuconfig DISK_DRIVER_LOOPBACK
	bool "Loopback Disk"
	depends on FILE_SYSTEM
	help
	  Enables mounting the contents of a file as a separate disk.

if DISK_DRIVER_LOOPBACK

config LOOPBACK_DISK_SECTOR_SIZE
	int "Loopback disk sector size"
	default 512
	help
	  Sets the sector size used for loopback-mounted disks.

# Standard per-module log level options (LOOPBACK_DISK_LOG_LEVEL et al.).
module = LOOPBACK_DISK
module-str = loopback_disk
source "subsys/logging/Kconfig.template.log_config"

endif # DISK_DRIVER_LOOPBACK
```
/content/code_sandbox/drivers/disk/Kconfig.loopback
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
110
```unknown
# RAM disk driver: emulates a storage disk in a RAM buffer.
# Enabled by default when a zephyr,ram-disk devicetree node is present.
config DISK_DRIVER_RAM
	bool "RAM Disk"
	depends on DT_HAS_ZEPHYR_RAM_DISK_ENABLED
	default y
	help
	  RAM buffer used to emulate storage disk.
	  This option can be used to test the file system.

if DISK_DRIVER_RAM

# Standard per-module log level options (RAMDISK_LOG_LEVEL et al.).
module = RAMDISK
module-str = ramdisk
source "subsys/logging/Kconfig.template.log_config"

endif # DISK_DRIVER_RAM
```
/content/code_sandbox/drivers/disk/Kconfig.ram
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
90
```c /* * */ #include <sys/unistd.h> #include <zephyr/drivers/disk.h> #include <zephyr/fs/fs.h> #include <zephyr/fs/fs_interface.h> #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> #include <zephyr/drivers/loopback_disk.h> LOG_MODULE_REGISTER(loopback_disk_access, CONFIG_LOOPBACK_DISK_LOG_LEVEL); #define LOOPBACK_SECTOR_SIZE CONFIG_LOOPBACK_DISK_SECTOR_SIZE static inline struct loopback_disk_access *get_ctx(struct disk_info *info) { return CONTAINER_OF(info, struct loopback_disk_access, info); } static int loopback_disk_access_status(struct disk_info *disk) { return DISK_STATUS_OK; } static int loopback_disk_access_read(struct disk_info *disk, uint8_t *data_buf, uint32_t start_sector, uint32_t num_sector) { struct loopback_disk_access *ctx = get_ctx(disk); int ret = fs_seek(&ctx->file, start_sector * LOOPBACK_SECTOR_SIZE, FS_SEEK_SET); if (ret != 0) { LOG_ERR("Failed to seek backing file: %d", ret); return ret; } const size_t total_len = num_sector * LOOPBACK_SECTOR_SIZE; size_t len_left = total_len; while (len_left > 0) { ret = fs_read(&ctx->file, data_buf, len_left); if (ret < 0) { LOG_ERR("Failed to read from backing file: %d", ret); return ret; } if (ret == 0) { LOG_WRN("Tried to read past end of backing file"); return -EIO; } __ASSERT(ret <= len_left, "fs_read returned more than we asked for: %d instead of %ld", ret, len_left); len_left -= ret; } return 0; } static int loopback_disk_access_write(struct disk_info *disk, const uint8_t *data_buf, uint32_t start_sector, uint32_t num_sector) { struct loopback_disk_access *ctx = get_ctx(disk); if (start_sector + num_sector > ctx->num_sectors) { LOG_WRN("Tried to write past end of backing file"); return -EIO; } int ret = fs_seek(&ctx->file, start_sector * LOOPBACK_SECTOR_SIZE, FS_SEEK_SET); if (ret != 0) { LOG_ERR("Failed to seek backing file: %d", ret); return ret; } const size_t total_len = num_sector * LOOPBACK_SECTOR_SIZE; size_t buf_offset = 0; while (buf_offset < total_len) { ret = 
fs_write(&ctx->file, &data_buf[buf_offset], total_len - buf_offset); if (ret < 0) { LOG_ERR("Failed to write to backing file: %d", ret); return ret; } if (ret == 0) { LOG_ERR("0-byte write to backing file"); return -EIO; } buf_offset += ret; } return 0; } static int loopback_disk_access_ioctl(struct disk_info *disk, uint8_t cmd, void *buff) { struct loopback_disk_access *ctx = get_ctx(disk); switch (cmd) { case DISK_IOCTL_GET_SECTOR_COUNT: { *(uint32_t *)buff = ctx->num_sectors; return 0; } case DISK_IOCTL_GET_SECTOR_SIZE: { *(uint32_t *)buff = LOOPBACK_SECTOR_SIZE; return 0; } case DISK_IOCTL_CTRL_DEINIT: case DISK_IOCTL_CTRL_SYNC: return fs_sync(&ctx->file); case DISK_IOCTL_CTRL_INIT: return 0; default: return -ENOTSUP; } } static int loopback_disk_access_init(struct disk_info *disk) { return loopback_disk_access_ioctl(disk, DISK_IOCTL_CTRL_INIT, NULL); } static const struct disk_operations loopback_disk_operations = { .init = loopback_disk_access_init, .status = loopback_disk_access_status, .read = loopback_disk_access_read, .write = loopback_disk_access_write, .ioctl = loopback_disk_access_ioctl, }; int loopback_disk_access_register(struct loopback_disk_access *ctx, const char *file_path, const char *disk_access_name) { ctx->file_path = file_path; ctx->info.name = disk_access_name; ctx->info.ops = &loopback_disk_operations; struct fs_dirent entry; int ret = fs_stat(ctx->file_path, &entry); if (ret != 0) { LOG_ERR("Failed to stat backing file: %d", ret); return ret; } if (entry.size % LOOPBACK_SECTOR_SIZE != 0) { LOG_WRN("Backing file is not a multiple of sector size (%d bytes), " "rounding down: %ld bytes", LOOPBACK_SECTOR_SIZE, entry.size); } ctx->num_sectors = entry.size / LOOPBACK_SECTOR_SIZE; fs_file_t_init(&ctx->file); ret = fs_open(&ctx->file, ctx->file_path, FS_O_READ | FS_O_WRITE); if (ret != 0) { LOG_ERR("Failed to open backing file: %d", ret); return ret; } ret = disk_access_register(&ctx->info); if (ret != 0) { LOG_ERR("Failed to register disk 
access: %d", ret); return ret; } return 0; } int loopback_disk_access_unregister(struct loopback_disk_access *ctx) { int ret; ret = disk_access_unregister(&ctx->info); if (ret != 0) { LOG_ERR("Failed to unregister disk access: %d", ret); return ret; } ctx->info.name = NULL; ctx->info.ops = NULL; ret = fs_close(&ctx->file); if (ret != 0) { LOG_ERR("Failed to close backing file: %d", ret); return ret; } return 0; } ```
/content/code_sandbox/drivers/disk/loopback_disk.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,319