text stringlengths 9 39.2M | dir stringlengths 25 226 | lang stringclasses 163 values | created_date timestamp[s] | updated_date timestamp[s] | repo_name stringclasses 751 values | repo_full_name stringclasses 752 values | star int64 1.01k 183k | len_tokens int64 1 18.5M |
|---|---|---|---|---|---|---|---|---|
```unknown
# Renesas RA Clock Generation Circuit (CGC) driver; enabled by default
# whenever the SoC family and FSP HAL support are present.
config CLOCK_CONTROL_RENESAS_RA_CGC
	bool "RA CGC driver"
	default y
	depends on SOC_FAMILY_RENESAS_RA
	depends on HAS_RENESAS_RA_FSP
	help
	  Enable support for Renesas RA CGC driver.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.renesas_ra_cgc | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 57 |
```unknown
# Fixed clock control driver config
config CLOCK_CONTROL_FIXED_RATE_CLOCK
	bool "Fixed Clock Clock Control"
	help
	  Enable driver for devicetree defined fixed clocks.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.fixed | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 39 |
```c
/*
*
*/
#include <zephyr/drivers/sensor.h>
#include <zephyr/drivers/clock_control.h>
#include "nrf_clock_calibration.h"
#include <zephyr/drivers/clock_control/nrf_clock_control.h>
#include <nrfx_clock.h>
#include <zephyr/logging/log.h>
#include <stdlib.h>
LOG_MODULE_DECLARE(clock_control, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
/**
* Terms:
* - calibration - overall process of LFRC clock calibration which is performed
* periodically, calibration may include temperature monitoring, hf XTAL
* starting and stopping.
* - cycle - all calibration phases (waiting, temperature monitoring,
* calibration).
* - process - calibration process which may consists of hf XTAL clock
* requesting, performing hw calibration and releasing hf clock.
* - hw_cal - calibration action performed by the hardware.
*
* Those terms are later on used in function names.
*
* In order to ensure that low frequency clock is not released when calibration
* is ongoing, it is requested by the calibration process and released when
* calibration is done.
*/
static atomic_t cal_process_in_progress;
static uint8_t calib_skip_cnt; /* Counting down skipped calibrations. */
static volatile int total_cnt; /* Total number of calibrations. */
static volatile int total_skips_cnt; /* Total number of skipped calibrations. */
static void cal_hf_callback(struct onoff_manager *mgr,
struct onoff_client *cli,
uint32_t state, int res);
static void cal_lf_callback(struct onoff_manager *mgr,
struct onoff_client *cli,
uint32_t state, int res);
static struct onoff_client client;
static struct onoff_manager *mgrs;
/* Temperature sensor is only needed if
* CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_MAX_SKIP > 0, since a value of 0
* indicates performing calibration periodically regardless of temperature
* change.
*/
#define USE_TEMP_SENSOR \
(CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_MAX_SKIP > 0)
#if USE_TEMP_SENSOR
static const struct device *const temp_sensor =
DEVICE_DT_GET_OR_NULL(DT_INST(0, nordic_nrf_temp));
static void measure_temperature(struct k_work *work);
static K_WORK_DEFINE(temp_measure_work, measure_temperature);
static int16_t prev_temperature; /* Previous temperature measurement. */
#endif /* USE_TEMP_SENSOR */
static void timeout_handler(struct k_timer *timer);
static K_TIMER_DEFINE(backoff_timer, timeout_handler, NULL);
/* Submit an on-off request for the given clock manager, registering
 * @p callback to be notified when the request completes.
 */
static void clk_request(struct onoff_manager *mgr, struct onoff_client *cli,
			onoff_client_callback callback)
{
	sys_notify_init_callback(&cli->notify, callback);

	int rc = onoff_request(mgr, cli);

	__ASSERT_NO_MSG(rc >= 0);
}
/* Release a previously requested clock; a negative result is a
 * programming error.
 */
static void clk_release(struct onoff_manager *mgr)
{
	int rc = onoff_release(mgr);

	__ASSERT_NO_MSG(rc >= 0);
}
/* Request the HF clock; completion is reported via cal_hf_callback. */
static void hf_request(void)
{
	clk_request(&mgrs[CLOCK_CONTROL_NRF_TYPE_HFCLK], &client, cal_hf_callback);
}

/* Request the LF clock; completion is reported via cal_lf_callback. */
static void lf_request(void)
{
	clk_request(&mgrs[CLOCK_CONTROL_NRF_TYPE_LFCLK], &client, cal_lf_callback);
}

/* Release the HF clock held by the calibration process. */
static void hf_release(void)
{
	clk_release(&mgrs[CLOCK_CONTROL_NRF_TYPE_HFCLK]);
}

/* Release the LF clock held by the calibration process. */
static void lf_release(void)
{
	clk_release(&mgrs[CLOCK_CONTROL_NRF_TYPE_LFCLK]);
}
/* Called when the LF clock request completes; chains into requesting the
 * HF XTAL needed for the hardware calibration.
 */
static void cal_lf_callback(struct onoff_manager *mgr,
			    struct onoff_client *cli,
			    uint32_t state, int res)
{
	hf_request();
}
/* Start actual HW calibration assuming that HFCLK XTAL is on. */
/* Start actual HW calibration assuming that HFCLK XTAL is on.
 * Resets the skip counter so subsequent cycles may again skip up to
 * CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_MAX_SKIP calibrations.
 */
static void start_hw_cal(void)
{
	nrfx_clock_calibration_start();
	calib_skip_cnt = CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_MAX_SKIP;
}
/* Start cycle by starting backoff timer and releasing HFCLK XTAL. */
/* Start cycle by starting backoff timer and releasing HFCLK XTAL.
 * The LF clock reference taken in start_cal_process() is dropped here
 * unless LF is configured always-on. Clearing cal_process_in_progress
 * last re-arms start_cal_process() for the next cycle.
 */
static void start_cycle(void)
{
	k_timer_start(&backoff_timer,
		      K_MSEC(CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_PERIOD),
		      K_NO_WAIT);
	hf_release();
	if (!IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_LF_ALWAYS_ON)) {
		lf_release();
	}
	cal_process_in_progress = 0;
}
/* Kick off one calibration process. The atomic compare-and-swap makes
 * this idempotent: if a process is already running nothing happens.
 */
static void start_cal_process(void)
{
	if (!atomic_cas(&cal_process_in_progress, 0, 1)) {
		/* Already running. */
		return;
	}

	if (!IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_LF_ALWAYS_ON)) {
		/* LF clock is probably running but it is requested to ensure
		 * that it is not released while calibration process in ongoing.
		 * If system releases the clock during calibration process it
		 * will be released at the end of calibration process and
		 * stopped in consequence.
		 */
		lf_request();
	} else {
		hf_request();
	}
}
/* Backoff timer expiry: begin the next calibration process. */
static void timeout_handler(struct k_timer *timer)
{
	start_cal_process();
}
/* Called when HFCLK XTAL is on. Schedules temperature measurement or triggers
* calibration.
*/
/* Called when HFCLK XTAL is on. Schedules temperature measurement or
 * triggers calibration. If the temperature sensor is compiled in but not
 * ready, calibration proceeds unconditionally.
 */
static void cal_hf_callback(struct onoff_manager *mgr,
			    struct onoff_client *cli,
			    uint32_t state, int res)
{
#if USE_TEMP_SENSOR
	if (!device_is_ready(temp_sensor)) {
		start_hw_cal();
	} else {
		/* Temperature read must happen in thread context. */
		k_work_submit(&temp_measure_work);
	}
#else
	start_hw_cal();
#endif /* USE_TEMP_SENSOR */
}
#if USE_TEMP_SENSOR
/* Convert sensor value to 0.25'C units. */
static inline int16_t sensor_value_to_temp_unit(struct sensor_value *val)
{
return (int16_t)(4 * val->val1 + val->val2 / 250000);
}
/* Function reads from temperature sensor and converts to 0.25'C units. */
/* Read the die temperature and convert it to 0.25'C units.
 *
 * @param tvp Output, written only on success.
 * @return 0 on success, otherwise the sensor API error code.
 */
static int get_temperature(int16_t *tvp)
{
	struct sensor_value val;
	int err = sensor_sample_fetch(temp_sensor);

	if (err != 0) {
		return err;
	}

	err = sensor_channel_get(temp_sensor, SENSOR_CHAN_DIE_TEMP, &val);
	if (err == 0) {
		*tvp = sensor_value_to_temp_unit(&val);
	}

	return err;
}
/* Function determines if calibration should be performed based on temperature
* measurement. Function is called from system work queue context. It is
* reading temperature from TEMP sensor and compares with last measurement.
*/
/* Function determines if calibration should be performed based on temperature
 * measurement. Function is called from system work queue context. It is
 * reading temperature from TEMP sensor and compares with last measurement.
 * Calibration is started when the skip budget is exhausted or the
 * temperature moved by at least the configured threshold; otherwise the
 * calibration is skipped and the next cycle is scheduled.
 */
static void measure_temperature(struct k_work *work)
{
	int16_t temperature = 0;
	int16_t diff = 0;
	bool started = false;
	int rc;

	rc = get_temperature(&temperature);
	if (rc != 0) {
		/* Temperature read failed, force calibration. */
		calib_skip_cnt = 0;
	} else {
		diff = abs(temperature - prev_temperature);
	}

	if ((calib_skip_cnt == 0) ||
	    (diff >= CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_TEMP_DIFF)) {
		/* Remember the reference temperature for the next comparison. */
		prev_temperature = temperature;
		started = true;
		start_hw_cal();
	} else {
		calib_skip_cnt--;
		total_skips_cnt++;
		start_cycle();
	}

	LOG_DBG("Calibration %s. Temperature diff: %d (in 0.25'C units).",
		started ? "started" : "skipped", diff);
}
#endif /* USE_TEMP_SENSOR */
/* Initialize the calibration module with the clock on-off managers and
 * reset the debug counters.
 */
void z_nrf_clock_calibration_init(struct onoff_manager *onoff_mgrs)
{
	total_skips_cnt = 0;
	total_cnt = 0;
	mgrs = onoff_mgrs;
}
/* Start a calibration process that will not be skipped: clearing the
 * skip counter forces the temperature check to proceed to calibration.
 */
static void start_unconditional_cal_process(void)
{
	calib_skip_cnt = 0;
	start_cal_process();
}
/* Force a calibration now unless one is already running — an
 * in-progress calibration already satisfies the request.
 */
void z_nrf_clock_calibration_force_start(void)
{
	if (!cal_process_in_progress) {
		start_unconditional_cal_process();
	}
}
/* LF clock just started: calibrate immediately, without skipping. */
void z_nrf_clock_calibration_lfclk_started(void)
{
	start_unconditional_cal_process();
}
/* LF clock stopped: cancel the periodic backoff timer. */
void z_nrf_clock_calibration_lfclk_stopped(void)
{
	k_timer_stop(&backoff_timer);
	LOG_DBG("Calibration stopped");
}
/* Hardware calibration completed: bump the counter and schedule the
 * next cycle.
 */
void z_nrf_clock_calibration_done_handler(void)
{
	total_cnt++;
	LOG_DBG("Calibration done.");

	start_cycle();
}
/* Number of completed calibrations, or -1 when the debug Kconfig option
 * is disabled.
 */
int z_nrf_clock_calibration_count(void)
{
	return IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_DEBUG) ?
		total_cnt : -1;
}
/* Number of skipped calibrations, or -1 when the debug Kconfig option
 * is disabled.
 */
int z_nrf_clock_calibration_skips_count(void)
{
	return IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_DEBUG) ?
		total_skips_cnt : -1;
}
``` | /content/code_sandbox/drivers/clock_control/nrf_clock_calibration.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,805 |
```c
/*
*
*/
#define DT_DRV_COMPAT nxp_imx_ccm
#include <errno.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/dt-bindings/clock/imx_ccm.h>
#include <fsl_clock.h>
#if defined(CONFIG_SOC_MIMX8QM6_ADSP) || defined(CONFIG_SOC_MIMX8QX6_ADSP)
#include <main/ipc.h>
#endif
#define LOG_LEVEL CONFIG_CLOCK_CONTROL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control);
#ifdef CONFIG_SPI_MCUX_LPSPI
static const clock_name_t lpspi_clocks[] = {
kCLOCK_Usb1PllPfd1Clk,
kCLOCK_Usb1PllPfd0Clk,
kCLOCK_SysPllClk,
kCLOCK_SysPllPfd2Clk,
};
#endif
#ifdef CONFIG_UART_MCUX_IUART
static const clock_root_control_t uart_clk_root[] = {
kCLOCK_RootUart1,
kCLOCK_RootUart2,
kCLOCK_RootUart3,
kCLOCK_RootUart4,
};
static const clock_ip_name_t uart_clocks[] = {
kCLOCK_Uart1,
kCLOCK_Uart2,
kCLOCK_Uart3,
kCLOCK_Uart4,
};
#endif
#ifdef CONFIG_UART_MCUX_LPUART
#ifdef CONFIG_SOC_MIMX8QM6_ADSP
static const clock_ip_name_t lpuart_clocks[] = {
kCLOCK_DMA_Lpuart0,
kCLOCK_DMA_Lpuart1,
kCLOCK_DMA_Lpuart2,
kCLOCK_DMA_Lpuart3,
kCLOCK_DMA_Lpuart4,
};
static const uint32_t lpuart_rate = MHZ(80);
#endif /* CONFIG_SOC_MIMX8QM6_ADSP */
#ifdef CONFIG_SOC_MIMX8QX6_ADSP
static const clock_ip_name_t lpuart_clocks[] = {
kCLOCK_DMA_Lpuart0,
kCLOCK_DMA_Lpuart1,
kCLOCK_DMA_Lpuart2,
kCLOCK_DMA_Lpuart3,
};
static const uint32_t lpuart_rate = MHZ(80);
#endif /* CONFIG_SOC_MIMX8QX6_ADSP */
#endif /* CONFIG_UART_MCUX_LPUART */
/*
 * Gate on the clock identified by @p sub_system.
 *
 * Only UART/LPUART gates (and the ENET gate when CONFIG_ETH_NXP_ENET is
 * enabled) are managed here; any other clock name is left untouched and
 * 0 is returned.
 */
static int mcux_ccm_on(const struct device *dev,
		       clock_control_subsys_t sub_system)
{
	uint32_t clock_name = (uintptr_t)sub_system;
	/* Low bits of the clock name encode the peripheral instance. */
	uint32_t instance = clock_name & IMX_CCM_INSTANCE_MASK;

	switch (clock_name) {
#ifdef CONFIG_UART_MCUX_IUART
	case IMX_CCM_UART1_CLK:
	case IMX_CCM_UART2_CLK:
	case IMX_CCM_UART3_CLK:
	case IMX_CCM_UART4_CLK:
		CLOCK_EnableClock(uart_clocks[instance]);
		return 0;
#endif
#if defined(CONFIG_UART_MCUX_LPUART) && defined(CONFIG_SOC_MIMX8QM6_ADSP)
	case IMX_CCM_LPUART1_CLK:
	case IMX_CCM_LPUART2_CLK:
	case IMX_CCM_LPUART3_CLK:
	case IMX_CCM_LPUART4_CLK:
	case IMX_CCM_LPUART5_CLK:
		CLOCK_EnableClock(lpuart_clocks[instance]);
		return 0;
#endif
#if defined(CONFIG_UART_MCUX_LPUART) && defined(CONFIG_SOC_MIMX8QX6_ADSP)
	case IMX_CCM_LPUART1_CLK:
	case IMX_CCM_LPUART2_CLK:
	case IMX_CCM_LPUART3_CLK:
	case IMX_CCM_LPUART4_CLK:
		CLOCK_EnableClock(lpuart_clocks[instance]);
		return 0;
#endif
#if defined(CONFIG_ETH_NXP_ENET)
/* i.MX8M uses a per-instance gate name; other SoCs a single one. */
#ifdef CONFIG_SOC_SERIES_IMX8M
#define ENET_CLOCK kCLOCK_Enet1
#else
#define ENET_CLOCK kCLOCK_Enet
#endif
	case IMX_CCM_ENET_CLK:
		CLOCK_EnableClock(ENET_CLOCK);
		return 0;
#endif
	default:
		/* Silence unused variable warning when no gate is compiled in. */
		(void)instance;
		return 0;
	}
}
/*
 * Gate off the clock identified by @p sub_system.
 *
 * Only the IUART gates can be disabled; all other clocks are left
 * running and 0 is returned.
 */
static int mcux_ccm_off(const struct device *dev,
			clock_control_subsys_t sub_system)
{
	uint32_t clock_name = (uintptr_t)sub_system;
	/* Low bits of the clock name encode the peripheral instance. */
	uint32_t instance = clock_name & IMX_CCM_INSTANCE_MASK;

	switch (clock_name) {
#ifdef CONFIG_UART_MCUX_IUART
	case IMX_CCM_UART1_CLK:
	case IMX_CCM_UART2_CLK:
	case IMX_CCM_UART3_CLK:
	case IMX_CCM_UART4_CLK:
		CLOCK_DisableClock(uart_clocks[instance]);
		return 0;
#endif
	default:
		/* Silence unused variable warning when no gate is compiled in. */
		(void)instance;
		return 0;
	}
}
/*
 * Query the frequency (in Hz) of the clock identified by @p sub_system.
 *
 * Writes the rate to @p rate and returns 0. An unrecognized clock name
 * leaves @p rate untouched and still returns 0. Which cases exist is
 * decided at build time by the enabled driver Kconfig options.
 */
static int mcux_ccm_get_subsys_rate(const struct device *dev,
				    clock_control_subsys_t sub_system,
				    uint32_t *rate)
{
	uint32_t clock_name = (uintptr_t)sub_system;

	switch (clock_name) {
#ifdef CONFIG_I2C_MCUX_LPI2C
	case IMX_CCM_LPI2C_CLK:
		/* Mux 0: USB1 PLL / 8; otherwise the oscillator. */
		if (CLOCK_GetMux(kCLOCK_Lpi2cMux) == 0) {
			*rate = CLOCK_GetPllFreq(kCLOCK_PllUsb1) / 8
				/ (CLOCK_GetDiv(kCLOCK_Lpi2cDiv) + 1);
		} else {
			*rate = CLOCK_GetOscFreq()
				/ (CLOCK_GetDiv(kCLOCK_Lpi2cDiv) + 1);
		}

		break;
#endif

#ifdef CONFIG_SPI_MCUX_LPSPI
	case IMX_CCM_LPSPI_CLK:
	{
		/* Mux value indexes the lpspi_clocks[] source table. */
		uint32_t lpspi_mux = CLOCK_GetMux(kCLOCK_LpspiMux);
		clock_name_t lpspi_clock = lpspi_clocks[lpspi_mux];

		*rate = CLOCK_GetFreq(lpspi_clock)
			/ (CLOCK_GetDiv(kCLOCK_LpspiDiv) + 1);
		break;
	}
#endif

#ifdef CONFIG_UART_MCUX_LPUART
#if defined(CONFIG_SOC_MIMX8QM6_ADSP)
	case IMX_CCM_LPUART1_CLK:
	case IMX_CCM_LPUART2_CLK:
	case IMX_CCM_LPUART3_CLK:
	case IMX_CCM_LPUART4_CLK:
	case IMX_CCM_LPUART5_CLK:
		/* Program the fixed LPUART IP rate, then read it back. */
		uint32_t instance = clock_name & IMX_CCM_INSTANCE_MASK;
		CLOCK_SetIpFreq(lpuart_clocks[instance], lpuart_rate);
		*rate = CLOCK_GetIpFreq(lpuart_clocks[instance]);
		break;
#elif defined(CONFIG_SOC_MIMX8QX6_ADSP)
	case IMX_CCM_LPUART1_CLK:
	case IMX_CCM_LPUART2_CLK:
	case IMX_CCM_LPUART3_CLK:
	case IMX_CCM_LPUART4_CLK:
		/* Program the fixed LPUART IP rate, then read it back. */
		uint32_t instance = clock_name & IMX_CCM_INSTANCE_MASK;
		CLOCK_SetIpFreq(lpuart_clocks[instance], lpuart_rate);
		*rate = CLOCK_GetIpFreq(lpuart_clocks[instance]);
		break;
#else
	case IMX_CCM_LPUART_CLK:
		/* Mux 0: USB1 PLL / 6; otherwise the oscillator. */
		if (CLOCK_GetMux(kCLOCK_UartMux) == 0) {
			*rate = CLOCK_GetPllFreq(kCLOCK_PllUsb1) / 6
				/ (CLOCK_GetDiv(kCLOCK_UartDiv) + 1);
		} else {
			*rate = CLOCK_GetOscFreq()
				/ (CLOCK_GetDiv(kCLOCK_UartDiv) + 1);
		}

		break;
#endif
#endif

#if DT_NODE_HAS_STATUS(DT_NODELABEL(usdhc1), okay) && CONFIG_IMX_USDHC
	case IMX_CCM_USDHC1_CLK:
		*rate = CLOCK_GetSysPfdFreq(kCLOCK_Pfd0) /
				(CLOCK_GetDiv(kCLOCK_Usdhc1Div) + 1U);
		break;
#endif

#if DT_NODE_HAS_STATUS(DT_NODELABEL(usdhc2), okay) && CONFIG_IMX_USDHC
	case IMX_CCM_USDHC2_CLK:
		*rate = CLOCK_GetSysPfdFreq(kCLOCK_Pfd0) /
				(CLOCK_GetDiv(kCLOCK_Usdhc2Div) + 1U);
		break;
#endif

#ifdef CONFIG_DMA_MCUX_EDMA
	case IMX_CCM_EDMA_CLK:
		*rate = CLOCK_GetIpgFreq();
		break;
#endif

#ifdef CONFIG_PWM_MCUX
	case IMX_CCM_PWM_CLK:
		*rate = CLOCK_GetIpgFreq();
		break;
#endif

#ifdef CONFIG_ETH_NXP_ENET
	case IMX_CCM_ENET_CLK:
#ifdef CONFIG_SOC_SERIES_IMX8M
		*rate = CLOCK_GetFreq(kCLOCK_EnetIpgClk);
#else
		*rate = CLOCK_GetIpgFreq();
#endif
#endif
	/* NOTE(review): this break sits outside the ENET #ifdef, so it is
	 * a stray (unreachable) statement when CONFIG_ETH_NXP_ENET is
	 * disabled — confirm against upstream whether it belongs inside.
	 */
	break;
#ifdef CONFIG_PTP_CLOCK_NXP_ENET
	case IMX_CCM_ENET_PLL:
		*rate = CLOCK_GetPllFreq(kCLOCK_PllEnet);
		break;
#endif

#ifdef CONFIG_UART_MCUX_IUART
	case IMX_CCM_UART1_CLK:
	case IMX_CCM_UART2_CLK:
	case IMX_CCM_UART3_CLK:
	case IMX_CCM_UART4_CLK:
	{
		uint32_t instance = clock_name & IMX_CCM_INSTANCE_MASK;
		clock_root_control_t clk_root = uart_clk_root[instance];
		uint32_t uart_mux = CLOCK_GetRootMux(clk_root);

		/* Mux 0: 24 MHz oscillator; mux 1: System PLL1 / 10. */
		if (uart_mux == 0) {
			*rate = MHZ(24);
		} else if (uart_mux == 1) {
			*rate = CLOCK_GetPllFreq(kCLOCK_SystemPll1Ctrl) /
				(CLOCK_GetRootPreDivider(clk_root)) /
				(CLOCK_GetRootPostDivider(clk_root)) /
				10;
		}

	} break;
#endif

#ifdef CONFIG_CAN_MCUX_FLEXCAN
	case IMX_CCM_CAN_CLK:
	{
		uint32_t can_mux = CLOCK_GetMux(kCLOCK_CanMux);

		/* Mux 0: USB1 PLL / 8; 1: oscillator; else USB1 PLL / 6. */
		if (can_mux == 0) {
			*rate = CLOCK_GetPllFreq(kCLOCK_PllUsb1) / 8
				/ (CLOCK_GetDiv(kCLOCK_CanDiv) + 1);
		} else if  (can_mux == 1) {
			*rate = CLOCK_GetOscFreq()
				/ (CLOCK_GetDiv(kCLOCK_CanDiv) + 1);
		} else {
			*rate = CLOCK_GetPllFreq(kCLOCK_PllUsb1) / 6
				/ (CLOCK_GetDiv(kCLOCK_CanDiv) + 1);
		}
	} break;
#endif

#ifdef CONFIG_COUNTER_MCUX_GPT
	case IMX_CCM_GPT_CLK:
		*rate = CLOCK_GetFreq(kCLOCK_PerClk);
		break;
#ifdef CONFIG_SOC_SERIES_IMX8M
	case IMX_CCM_GPT_IPG_CLK:
	{
		uint32_t mux = CLOCK_GetRootMux(kCLOCK_RootGpt1);

		/* Only the 24 MHz oscillator source is supported here. */
		if (mux == 0)
			*rate = OSC24M_CLK_FREQ;
		else
			*rate = 0;
	} break;
#endif
#endif

#ifdef CONFIG_COUNTER_MCUX_QTMR
	case IMX_CCM_QTMR_CLK:
		*rate = CLOCK_GetIpgFreq();
		break;
#endif

#ifdef CONFIG_I2S_MCUX_SAI
	case IMX_CCM_SAI1_CLK:
		*rate = CLOCK_GetFreq(kCLOCK_AudioPllClk)
				/ (CLOCK_GetDiv(kCLOCK_Sai1PreDiv) + 1)
				/ (CLOCK_GetDiv(kCLOCK_Sai1Div) + 1);
		break;
	case IMX_CCM_SAI2_CLK:
		*rate = CLOCK_GetFreq(kCLOCK_AudioPllClk)
				/ (CLOCK_GetDiv(kCLOCK_Sai2PreDiv) + 1)
				/ (CLOCK_GetDiv(kCLOCK_Sai2Div) + 1);
		break;
	case IMX_CCM_SAI3_CLK:
		*rate = CLOCK_GetFreq(kCLOCK_AudioPllClk)
				/ (CLOCK_GetDiv(kCLOCK_Sai3PreDiv) + 1)
				/ (CLOCK_GetDiv(kCLOCK_Sai3Div) + 1);
		break;
#endif

#if DT_NODE_HAS_STATUS(DT_NODELABEL(flexspi), okay)
	case IMX_CCM_FLEXSPI_CLK:
		*rate = CLOCK_GetClockRootFreq(kCLOCK_FlexspiClkRoot);
		break;
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(flexspi2), okay)
	case IMX_CCM_FLEXSPI2_CLK:
		*rate = CLOCK_GetClockRootFreq(kCLOCK_Flexspi2ClkRoot);
		break;
#endif
#ifdef CONFIG_COUNTER_NXP_PIT
	case IMX_CCM_PIT_CLK:
		*rate = CLOCK_GetFreq(kCLOCK_PerClk);
		break;
#endif

#if DT_NODE_HAS_STATUS(DT_NODELABEL(flexio1), okay) && CONFIG_MCUX_FLEXIO
	case IMX_CCM_FLEXIO1_CLK:
	{
		uint32_t flexio_mux = CLOCK_GetMux(kCLOCK_Flexio1Mux);
		uint32_t source_clk_freq = 0;

		/* Mux: 0 audio PLL, 1 USB1 PFD2, 2 video PLL (if present),
		 * otherwise USB1 PLL.
		 */
		if (flexio_mux == 0) {
			source_clk_freq = CLOCK_GetPllFreq(kCLOCK_PllAudio);
		} else if (flexio_mux == 1) {
			source_clk_freq = CLOCK_GetUsb1PfdFreq(kCLOCK_Pfd2);
#ifdef PLL_VIDEO_OFFSET /* fsl_clock.h */
		} else if (flexio_mux == 2) {
			source_clk_freq = CLOCK_GetPllFreq(kCLOCK_PllVideo);
#endif
		} else {
			source_clk_freq = CLOCK_GetPllFreq(kCLOCK_PllUsb1);
		}

		*rate = source_clk_freq / (CLOCK_GetDiv(kCLOCK_Flexio1PreDiv) + 1)
				/ (CLOCK_GetDiv(kCLOCK_Flexio1Div) + 1);
	} break;
#endif

#if (DT_NODE_HAS_STATUS(DT_NODELABEL(flexio2), okay) \
	|| DT_NODE_HAS_STATUS(DT_NODELABEL(flexio3), okay)) && CONFIG_MCUX_FLEXIO
	case IMX_CCM_FLEXIO2_3_CLK:
	{
		uint32_t flexio_mux = CLOCK_GetMux(kCLOCK_Flexio2Mux);
		uint32_t source_clk_freq = 0;

		/* Same mux layout as FLEXIO1, using the FLEXIO2 dividers. */
		if (flexio_mux == 0) {
			source_clk_freq = CLOCK_GetPllFreq(kCLOCK_PllAudio);
		} else if (flexio_mux == 1) {
			source_clk_freq = CLOCK_GetUsb1PfdFreq(kCLOCK_Pfd2);
#ifdef PLL_VIDEO_OFFSET /* fsl_clock.h */
		} else if (flexio_mux == 2) {
			source_clk_freq = CLOCK_GetPllFreq(kCLOCK_PllVideo);
#endif
		} else {
			source_clk_freq = CLOCK_GetPllFreq(kCLOCK_PllUsb1);
		}

		*rate = source_clk_freq / (CLOCK_GetDiv(kCLOCK_Flexio2PreDiv) + 1)
				/ (CLOCK_GetDiv(kCLOCK_Flexio2Div) + 1);
	} break;
#endif

#ifdef CONFIG_SPI_MCUX_ECSPI
	case IMX_CCM_ECSPI1_CLK:
		*rate = CLOCK_GetPllFreq(kCLOCK_SystemPll1Ctrl) /
			(CLOCK_GetRootPreDivider(kCLOCK_RootEcspi1)) /
			(CLOCK_GetRootPostDivider(kCLOCK_RootEcspi1));
		break;
	case IMX_CCM_ECSPI2_CLK:
		*rate = CLOCK_GetPllFreq(kCLOCK_SystemPll1Ctrl) /
			(CLOCK_GetRootPreDivider(kCLOCK_RootEcspi2)) /
			(CLOCK_GetRootPostDivider(kCLOCK_RootEcspi2));
		break;
	case IMX_CCM_ECSPI3_CLK:
		*rate = CLOCK_GetPllFreq(kCLOCK_SystemPll1Ctrl) /
			(CLOCK_GetRootPreDivider(kCLOCK_RootEcspi3)) /
			(CLOCK_GetRootPostDivider(kCLOCK_RootEcspi3));
		break;
#endif /* CONFIG_SPI_MCUX_ECSPI */
	}

	return 0;
}
/*
* Since this function is used to reclock the FlexSPI when running in
* XIP, it must be located in RAM when MEMC Flexspi driver is enabled.
*/
#ifdef CONFIG_MEMC_MCUX_FLEXSPI
#define CCM_SET_FUNC_ATTR __ramfunc
#else
#define CCM_SET_FUNC_ATTR
#endif
/*
 * Set the rate (in Hz) of the clock identified by @p subsys.
 *
 * Only the FlexSPI root clocks are settable, and only on i.MX RT10xx
 * with the MEMC FlexSPI driver enabled (the function then lives in RAM
 * because it reclocks the flash the code may be executing from).
 * Everything else returns -ENOTSUP.
 */
static int CCM_SET_FUNC_ATTR mcux_ccm_set_subsys_rate(const struct device *dev,
			clock_control_subsys_t subsys,
			clock_control_subsys_rate_t rate)
{
	uint32_t clock_name = (uintptr_t)subsys;
	uint32_t clock_rate = (uintptr_t)rate;

	switch (clock_name) {
	case IMX_CCM_FLEXSPI_CLK:
		__fallthrough;
	case IMX_CCM_FLEXSPI2_CLK:
#if defined(CONFIG_SOC_SERIES_IMXRT10XX) && defined(CONFIG_MEMC_MCUX_FLEXSPI)
		/* The SOC is using the FlexSPI for XIP. Therefore,
		 * the FlexSPI itself must be managed within the function,
		 * which is SOC specific.
		 */
		return flexspi_clock_set_freq(clock_name, clock_rate);
#endif
	default:
		/* Silence unused variable warning */
		ARG_UNUSED(clock_rate);
		return -ENOTSUP;
	}
}
/* Clock control API vtable for the i.MX CCM driver. */
static const struct clock_control_driver_api mcux_ccm_driver_api = {
	.on = mcux_ccm_on,
	.off = mcux_ccm_off,
	.get_rate = mcux_ccm_get_subsys_rate,
	.set_rate = mcux_ccm_set_subsys_rate,
};
/*
 * Driver init. On the i.MX8Q* ADSP targets the clock API is backed by
 * the System Controller Unit, so an IPC channel to the SCU must be
 * opened first; -ENODEV is returned if that fails. On all other SoCs
 * this is a no-op.
 */
static int mcux_ccm_init(const struct device *dev)
{
#if defined(CONFIG_SOC_MIMX8QM6_ADSP) || defined(CONFIG_SOC_MIMX8QX6_ADSP)
	sc_ipc_t ipc_handle;
	int ret;

	ret = sc_ipc_open(&ipc_handle, DT_REG_ADDR(DT_NODELABEL(scu_mu)));
	if (ret != SC_ERR_NONE) {
		return -ENODEV;
	}

	CLOCK_Init(ipc_handle);
#endif
	return 0;
}

/* Single CCM instance, ready before other drivers need their clocks. */
DEVICE_DT_INST_DEFINE(0, mcux_ccm_init, NULL, NULL, NULL,
		      PRE_KERNEL_1, CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		      &mcux_ccm_driver_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_mcux_ccm.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,174 |
```c
/*
*
*/
#define DT_DRV_COMPAT nxp_lpc_syscon
#include <errno.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/dt-bindings/clock/mcux_lpc_syscon_clock.h>
#include <soc.h>
#include <fsl_clock.h>
#define LOG_LEVEL CONFIG_CLOCK_CONTROL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control);
/*
 * Gate on the clock identified by @p sub_system.
 *
 * Each enabled peripheral driver contributes its own gate handling;
 * unknown clock names are ignored. Always returns 0.
 */
static int mcux_lpc_syscon_clock_control_on(const struct device *dev,
			      clock_control_subsys_t sub_system)
{
#if defined(CONFIG_CAN_MCUX_MCAN)
	if ((uint32_t)sub_system == MCUX_MCAN_CLK) {
		CLOCK_EnableClock(kCLOCK_Mcan);
	}
#endif /* defined(CONFIG_CAN_MCUX_MCAN) */
#if defined(CONFIG_COUNTER_NXP_MRT)
	if ((uint32_t)sub_system == MCUX_MRT_CLK) {
/* The MRT gate name differs between SoC families. */
#if defined(CONFIG_SOC_FAMILY_LPC) || defined(CONFIG_SOC_SERIES_RW6XX)
		CLOCK_EnableClock(kCLOCK_Mrt);
#elif defined(CONFIG_SOC_FAMILY_NXP_IMXRT)
		CLOCK_EnableClock(kCLOCK_Mrt0);
#endif
	}
#if defined(CONFIG_SOC_SERIES_RW6XX)
	if ((uint32_t)sub_system == MCUX_FREEMRT_CLK) {
		CLOCK_EnableClock(kCLOCK_FreeMrt);
	}
#endif
#endif /* defined(CONFIG_COUNTER_NXP_MRT) */
#if defined(CONFIG_MIPI_DBI_NXP_LCDIC)
	if ((uint32_t)sub_system == MCUX_LCDIC_CLK) {
		CLOCK_EnableClock(kCLOCK_Lcdic);
	}
#endif
#if defined(CONFIG_PINCTRL_NXP_KINETIS)
	switch ((uint32_t)sub_system) {
	case MCUX_PORT0_CLK:
		CLOCK_EnableClock(kCLOCK_Port0);
		break;
	case MCUX_PORT1_CLK:
		CLOCK_EnableClock(kCLOCK_Port1);
		break;
	case MCUX_PORT2_CLK:
		CLOCK_EnableClock(kCLOCK_Port2);
		break;
	case MCUX_PORT3_CLK:
		CLOCK_EnableClock(kCLOCK_Port3);
		break;
	case MCUX_PORT4_CLK:
		CLOCK_EnableClock(kCLOCK_Port4);
		break;
	default:
		break;
	}
#endif /* defined(CONFIG_PINCTRL_NXP_KINETIS) */
#ifdef CONFIG_ETH_NXP_ENET_QOS
	if ((uint32_t)sub_system == MCUX_ENET_QOS_CLK) {
		CLOCK_EnableClock(kCLOCK_Enet);
	}
#endif
#if defined(CONFIG_CAN_MCUX_FLEXCAN)
	switch ((uint32_t)sub_system) {
	case MCUX_FLEXCAN0_CLK:
		CLOCK_EnableClock(kCLOCK_Flexcan0);
		break;
	case MCUX_FLEXCAN1_CLK:
		CLOCK_EnableClock(kCLOCK_Flexcan1);
		break;
	default:
		break;
	}
#endif /* defined(CONFIG_CAN_MCUX_FLEXCAN) */
#ifdef CONFIG_ETH_NXP_ENET
	if ((uint32_t)sub_system == MCUX_ENET_CLK) {
#ifdef CONFIG_SOC_SERIES_RW6XX
		/* RW6xx ENET needs the MCI, IPG and IPG_S gates. */
		CLOCK_EnableClock(kCLOCK_TddrMciEnetClk);
		CLOCK_EnableClock(kCLOCK_EnetIpg);
		CLOCK_EnableClock(kCLOCK_EnetIpgS);
#endif
	}
#endif
	return 0;
}
/* Gating clocks off is not supported by this driver; report success. */
static int mcux_lpc_syscon_clock_control_off(const struct device *dev,
			       clock_control_subsys_t sub_system)
{
	(void)dev;
	(void)sub_system;

	return 0;
}
/*
 * Query the frequency (in Hz) of the clock identified by @p sub_system.
 *
 * Writes the rate to @p rate and returns 0; an unrecognized clock name
 * leaves @p rate untouched and still returns 0. The compiled-in cases
 * depend on the enabled peripheral driver Kconfig options.
 */
static int mcux_lpc_syscon_clock_control_get_subsys_rate(
					const struct device *dev,
				    clock_control_subsys_t sub_system,
				    uint32_t *rate)
{
	uint32_t clock_name = (uint32_t) sub_system;

	switch (clock_name) {

#if defined(CONFIG_I2C_MCUX_FLEXCOMM) || \
		defined(CONFIG_SPI_MCUX_FLEXCOMM) || \
		defined(CONFIG_UART_MCUX_FLEXCOMM)
	case MCUX_FLEXCOMM0_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(0);
		break;
	case MCUX_FLEXCOMM1_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(1);
		break;
	case MCUX_FLEXCOMM2_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(2);
		break;
	case MCUX_FLEXCOMM3_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(3);
		break;
	case MCUX_FLEXCOMM4_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(4);
		break;
	case MCUX_FLEXCOMM5_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(5);
		break;
	case MCUX_FLEXCOMM6_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(6);
		break;
	case MCUX_FLEXCOMM7_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(7);
		break;
	case MCUX_FLEXCOMM8_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(8);
		break;
	case MCUX_FLEXCOMM9_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(9);
		break;
	case MCUX_FLEXCOMM10_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(10);
		break;
	case MCUX_FLEXCOMM11_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(11);
		break;
	case MCUX_FLEXCOMM12_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(12);
		break;
	case MCUX_FLEXCOMM13_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(13);
		break;
	/* PMIC I2C and the HS SPI ports map onto Flexcomm 14-16. */
	case MCUX_PMIC_I2C_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(15);
		break;
	case MCUX_HS_SPI_CLK:
#if defined(SYSCON_HSLSPICLKSEL_SEL_MASK)
		*rate = CLOCK_GetHsLspiClkFreq();
#else
		*rate = CLOCK_GetFlexCommClkFreq(14);
#endif
		break;
	case MCUX_HS_SPI1_CLK:
		*rate = CLOCK_GetFlexCommClkFreq(16);
		break;
#elif defined(CONFIG_NXP_LP_FLEXCOMM)
	case MCUX_FLEXCOMM0_CLK:
		*rate = CLOCK_GetLPFlexCommClkFreq(0);
		break;
	case MCUX_FLEXCOMM1_CLK:
		*rate = CLOCK_GetLPFlexCommClkFreq(1);
		break;
	case MCUX_FLEXCOMM2_CLK:
		*rate = CLOCK_GetLPFlexCommClkFreq(2);
		break;
	case MCUX_FLEXCOMM3_CLK:
		*rate = CLOCK_GetLPFlexCommClkFreq(3);
		break;
	case MCUX_FLEXCOMM4_CLK:
		*rate = CLOCK_GetLPFlexCommClkFreq(4);
		break;
	case MCUX_FLEXCOMM5_CLK:
		*rate = CLOCK_GetLPFlexCommClkFreq(5);
		break;
	case MCUX_FLEXCOMM6_CLK:
		*rate = CLOCK_GetLPFlexCommClkFreq(6);
		break;
	case MCUX_FLEXCOMM7_CLK:
		*rate = CLOCK_GetLPFlexCommClkFreq(7);
		break;
	case MCUX_FLEXCOMM8_CLK:
		*rate = CLOCK_GetLPFlexCommClkFreq(8);
		break;
	case MCUX_FLEXCOMM9_CLK:
		*rate = CLOCK_GetLPFlexCommClkFreq(9);
		break;
#endif

#if (defined(FSL_FEATURE_SOC_USDHC_COUNT) && FSL_FEATURE_SOC_USDHC_COUNT)
#if CONFIG_SOC_FAMILY_NXP_MCX
	case MCUX_USDHC1_CLK:
		*rate = CLOCK_GetUsdhcClkFreq();
		break;
#else
	case MCUX_USDHC1_CLK:
		*rate = CLOCK_GetSdioClkFreq(0);
		break;
	case MCUX_USDHC2_CLK:
		*rate = CLOCK_GetSdioClkFreq(1);
		break;
#endif
#endif

#if (defined(FSL_FEATURE_SOC_SDIF_COUNT) && \
	(FSL_FEATURE_SOC_SDIF_COUNT)) && \
	CONFIG_MCUX_SDIF
	case MCUX_SDIF_CLK:
		*rate = CLOCK_GetSdioClkFreq();
		break;
#endif

#if defined(CONFIG_CAN_MCUX_MCAN)
	case MCUX_MCAN_CLK:
		*rate = CLOCK_GetMCanClkFreq();
		break;
#endif /* defined(CONFIG_CAN_MCUX_MCAN) */

#if defined(CONFIG_COUNTER_MCUX_CTIMER) || defined(CONFIG_PWM_MCUX_CTIMER)
	case MCUX_CTIMER0_CLK:
		*rate = CLOCK_GetCTimerClkFreq(0);
		break;
	case MCUX_CTIMER1_CLK:
		*rate = CLOCK_GetCTimerClkFreq(1);
		break;
	case MCUX_CTIMER2_CLK:
		*rate = CLOCK_GetCTimerClkFreq(2);
		break;
	case MCUX_CTIMER3_CLK:
		*rate = CLOCK_GetCTimerClkFreq(3);
		break;
	case MCUX_CTIMER4_CLK:
		*rate = CLOCK_GetCTimerClkFreq(4);
		break;
#endif

	/* MRT/FreeMRT/SCTimer and the bus clock share one body: the
	 * case labels below fall through to either the core-clock read
	 * (RW6xx) or the bus-clock read.
	 */
#if defined(CONFIG_COUNTER_NXP_MRT)
	case MCUX_MRT_CLK:
#if defined(CONFIG_SOC_SERIES_RW6XX)
	case MCUX_FREEMRT_CLK:
#endif /* RW */
#endif /* MRT */
#if defined(CONFIG_PWM_MCUX_SCTIMER)
	case MCUX_SCTIMER_CLK:
#endif
#ifdef CONFIG_SOC_SERIES_RW6XX
		/* RW6XX uses core clock for SCTimer, not bus clock */
		*rate = CLOCK_GetCoreSysClkFreq();
		break;
#else
	case MCUX_BUS_CLK:
		*rate = CLOCK_GetFreq(kCLOCK_BusClk);
		break;
#endif

#if defined(CONFIG_I3C_MCUX)
	case MCUX_I3C_CLK:
		*rate = CLOCK_GetI3cClkFreq();
		break;
#endif

#if defined(CONFIG_MIPI_DSI_MCUX_2L)
	case MCUX_MIPI_DSI_DPHY_CLK:
		*rate = CLOCK_GetMipiDphyClkFreq();
		break;
	case MCUX_MIPI_DSI_ESC_CLK:
		*rate = CLOCK_GetMipiDphyEscTxClkFreq();
		break;
	case MCUX_LCDIF_PIXEL_CLK:
		*rate = CLOCK_GetDcPixelClkFreq();
		break;
#endif

#if defined(CONFIG_AUDIO_DMIC_MCUX)
	case MCUX_DMIC_CLK:
		*rate = CLOCK_GetDmicClkFreq();
		break;
#endif

#if defined(CONFIG_MEMC_MCUX_FLEXSPI)
	case MCUX_FLEXSPI_CLK:
/* Single-instance parts take no argument in the SDK API. */
#if (FSL_FEATURE_SOC_FLEXSPI_COUNT == 1)
		*rate = CLOCK_GetFlexspiClkFreq();
#else
		*rate = CLOCK_GetFlexspiClkFreq(0);
#endif
		break;
#if (FSL_FEATURE_SOC_FLEXSPI_COUNT == 2)
	case MCUX_FLEXSPI2_CLK:
		*rate = CLOCK_GetFlexspiClkFreq(1);
		break;
#endif
#endif /* CONFIG_MEMC_MCUX_FLEXSPI */

#ifdef CONFIG_ETH_NXP_ENET_QOS
	case MCUX_ENET_QOS_CLK:
		*rate = CLOCK_GetFreq(kCLOCK_BusClk);
		break;
#endif

#ifdef CONFIG_ETH_NXP_ENET
	case MCUX_ENET_CLK:
#ifdef CONFIG_SOC_SERIES_RW6XX
		*rate = CLOCK_GetTddrMciEnetClkFreq();
#endif
		break;
#endif

#if defined(CONFIG_MIPI_DBI_NXP_LCDIC)
	case MCUX_LCDIC_CLK:
		*rate = CLOCK_GetLcdClkFreq();
		break;
#endif

#if defined(CONFIG_ADC_MCUX_LPADC)
	case MCUX_LPADC1_CLK:
/* Single-instance parts take no argument in the SDK API. */
#if (FSL_FEATURE_SOC_LPADC_COUNT == 1)
		*rate = CLOCK_GetAdcClkFreq();
#else
		*rate = CLOCK_GetAdcClkFreq(0);
#endif
		break;
#if (FSL_FEATURE_SOC_LPADC_COUNT == 2)
	case MCUX_LPADC2_CLK:
		*rate = CLOCK_GetAdcClkFreq(1);
		break;
#endif
#endif /* CONFIG_ADC_MCUX_LPADC */

#if defined(CONFIG_CAN_MCUX_FLEXCAN)
	case MCUX_FLEXCAN0_CLK:
		*rate = CLOCK_GetFlexcanClkFreq(0);
		break;
	case MCUX_FLEXCAN1_CLK:
		*rate = CLOCK_GetFlexcanClkFreq(1);
		break;
#endif /* defined(CONFIG_CAN_MCUX_FLEXCAN) */

#if defined(CONFIG_MCUX_FLEXIO)
	case MCUX_FLEXIO0_CLK:
		*rate = CLOCK_GetFlexioClkFreq();
		break;
#endif /* defined(CONFIG_MCUX_FLEXIO) */
	}

	return 0;
}
#if defined(CONFIG_MEMC)
/*
 * Weak stub for flexspi_clock_set_freq(); SOC-specific code is expected
 * to override it with a real implementation.
 */
__weak int flexspi_clock_set_freq(uint32_t clock_name, uint32_t freq)
{
	(void)clock_name;
	(void)freq;

	return -ENOTSUP;
}
#endif
/*
* Since this function is used to reclock the FlexSPI when running in
* XIP, it must be located in RAM when MEMC driver is enabled.
*/
#ifdef CONFIG_MEMC
#define SYSCON_SET_FUNC_ATTR __ramfunc
#else
#define SYSCON_SET_FUNC_ATTR
#endif
/*
 * Set the rate (in Hz) of the clock identified by @p subsys.
 *
 * Supports the FlexSPI root (delegated to the SOC-specific
 * flexspi_clock_set_freq when CONFIG_MEMC is on) and, where enabled,
 * the LCDIC divider. Everything else returns -ENOTSUP.
 */
static int SYSCON_SET_FUNC_ATTR
	mcux_lpc_syscon_clock_control_set_subsys_rate(const struct device *dev,
			clock_control_subsys_t subsys,
			clock_control_subsys_rate_t rate)
{
	uint32_t clock_name = (uintptr_t)subsys;
	uint32_t clock_rate = (uintptr_t)rate;

	switch (clock_name) {
	case MCUX_FLEXSPI_CLK:
#if defined(CONFIG_MEMC)
		/* The SOC is using the FlexSPI for XIP. Therefore,
		 * the FlexSPI itself must be managed within the function,
		 * which is SOC specific.
		 */
		return flexspi_clock_set_freq(clock_name, clock_rate);
#endif
	/* NOTE(review): with CONFIG_MEMC disabled the FLEXSPI case falls
	 * through to the LCDIC case below — confirm this is intended.
	 */
#if defined(CONFIG_MIPI_DBI_NXP_LCDIC)
	case MCUX_LCDIC_CLK:
		/* Set LCDIC clock div: recover the undivided root rate,
		 * then program the divider for the requested rate.
		 */
		uint32_t root_rate = (CLOCK_GetLcdClkFreq() *
			((CLKCTL0->LCDFCLKDIV & CLKCTL0_LCDFCLKDIV_DIV_MASK) + 1));
		CLOCK_SetClkDiv(kCLOCK_DivLcdClk, (root_rate / clock_rate));
		return 0;
#endif
	default:
		/* Silence unused variable warning */
		ARG_UNUSED(clock_rate);
		return -ENOTSUP;
	}
}
/* Clock control API vtable for the LPC SYSCON driver. */
static const struct clock_control_driver_api mcux_lpc_syscon_api = {
	.on = mcux_lpc_syscon_clock_control_on,
	.off = mcux_lpc_syscon_clock_control_off,
	.get_rate = mcux_lpc_syscon_clock_control_get_subsys_rate,
	.set_rate = mcux_lpc_syscon_clock_control_set_subsys_rate,
};
/* Instantiate one SYSCON clock controller per enabled devicetree node;
 * no init function or data is needed, only the API table.
 */
#define LPC_CLOCK_INIT(n) \
	\
DEVICE_DT_INST_DEFINE(n, \
		    NULL, \
		    NULL, \
		    NULL, NULL, \
		    PRE_KERNEL_1, CONFIG_CLOCK_CONTROL_INIT_PRIORITY, \
		    &mcux_lpc_syscon_api);

DT_INST_FOREACH_STATUS_OKAY(LPC_CLOCK_INIT)
``` | /content/code_sandbox/drivers/clock_control/clock_control_mcux_syscon.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,335 |
```c
/*
*
*/
#include <soc.h>
#include <stm32_ll_bus.h>
#include <stm32_ll_pwr.h>
#include <stm32_ll_rcc.h>
#include <stm32_ll_system.h>
#include <stm32_ll_utils.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include "clock_stm32_ll_common.h"
#include "clock_stm32_ll_mco.h"
#include "stm32_hsem.h"
/* Macros to fill up prescaler values */
/*
 * Each prescaler is built from a helper (z_/fn_ prefix) plus a public
 * wrapper: the extra indirection forces the argument to be macro-expanded
 * before it is token-pasted onto the LL_RCC_* identifier.
 */
#define z_hsi_divider(v) LL_RCC_HSI_DIV_ ## v
#define hsi_divider(v) z_hsi_divider(v)
#define fn_ahb_prescaler(v) LL_RCC_SYSCLK_DIV_ ## v
#define ahb_prescaler(v) fn_ahb_prescaler(v)
#define fn_apb1_prescaler(v) LL_RCC_APB1_DIV_ ## v
#define apb1_prescaler(v) fn_apb1_prescaler(v)
#if DT_NODE_HAS_PROP(DT_NODELABEL(rcc), apb2_prescaler)
#define fn_apb2_prescaler(v) LL_RCC_APB2_DIV_ ## v
#define apb2_prescaler(v) fn_apb2_prescaler(v)
#endif
/*
 * ADC prescaler encoding differs across series; where supported, the
 * value 0 selects HCLK as the ADC kernel clock (see COND_CODE_1 below).
 */
#if defined(RCC_CFGR_ADCPRE)
#define z_adc12_prescaler(v) LL_RCC_ADC_CLKSRC_PCLK2_DIV_ ## v
#define adc12_prescaler(v) z_adc12_prescaler(v)
#elif defined(RCC_CFGR2_ADC1PRES)
#define z_adc12_prescaler(v) \
	COND_CODE_1(IS_EQ(v, 0), \
		    LL_RCC_ADC1_CLKSRC_HCLK, \
		    LL_RCC_ADC1_CLKSRC_PLL_DIV_ ## v)
#define adc12_prescaler(v) z_adc12_prescaler(v)
#else
#define z_adc12_prescaler(v) \
	COND_CODE_1(IS_EQ(v, 0), \
		    (LL_RCC_ADC12_CLKSRC_HCLK), \
		    (LL_RCC_ADC12_CLKSRC_PLL_DIV_ ## v))
#define adc12_prescaler(v) z_adc12_prescaler(v)
#define z_adc34_prescaler(v) \
	COND_CODE_1(IS_EQ(v, 0), \
		    (LL_RCC_ADC34_CLKSRC_HCLK), \
		    (LL_RCC_ADC34_CLKSRC_PLL_DIV_ ## v))
#define adc34_prescaler(v) z_adc34_prescaler(v)
#endif
/* Pick the HCLK-calculation helper matching the AHB bus present on this
 * series (AHB4 > AHB3 > plain AHB, by devicetree property).
 */
#if DT_NODE_HAS_PROP(DT_NODELABEL(rcc), ahb4_prescaler)
#define RCC_CALC_FLASH_FREQ __LL_RCC_CALC_HCLK4_FREQ
#define GET_CURRENT_FLASH_PRESCALER LL_RCC_GetAHB4Prescaler
#elif DT_NODE_HAS_PROP(DT_NODELABEL(rcc), ahb3_prescaler)
#define RCC_CALC_FLASH_FREQ __LL_RCC_CALC_HCLK3_FREQ
#define GET_CURRENT_FLASH_PRESCALER LL_RCC_GetAHB3Prescaler
#else
#define RCC_CALC_FLASH_FREQ __LL_RCC_CALC_HCLK_FREQ
#define GET_CURRENT_FLASH_PRESCALER LL_RCC_GetAHBPrescaler
#endif
/* PLL P/Q output-enable bits exist only on some series; expand to a
 * no-op where absent so callers need no conditionals.
 */
#if defined(RCC_PLLCFGR_PLLPEN)
#define RCC_PLLP_ENABLE() SET_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLPEN)
#else
#define RCC_PLLP_ENABLE()
#endif /* RCC_PLLCFGR_PLLPEN */
#if defined(RCC_PLLCFGR_PLLQEN)
#define RCC_PLLQ_ENABLE() SET_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLQEN)
#else
#define RCC_PLLQ_ENABLE()
#endif /* RCC_PLLCFGR_PLLQEN */
/**
 * @brief Return frequency for pll with 2 dividers and a multiplier
 *
 * Keeps the divide-before-multiply evaluation order of the original
 * expression, so intermediate values stay small and rounding matches.
 */
__unused
static uint32_t get_pll_div_frequency(uint32_t pllsrc_freq,
				      int pllm_div,
				      int plln_mul,
				      int pllout_div)
{
	uint32_t vco_input;
	uint32_t vco_output;

	__ASSERT_NO_MSG(pllm_div && pllout_div);

	vco_input = pllsrc_freq / pllm_div;
	vco_output = vco_input * plln_mul;

	return vco_output / pllout_div;
}
/** @brief Divide a parent clock by its bus prescaler value. */
static uint32_t get_bus_clock(uint32_t clock, uint32_t prescaler)
{
	uint32_t bus_freq = clock / prescaler;

	return bus_freq;
}
/**
 * @brief Return the current MSI frequency in Hz, or 0 when MSI is not
 *        enabled in this configuration.
 */
__unused
static uint32_t get_msi_frequency(void)
{
#if defined(STM32_MSI_ENABLED)
#if !defined(LL_RCC_MSIRANGESEL_RUN)
	/* Series with a single MSI range selector */
	return __LL_RCC_CALC_MSI_FREQ(LL_RCC_MSI_GetRange());
#else
	/* Series where the run-mode MSI range must be selected explicitly */
	return __LL_RCC_CALC_MSI_FREQ(LL_RCC_MSIRANGESEL_RUN,
				      LL_RCC_MSI_GetRange());
#endif
#endif
	/* MSI disabled: report 0 Hz */
	return 0;
}
/** @brief Verifies clock is part of active clock configuration
 *
 * @param src_clk STM32_SRC_* identifier of a domain clock source
 * @return 0 if the source is enabled in the current configuration,
 *         -ENOTSUP if it is disabled or unknown.
 */
__unused
static int enabled_clock(uint32_t src_clk)
{
	int r = 0;

	switch (src_clk) {
	/* SYSCLK and PCLK are always available when defined */
#if defined(STM32_SRC_SYSCLK)
	case STM32_SRC_SYSCLK:
		break;
#endif /* STM32_SRC_SYSCLK */
#if defined(STM32_SRC_PCLK)
	case STM32_SRC_PCLK:
		break;
#endif /* STM32_SRC_PCLK */
	/* Oscillators: available only when enabled by the configuration */
#if defined(STM32_SRC_HSE)
	case STM32_SRC_HSE:
		if (!IS_ENABLED(STM32_HSE_ENABLED)) {
			r = -ENOTSUP;
		}
		break;
#endif /* STM32_SRC_HSE */
#if defined(STM32_SRC_HSI)
	case STM32_SRC_HSI:
		if (!IS_ENABLED(STM32_HSI_ENABLED)) {
			r = -ENOTSUP;
		}
		break;
#endif /* STM32_SRC_HSI */
#if defined(STM32_SRC_LSE)
	case STM32_SRC_LSE:
		if (!IS_ENABLED(STM32_LSE_ENABLED)) {
			r = -ENOTSUP;
		}
		break;
#endif /* STM32_SRC_LSE */
#if defined(STM32_SRC_LSI)
	case STM32_SRC_LSI:
		if (!IS_ENABLED(STM32_LSI_ENABLED)) {
			r = -ENOTSUP;
		}
		break;
#endif /* STM32_SRC_LSI */
#if defined(STM32_SRC_HSI14)
	case STM32_SRC_HSI14:
		if (!IS_ENABLED(STM32_HSI14_ENABLED)) {
			r = -ENOTSUP;
		}
		break;
#endif /* STM32_SRC_HSI14 */
#if defined(STM32_SRC_HSI48)
	case STM32_SRC_HSI48:
		if (!IS_ENABLED(STM32_HSI48_ENABLED)) {
			r = -ENOTSUP;
		}
		break;
#endif /* STM32_SRC_HSI48 */
#if defined(STM32_SRC_MSI)
	case STM32_SRC_MSI:
		if (!IS_ENABLED(STM32_MSI_ENABLED)) {
			r = -ENOTSUP;
		}
		break;
#endif /* STM32_SRC_MSI */
	/* PLL outputs: available only when the matching PLL/output is on */
#if defined(STM32_SRC_PLLCLK)
	case STM32_SRC_PLLCLK:
		if (!IS_ENABLED(STM32_PLL_ENABLED)) {
			r = -ENOTSUP;
		}
		break;
#endif /* STM32_SRC_PLLCLK */
#if defined(STM32_SRC_PLL_P)
	case STM32_SRC_PLL_P:
		if (!IS_ENABLED(STM32_PLL_P_ENABLED)) {
			r = -ENOTSUP;
		}
		break;
#endif /* STM32_SRC_PLL_P */
#if defined(STM32_SRC_PLL_Q)
	case STM32_SRC_PLL_Q:
		if (!IS_ENABLED(STM32_PLL_Q_ENABLED)) {
			r = -ENOTSUP;
		}
		break;
#endif /* STM32_SRC_PLL_Q */
#if defined(STM32_SRC_PLL_R)
	case STM32_SRC_PLL_R:
		if (!IS_ENABLED(STM32_PLL_R_ENABLED)) {
			r = -ENOTSUP;
		}
		break;
#endif /* STM32_SRC_PLL_R */
#if defined(STM32_SRC_PLLI2S_R)
	case STM32_SRC_PLLI2S_R:
		if (!IS_ENABLED(STM32_PLLI2S_R_ENABLED)) {
			r = -ENOTSUP;
		}
		break;
#endif /* STM32_SRC_PLLI2S_R */
	default:
		/* Unknown domain clock source */
		return -ENOTSUP;
	}

	return r;
}
/**
 * @brief Enable a gated peripheral clock.
 *
 * @param dev        Unused.
 * @param sub_system stm32_pclken: bus = RCC enable-register offset,
 *                   enr = enable bit(s) to set.
 * @return 0 on success, -ENOTSUP for a non-peripheral bus value.
 */
static inline int stm32_clock_control_on(const struct device *dev,
					 clock_control_subsys_t sub_system)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);
	volatile int dummy;

	ARG_UNUSED(dev);

	if (!IN_RANGE(pclken->bus, STM32_PERIPH_BUS_MIN, STM32_PERIPH_BUS_MAX)) {
		/* Not a gated peripheral clock bus: refuse the request */
		return -ENOTSUP;
	}

	sys_set_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + pclken->bus, pclken->enr);

	/* Read the register back so the enable has propagated before we
	 * return. See (for example) RM0440 7.2.17.
	 */
	dummy = sys_read32(DT_REG_ADDR(DT_NODELABEL(rcc)) + pclken->bus);
	UNUSED(dummy);

	return 0;
}
/**
 * @brief Disable a gated peripheral clock.
 *
 * @param dev        Unused.
 * @param sub_system stm32_pclken: bus = RCC enable-register offset,
 *                   enr = enable bit(s) to clear.
 * @return 0 on success, -ENOTSUP for a non-peripheral bus value.
 */
static inline int stm32_clock_control_off(const struct device *dev,
					  clock_control_subsys_t sub_system)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);

	ARG_UNUSED(dev);

	if (!IN_RANGE(pclken->bus, STM32_PERIPH_BUS_MIN, STM32_PERIPH_BUS_MAX)) {
		/* Not a gated peripheral clock bus: refuse the request */
		return -ENOTSUP;
	}

	sys_clear_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + pclken->bus, pclken->enr);

	return 0;
}
/**
 * @brief Select the domain/kernel clock source for a peripheral.
 *
 * @param dev        Unused.
 * @param sub_system stm32_pclken: bus = STM32_SRC_* source to use,
 *                   enr = packed selection register/mask/shift/value
 *                   descriptor (see STM32_CLOCK_*_GET macros).
 * @param data       Unused.
 * @return 0 on success, negative errno if the source is unavailable.
 */
static inline int stm32_clock_control_configure(const struct device *dev,
						clock_control_subsys_t sub_system,
						void *data)
{
#if defined(STM32_SRC_SYSCLK)
	/* At least one alt src clock available */
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);
	int err;

	ARG_UNUSED(dev);
	ARG_UNUSED(data);

	err = enabled_clock(pclken->bus);
	if (err < 0) {
		/* Attempt to configure a src clock not available or not valid */
		return err;
	}

	if (pclken->enr == NO_SEL) {
		/* Domain clock is fixed. Nothing to set. Exit */
		return 0;
	}

	/* Clear the selection field, then program the requested source */
	sys_clear_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + STM32_CLOCK_REG_GET(pclken->enr),
		       STM32_CLOCK_MASK_GET(pclken->enr) << STM32_CLOCK_SHIFT_GET(pclken->enr));
	sys_set_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + STM32_CLOCK_REG_GET(pclken->enr),
		     STM32_CLOCK_VAL_GET(pclken->enr) << STM32_CLOCK_SHIFT_GET(pclken->enr));

	return 0;
#else
	/* No src clock available: Not supported */
	return -ENOTSUP;
#endif
}
/**
 * @brief Get the rate, in Hz, of a bus clock or domain clock source.
 *
 * @param clock      Unused.
 * @param sub_system stm32_pclken whose bus field names the bus
 *                   (STM32_CLOCK_BUS_*) or source (STM32_SRC_*) to query.
 * @param rate       Output: frequency in Hz.
 * @return 0 on success, negative errno otherwise.
 */
static int stm32_clock_control_get_subsys_rate(const struct device *clock,
					       clock_control_subsys_t sub_system,
					       uint32_t *rate)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);
	/*
	 * Get AHB Clock (= SystemCoreClock = SYSCLK/prescaler)
	 * SystemCoreClock is preferred to CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC
	 * since it will be updated after clock configuration and hence
	 * more likely to contain actual clock speed
	 */
	uint32_t ahb_clock = SystemCoreClock;
	uint32_t apb1_clock = get_bus_clock(ahb_clock, STM32_APB1_PRESCALER);
#if DT_NODE_HAS_PROP(DT_NODELABEL(rcc), apb2_prescaler)
	uint32_t apb2_clock = get_bus_clock(ahb_clock, STM32_APB2_PRESCALER);
#elif defined(STM32_CLOCK_BUS_APB2)
	/* APB2 bus exists, but w/o dedicated prescaler */
	uint32_t apb2_clock = apb1_clock;
#endif
#if DT_NODE_HAS_PROP(DT_NODELABEL(rcc), ahb3_prescaler)
	uint32_t ahb3_clock = get_bus_clock(ahb_clock * STM32_CPU1_PRESCALER,
					    STM32_AHB3_PRESCALER);
#elif defined(STM32_CLOCK_BUS_AHB3)
	/* AHB3 bus exists, but w/o dedicated prescaler */
	uint32_t ahb3_clock = ahb_clock;
#endif
#if defined(STM32_SRC_PCLK)
	if (pclken->bus == STM32_SRC_PCLK) {
		/* STM32_SRC_PCLK can't be used to request a subsys freq */
		/* Use STM32_CLOCK_BUS_FOO instead. */
		return -ENOTSUP;
	}
#endif
	ARG_UNUSED(clock);

	switch (pclken->bus) {
	/* Bus clocks */
	case STM32_CLOCK_BUS_AHB1:
#if defined(STM32_CLOCK_BUS_AHB2)
	case STM32_CLOCK_BUS_AHB2:
#endif
#if defined(STM32_CLOCK_BUS_IOP)
	case STM32_CLOCK_BUS_IOP:
#endif
		*rate = ahb_clock;
		break;
#if defined(STM32_CLOCK_BUS_AHB3)
	case STM32_CLOCK_BUS_AHB3:
		*rate = ahb3_clock;
		break;
#endif
	case STM32_CLOCK_BUS_APB1:
#if defined(STM32_CLOCK_BUS_APB1_2)
	case STM32_CLOCK_BUS_APB1_2:
#endif
		*rate = apb1_clock;
		break;
#if defined(STM32_CLOCK_BUS_APB2)
	case STM32_CLOCK_BUS_APB2:
		*rate = apb2_clock;
		break;
#endif
#if defined(STM32_CLOCK_BUS_APB3)
	case STM32_CLOCK_BUS_APB3:
		/* STM32WL: AHB3 and APB3 share the same clock and prescaler. */
		*rate = ahb3_clock;
		break;
#endif
	/* Domain clock sources */
#if defined(STM32_SRC_SYSCLK)
	case STM32_SRC_SYSCLK:
		*rate = SystemCoreClock * STM32_CORE_PRESCALER;
		break;
#endif
#if defined(STM32_SRC_PLLCLK) & defined(STM32_SYSCLK_SRC_PLL)
	case STM32_SRC_PLLCLK:
		if (get_pllout_frequency() == 0) {
			return -EIO;
		}
		*rate = get_pllout_frequency();
		break;
#endif
#if defined(STM32_SRC_PLL_P) & STM32_PLL_P_ENABLED
	case STM32_SRC_PLL_P:
		*rate = get_pll_div_frequency(get_pllsrc_frequency(),
					      STM32_PLL_M_DIVISOR,
					      STM32_PLL_N_MULTIPLIER,
					      STM32_PLL_P_DIVISOR);
		break;
#endif
#if defined(STM32_SRC_PLL_Q) & STM32_PLL_Q_ENABLED
	case STM32_SRC_PLL_Q:
		*rate = get_pll_div_frequency(get_pllsrc_frequency(),
					      STM32_PLL_M_DIVISOR,
					      STM32_PLL_N_MULTIPLIER,
					      STM32_PLL_Q_DIVISOR);
		break;
#endif
#if defined(STM32_SRC_PLL_R) & STM32_PLL_R_ENABLED
	case STM32_SRC_PLL_R:
		*rate = get_pll_div_frequency(get_pllsrc_frequency(),
					      STM32_PLL_M_DIVISOR,
					      STM32_PLL_N_MULTIPLIER,
					      STM32_PLL_R_DIVISOR);
		break;
#endif
#if defined(STM32_SRC_PLLI2S_R) & STM32_PLLI2S_ENABLED
	case STM32_SRC_PLLI2S_R:
		*rate = get_pll_div_frequency(get_pllsrc_frequency(),
					      STM32_PLLI2S_M_DIVISOR,
					      STM32_PLLI2S_N_MULTIPLIER,
					      STM32_PLLI2S_R_DIVISOR);
		break;
#endif /* STM32_SRC_PLLI2S_R */
	/* PLLSAI1x not supported yet */
	/* PLLSAI2x not supported yet */
	/* Fixed oscillators */
#if defined(STM32_SRC_LSE)
	case STM32_SRC_LSE:
		*rate = STM32_LSE_FREQ;
		break;
#endif
#if defined(STM32_SRC_LSI)
	case STM32_SRC_LSI:
		*rate = STM32_LSI_FREQ;
		break;
#endif
#if defined(STM32_SRC_HSI)
	case STM32_SRC_HSI:
		*rate = STM32_HSI_FREQ;
		break;
#endif
#if defined(STM32_SRC_MSI)
	case STM32_SRC_MSI:
		*rate = get_msi_frequency();
		break;
#endif
#if defined(STM32_SRC_HSE)
	case STM32_SRC_HSE:
		*rate = STM32_HSE_FREQ;
		break;
#endif
#if defined(STM32_HSI48_ENABLED)
	case STM32_SRC_HSI48:
		*rate = STM32_HSI48_FREQ;
		break;
#endif /* STM32_HSI48_ENABLED */
	default:
		return -ENOTSUP;
	}

	return 0;
}
/**
 * @brief Report whether a clock is currently on.
 *
 * @param dev        Unused.
 * @param sub_system stm32_pclken: either a gated peripheral clock
 *                   (bus in the peripheral range, enr = enable bits)
 *                   or a domain clock source (bus = STM32_SRC_*).
 * @return CLOCK_CONTROL_STATUS_ON / CLOCK_CONTROL_STATUS_OFF.
 */
static enum clock_control_status stm32_clock_control_get_status(const struct device *dev,
								clock_control_subsys_t sub_system)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)sub_system;

	ARG_UNUSED(dev);

	/* Note: use the boolean result directly instead of comparing with
	 * `== true`, which is fragile against non-canonical truth values.
	 */
	if (IN_RANGE(pclken->bus, STM32_PERIPH_BUS_MIN, STM32_PERIPH_BUS_MAX)) {
		/* Gated clocks: ON iff all requested enable bits are set */
		if ((sys_read32(DT_REG_ADDR(DT_NODELABEL(rcc)) + pclken->bus) & pclken->enr)
		    == pclken->enr) {
			return CLOCK_CONTROL_STATUS_ON;
		}
		return CLOCK_CONTROL_STATUS_OFF;
	}

	/* Domain clock sources: ON iff part of the active configuration */
	if (enabled_clock(pclken->bus) == 0) {
		return CLOCK_CONTROL_STATUS_ON;
	}

	return CLOCK_CONTROL_STATUS_OFF;
}
/* Zephyr clock_control driver API implementation for the STM32 RCC */
static const struct clock_control_driver_api stm32_clock_control_api = {
	.on = stm32_clock_control_on,
	.off = stm32_clock_control_off,
	.get_rate = stm32_clock_control_get_subsys_rate,
	.get_status = stm32_clock_control_get_status,
	.configure = stm32_clock_control_configure,
};
/*
 * Unconditionally switch the system clock source to HSI.
 */
__unused
static void stm32_clock_switch_to_hsi(void)
{
	/* Enable HSI if not enabled */
	if (LL_RCC_HSI_IsReady() != 1) {
		/* Enable HSI */
		LL_RCC_HSI_Enable();
		while (LL_RCC_HSI_IsReady() != 1) {
		/* Wait for HSI ready */
		}
	}

	/* Set HSI as SYSCLK source */
	LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_HSI);
	while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_HSI) {
		/* Wait until HSI is the active system clock source */
	}
}
/**
 * @brief Configure and start the PLLs selected by the build configuration.
 *
 * Ordering matters: SYSCLK is first moved off the PLL (if needed), then
 * PLL2 is reconfigured before the main PLL since it may feed the latter.
 */
__unused
static void set_up_plls(void)
{
#if defined(STM32_PLL_ENABLED)
	/*
	 * Case of chain-loaded applications:
	 * Switch to HSI and disable the PLL before configuration.
	 * (Switching to HSI makes sure we have a SYSCLK source in
	 * case we're currently running from the PLL we're about to
	 * turn off and reconfigure.)
	 *
	 */
	if (LL_RCC_GetSysClkSource() == LL_RCC_SYS_CLKSOURCE_STATUS_PLL) {
		stm32_clock_switch_to_hsi();
		LL_RCC_SetAHBPrescaler(LL_RCC_SYSCLK_DIV_1);
	}
	LL_RCC_PLL_Disable();
#endif
#if defined(STM32_PLL2_ENABLED)
	/*
	 * Disable PLL2 after switching to HSI for SysClk
	 * and disabling PLL, but before enabling PLL again,
	 * since PLL source can be PLL2.
	 */
	LL_RCC_PLL2_Disable();
	config_pll2();
	/* Enable PLL2 */
	LL_RCC_PLL2_Enable();
	while (LL_RCC_PLL2_IsReady() != 1U) {
	/* Wait for PLL2 ready */
	}
#endif /* STM32_PLL2_ENABLED */
#if defined(STM32_PLL_ENABLED)
	/* Program the optional P/Q output dividers before main PLL setup */
#if defined(STM32_SRC_PLL_P) & STM32_PLL_P_ENABLED
	MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLP, pllp(STM32_PLL_P_DIVISOR));
	RCC_PLLP_ENABLE();
#endif
#if defined(STM32_SRC_PLL_Q) & STM32_PLL_Q_ENABLED
	MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLQ, pllq(STM32_PLL_Q_DIVISOR));
	RCC_PLLQ_ENABLE();
#endif
	config_pll_sysclock();
	/* Enable PLL */
	LL_RCC_PLL_Enable();
	while (LL_RCC_PLL_IsReady() != 1U) {
	/* Wait for PLL ready */
	}
#endif /* STM32_PLL_ENABLED */
#if defined(STM32_PLLI2S_ENABLED)
	config_plli2s();
	/* Enable PLL */
	LL_RCC_PLLI2S_Enable();
	while (LL_RCC_PLLI2S_IsReady() != 1U) {
	/* Wait for PLL ready */
	}
#endif /* STM32_PLLI2S_ENABLED */
}
/**
 * @brief Enable the fixed-rate oscillators selected by the configuration
 *        (HSE/HSI/MSI/LSI/LSE/HSI14/HSI48) and busy-wait until each is ready.
 */
static void set_up_fixed_clock_sources(void)
{
	if (IS_ENABLED(STM32_HSE_ENABLED)) {
#if defined(STM32_HSE_BYPASS)
		/* Check if need to enable HSE bypass feature or not */
		if (IS_ENABLED(STM32_HSE_BYPASS)) {
			LL_RCC_HSE_EnableBypass();
		} else {
			LL_RCC_HSE_DisableBypass();
		}
#endif
#if STM32_HSE_TCXO
		LL_RCC_HSE_EnableTcxo();
#endif
#if STM32_HSE_DIV2
		LL_RCC_HSE_EnableDiv2();
#endif
		/* Enable HSE */
		LL_RCC_HSE_Enable();
		while (LL_RCC_HSE_IsReady() != 1) {
		/* Wait for HSE ready */
		}
		/* Check if we need to enable HSE clock security system or not */
#if STM32_HSE_CSS
		z_arm_nmi_set_handler(HAL_RCC_NMI_IRQHandler);
		LL_RCC_HSE_EnableCSS();
#endif /* STM32_HSE_CSS */
	}
	if (IS_ENABLED(STM32_HSI_ENABLED)) {
		/* Enable HSI if not enabled */
		if (LL_RCC_HSI_IsReady() != 1) {
			/* Enable HSI */
			LL_RCC_HSI_Enable();
			while (LL_RCC_HSI_IsReady() != 1) {
			/* Wait for HSI ready */
			}
		}
#if STM32_HSI_DIV_ENABLED
		LL_RCC_SetHSIDiv(hsi_divider(STM32_HSI_DIVISOR));
#endif
	}
#if defined(STM32_MSI_ENABLED)
	if (IS_ENABLED(STM32_MSI_ENABLED)) {
		/* Set MSI Range */
#if defined(RCC_CR_MSIRGSEL)
		LL_RCC_MSI_EnableRangeSelection();
#endif /* RCC_CR_MSIRGSEL */
		/* The MSI range field lives in different registers per series */
#if defined(CONFIG_SOC_SERIES_STM32L0X) || defined(CONFIG_SOC_SERIES_STM32L1X)
		LL_RCC_MSI_SetRange(STM32_MSI_RANGE << RCC_ICSCR_MSIRANGE_Pos);
#else
		LL_RCC_MSI_SetRange(STM32_MSI_RANGE << RCC_CR_MSIRANGE_Pos);
#endif /* CONFIG_SOC_SERIES_STM32L0X || CONFIG_SOC_SERIES_STM32L1X */
#if STM32_MSI_PLL_MODE
		/* Enable MSI hardware auto calibration */
		LL_RCC_MSI_EnablePLLMode();
#endif
		LL_RCC_MSI_SetCalibTrimming(0);
		/* Enable MSI if not enabled */
		if (LL_RCC_MSI_IsReady() != 1) {
			/* Enable MSI */
			LL_RCC_MSI_Enable();
			while (LL_RCC_MSI_IsReady() != 1) {
				/* Wait for MSI ready */
			}
		}
	}
#endif /* STM32_MSI_ENABLED */
	if (IS_ENABLED(STM32_LSI_ENABLED)) {
#if defined(CONFIG_SOC_SERIES_STM32WBX)
		LL_RCC_LSI1_Enable();
		while (LL_RCC_LSI1_IsReady() != 1) {
			/* Wait for LSI1 ready */
		}
#else
		LL_RCC_LSI_Enable();
		while (LL_RCC_LSI_IsReady() != 1) {
			/* Wait for LSI ready */
		}
#endif
	}
	if (IS_ENABLED(STM32_LSE_ENABLED)) {
		/* LSE belongs to the back-up domain, enable access.*/
		z_stm32_hsem_lock(CFG_HW_RCC_SEMID, HSEM_LOCK_DEFAULT_RETRY);
#if defined(PWR_CR_DBP) || defined(PWR_CR1_DBP) || defined(PWR_DBPR_DBP)
		/* Set the DBP bit in the Power control register 1 (PWR_CR1) */
		LL_PWR_EnableBkUpAccess();
		while (!LL_PWR_IsEnabledBkUpAccess()) {
			/* Wait for Backup domain access */
		}
#endif /* PWR_CR_DBP || PWR_CR1_DBP || PWR_DBPR_DBP */
#if STM32_LSE_DRIVING
		/* Configure driving capability */
		LL_RCC_LSE_SetDriveCapability(STM32_LSE_DRIVING << RCC_BDCR_LSEDRV_Pos);
#endif
		if (IS_ENABLED(STM32_LSE_BYPASS)) {
			/* Configure LSE bypass */
			LL_RCC_LSE_EnableBypass();
		}
		/* Enable LSE Oscillator (32.768 kHz) */
		LL_RCC_LSE_Enable();
		while (!LL_RCC_LSE_IsReady()) {
			/* Wait for LSE ready */
		}
#ifdef RCC_BDCR_LSESYSEN
		LL_RCC_LSE_EnablePropagation();
		/* Wait till LSESYS is ready */
		while (!LL_RCC_LSE_IsPropagationReady()) {
		}
#endif /* RCC_BDCR_LSESYSEN */
#if defined(PWR_CR_DBP) || defined(PWR_CR1_DBP) || defined(PWR_DBPR_DBP)
		LL_PWR_DisableBkUpAccess();
#endif /* PWR_CR_DBP || PWR_CR1_DBP || PWR_DBPR_DBP */
		z_stm32_hsem_unlock(CFG_HW_RCC_SEMID);
	}
#if defined(STM32_HSI14_ENABLED)
	/* For all series with HSI 14 clock support */
	if (IS_ENABLED(STM32_HSI14_ENABLED)) {
		LL_RCC_HSI14_Enable();
		while (LL_RCC_HSI14_IsReady() != 1) {
		}
	}
#endif /* STM32_HSI14_ENABLED */
#if defined(STM32_HSI48_ENABLED)
	/* For all series with HSI 48 clock support */
	if (IS_ENABLED(STM32_HSI48_ENABLED)) {
#if defined(CONFIG_SOC_SERIES_STM32L0X)
		/*
		 * HSI48 requires VREFINT (see RM0376 section 7.2.4).
		 * The SYSCFG is needed to control VREFINT, so clock it.
		 */
		LL_APB2_GRP1_EnableClock(LL_APB2_GRP1_PERIPH_SYSCFG);
		LL_SYSCFG_VREFINT_EnableHSI48();
#endif /* CONFIG_SOC_SERIES_STM32L0X */
		/*
		 * STM32WB: Lock the CLK48 HSEM and do not release to prevent
		 * M0 core to disable this clock (used for RNG on M0).
		 * No-op on other series.
		 */
		z_stm32_hsem_lock(CFG_HW_CLK48_CONFIG_SEMID, HSEM_LOCK_DEFAULT_RETRY);
		LL_RCC_HSI48_Enable();
		while (LL_RCC_HSI48_IsReady() != 1) {
		}
	}
#endif /* STM32_HSI48_ENABLED */
}
/**
 * @brief Initialize clocks for the stm32
 *
 * This routine is called to enable and configure the clocks and PLL
 * of the soc on the board. It depends on the board definition.
 * This function is called on the startup and also to restore the config
 * when exiting for low power mode.
 *
 * @param dev clock device struct
 *
 * @return 0
 */
int stm32_clock_control_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	/* Some clocks would be activated by default */
	config_enable_default_clocks();

	config_regulator_voltage(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);
#if defined(FLASH_ACR_LATENCY)
	uint32_t old_flash_freq;
	uint32_t new_flash_freq;

	old_flash_freq = RCC_CALC_FLASH_FREQ(HAL_RCC_GetSysClockFreq(),
					     GET_CURRENT_FLASH_PRESCALER());
	new_flash_freq = RCC_CALC_FLASH_FREQ(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC,
					     STM32_FLASH_PRESCALER);
	/* If HCLK increases, set flash latency before any clock setting */
	if (old_flash_freq < new_flash_freq) {
		LL_SetFlashLatency(new_flash_freq);
	}
#endif /* FLASH_ACR_LATENCY */
	/* Set up individual enabled clocks */
	set_up_fixed_clock_sources();
	/* Set up PLLs */
	set_up_plls();
	/* Undershoot prevention: temporarily run AHB at SYSCLK/2 while
	 * switching, when the target frequency exceeds 80 MHz and no AHB
	 * prescaler was requested.
	 */
	if (DT_PROP(DT_NODELABEL(rcc), undershoot_prevention) &&
	    (STM32_CORE_PRESCALER == LL_RCC_SYSCLK_DIV_1) &&
	    (MHZ(80) < CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC)) {
		LL_RCC_SetAHBPrescaler(LL_RCC_SYSCLK_DIV_2);
	} else {
		LL_RCC_SetAHBPrescaler(ahb_prescaler(STM32_CORE_PRESCALER));
	}
#if STM32_SYSCLK_SRC_PLL
	/* Set PLL as System Clock Source */
	LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_PLL);
	while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_PLL) {
	}
#elif STM32_SYSCLK_SRC_HSE
	/* Set HSE as SYSCLK source */
	LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_HSE);
	while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_HSE) {
	}
#elif STM32_SYSCLK_SRC_MSI
	/* Set MSI as SYSCLK source */
	LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_MSI);
	while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_MSI) {
	}
#elif STM32_SYSCLK_SRC_HSI
	stm32_clock_switch_to_hsi();
#endif /* STM32_SYSCLK_SRC_... */
	/* Restore the requested AHB prescaler once SYSCLK is switched */
	if (DT_PROP(DT_NODELABEL(rcc), undershoot_prevention) &&
	    (STM32_CORE_PRESCALER == LL_RCC_SYSCLK_DIV_1) &&
	    (MHZ(80) < CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC)) {
		LL_RCC_SetAHBPrescaler(ahb_prescaler(STM32_CORE_PRESCALER));
	}
#if defined(FLASH_ACR_LATENCY)
	/* If HCLK not increased, set flash latency after all clock setting */
	if (old_flash_freq >= new_flash_freq) {
		LL_SetFlashLatency(new_flash_freq);
	}
#endif /* FLASH_ACR_LATENCY */
	SystemCoreClock = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

	/* Set bus prescalers */
	LL_RCC_SetAPB1Prescaler(apb1_prescaler(STM32_APB1_PRESCALER));
#if DT_NODE_HAS_PROP(DT_NODELABEL(rcc), apb2_prescaler)
	LL_RCC_SetAPB2Prescaler(apb2_prescaler(STM32_APB2_PRESCALER));
#endif
#if DT_NODE_HAS_PROP(DT_NODELABEL(rcc), cpu2_prescaler)
	LL_C2_RCC_SetAHBPrescaler(ahb_prescaler(STM32_CPU2_PRESCALER));
#endif
#if DT_NODE_HAS_PROP(DT_NODELABEL(rcc), ahb3_prescaler)
	LL_RCC_SetAHB3Prescaler(ahb_prescaler(STM32_AHB3_PRESCALER));
#endif
#if DT_NODE_HAS_PROP(DT_NODELABEL(rcc), ahb4_prescaler)
	LL_RCC_SetAHB4Prescaler(ahb_prescaler(STM32_AHB4_PRESCALER));
#endif
#if DT_NODE_HAS_PROP(DT_NODELABEL(rcc), adc_prescaler)
	LL_RCC_SetADCClockSource(adc12_prescaler(STM32_ADC_PRESCALER));
#endif
#if DT_NODE_HAS_PROP(DT_NODELABEL(rcc), adc12_prescaler)
	LL_RCC_SetADCClockSource(adc12_prescaler(STM32_ADC12_PRESCALER));
#endif
#if DT_NODE_HAS_PROP(DT_NODELABEL(rcc), adc34_prescaler)
	LL_RCC_SetADCClockSource(adc34_prescaler(STM32_ADC34_PRESCALER));
#endif
	/* configure MCO1/MCO2 based on Kconfig */
	stm32_clock_control_mco_init();

	return 0;
}
#if defined(STM32_HSE_CSS)
/* Default (weak) HSE clock-security-system hook: does nothing.
 * Applications may override it to react to an HSE failure.
 */
void __weak stm32_hse_css_callback(void) {}

/* Called by the HAL in response to an HSE CSS interrupt */
void HAL_RCC_CSSCallback(void)
{
	stm32_hse_css_callback();
}
#endif
/* Default (weak) regulator-voltage hook: no-op. SoC-specific code may
 * override it; presumably to scale the regulator for the target HCLK
 * frequency passed in — confirm against the SoC implementations.
 */
void __weak config_regulator_voltage(uint32_t hclk_freq) {}
/**
 * @brief RCC device definition.
 *
 * Initializes at PRE_KERNEL_1 with CONFIG_CLOCK_CONTROL_INIT_PRIORITY so
 * the clock tree is configured just after SoC init, before dependent
 * peripheral drivers. (The previous comment claimed a hard-coded priority
 * of 1, which did not match the code.)
 */
DEVICE_DT_DEFINE(DT_NODELABEL(rcc),
		 stm32_clock_control_init,
		 NULL,
		 NULL, NULL,
		 PRE_KERNEL_1,
		 CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		 &stm32_clock_control_api);
``` | /content/code_sandbox/drivers/clock_control/clock_stm32_ll_common.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 7,110 |
```c
/*
*
*/
#define DT_DRV_COMPAT aspeed_ast10x0_clock
#include <errno.h>
#include <zephyr/dt-bindings/clock/ast10x0_clock.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/syscon.h>
#include <zephyr/sys/util.h>
#define LOG_LEVEL CONFIG_CLOCK_CONTROL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control_ast10x0);
/* HPLL frequency: 1000 MHz (used as the parent of HCLK/PCLK/I3C clocks) */
#define HPLL_FREQ MHZ(1000)
/*
 * CLK_STOP_CTRL0/1_SET registers:
 * - Each bit in these registers controls a clock gate
 * - Write '1' to a bit: turn OFF the corresponding clock
 * - Write '0' to a bit: no effect
 * CLK_STOP_CTRL0/1_CLEAR register:
 * - Write '1' to a bit: clear the corresponding bit in CLK_STOP_CTRL0/1.
 * (turn ON the corresponding clock)
 */
#define CLK_STOP_CTRL0_SET 0x80
#define CLK_STOP_CTRL0_CLEAR 0x84
#define CLK_STOP_CTRL1_SET 0x90
#define CLK_STOP_CTRL1_CLEAR 0x94
/* Clock selection register 4: I3C and PCLK source/divider fields */
#define CLK_SELECTION_REG4 0x310
#define I3C_CLK_SRC_SEL BIT(31)
#define I3C_CLK_SRC_HPLL 0
#define I3C_CLK_SRC_480M 1
#define I3C_CLK_DIV_SEL GENMASK(30, 28)
/* Field value 0 means divide-by-2, otherwise divide-by-(value + 1) */
#define I3C_CLK_DIV_REG_TO_VAL(x) ((x == 0) ? 2 : (x + 1))
#define PCLK_DIV_SEL GENMASK(11, 8)
#define PCLK_DIV_REG_TO_VAL(x) ((x + 1) << 1)
/* Clock selection register 5: HCLK divider field */
#define CLK_SELECTION_REG5 0x314
#define HCLK_DIV_SEL GENMASK(30, 28)
#define HCLK_DIV_REG_TO_VAL(x) ((x == 0) ? 2 : x + 1)
/* Per-instance configuration: syscon device used for register access */
struct clock_aspeed_config {
	const struct device *syscon;
};
#define DEV_CFG(dev) ((const struct clock_aspeed_config *const)(dev)->config)
/**
 * @brief Ungate (enable) a clock.
 *
 * @param dev        Clock controller device.
 * @param sub_system ASPEED_CLK_* gate identifier.
 * @return always 0 (group2 clocks have no gate and are silently accepted).
 */
static int aspeed_clock_control_on(const struct device *dev, clock_control_subsys_t sub_system)
{
	const struct device *syscon = DEV_CFG(dev)->syscon;
	uint32_t clk_gate = (uint32_t)sub_system;
	uint32_t addr;

	if (clk_gate >= ASPEED_CLK_GRP_2_OFFSET) {
		/* there is no on/off control for group2 clocks */
		return 0;
	}

	if (clk_gate < ASPEED_CLK_GRP_1_OFFSET) {
		addr = CLK_STOP_CTRL0_CLEAR;
	} else {
		addr = CLK_STOP_CTRL1_CLEAR;
		clk_gate -= ASPEED_CLK_GRP_1_OFFSET;
	}

	/* Writing '1' to a CLEAR register bit turns the clock ON */
	syscon_write_reg(syscon, addr, BIT(clk_gate));

	return 0;
}
/**
 * @brief Gate (disable) a clock.
 *
 * @param dev        Clock controller device.
 * @param sub_system ASPEED_CLK_* gate identifier.
 * @return always 0 (group2 clocks have no gate and are silently accepted).
 */
static int aspeed_clock_control_off(const struct device *dev, clock_control_subsys_t sub_system)
{
	const struct device *syscon = DEV_CFG(dev)->syscon;
	uint32_t clk_gate = (uint32_t)sub_system;
	uint32_t addr;

	if (clk_gate >= ASPEED_CLK_GRP_2_OFFSET) {
		/* there is no on/off control for group2 clocks */
		return 0;
	}

	if (clk_gate < ASPEED_CLK_GRP_1_OFFSET) {
		addr = CLK_STOP_CTRL0_SET;
	} else {
		addr = CLK_STOP_CTRL1_SET;
		clk_gate -= ASPEED_CLK_GRP_1_OFFSET;
	}

	/* Writing '1' to a SET register bit turns the clock OFF */
	syscon_write_reg(syscon, addr, BIT(clk_gate));

	return 0;
}
/**
 * @brief Get the frequency, in Hz, of a clock.
 *
 * @param dev        Clock controller device.
 * @param sub_system ASPEED_CLK_* identifier.
 * @param rate       Output: frequency in Hz.
 * @return 0 on success, -EINVAL for an unsupported clock id.
 */
static int aspeed_clock_control_get_rate(const struct device *dev,
					 clock_control_subsys_t sub_system, uint32_t *rate)
{
	const struct device *syscon = DEV_CFG(dev)->syscon;
	uint32_t clk_id = (uint32_t)sub_system;
	uint32_t reg, src, clk_div;

	switch (clk_id) {
	case ASPEED_CLK_I3C0:
	case ASPEED_CLK_I3C1:
	case ASPEED_CLK_I3C2:
	case ASPEED_CLK_I3C3:
		/* I3C clock: HPLL or a fixed 480 MHz source, then a divider */
		syscon_read_reg(syscon, CLK_SELECTION_REG4, &reg);
		if (FIELD_GET(I3C_CLK_SRC_SEL, reg) == I3C_CLK_SRC_HPLL) {
			src = HPLL_FREQ;
		} else {
			src = MHZ(480);
		}
		clk_div = I3C_CLK_DIV_REG_TO_VAL(FIELD_GET(I3C_CLK_DIV_SEL, reg));
		*rate = src / clk_div;
		break;
	case ASPEED_CLK_HCLK:
		/* HCLK = HPLL divided by the HCLK divider field */
		src = HPLL_FREQ;
		syscon_read_reg(syscon, CLK_SELECTION_REG5, &reg);
		clk_div = HCLK_DIV_REG_TO_VAL(FIELD_GET(HCLK_DIV_SEL, reg));
		*rate = src / clk_div;
		break;
	case ASPEED_CLK_PCLK:
		/* PCLK = HPLL divided by the PCLK divider field */
		src = HPLL_FREQ;
		syscon_read_reg(syscon, CLK_SELECTION_REG4, &reg);
		clk_div = PCLK_DIV_REG_TO_VAL(FIELD_GET(PCLK_DIV_SEL, reg));
		*rate = src / clk_div;
		break;
	case ASPEED_CLK_UART1:
	case ASPEED_CLK_UART2:
	case ASPEED_CLK_UART3:
	case ASPEED_CLK_UART4:
	case ASPEED_CLK_UART5:
	case ASPEED_CLK_UART6:
	case ASPEED_CLK_UART7:
	case ASPEED_CLK_UART8:
	case ASPEED_CLK_UART9:
	case ASPEED_CLK_UART10:
	case ASPEED_CLK_UART11:
	case ASPEED_CLK_UART12:
	case ASPEED_CLK_UART13:
		/* Fixed UART reference clock: 24 MHz / 13 (~1.846 MHz) */
		*rate = MHZ(24) / 13;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* Zephyr clock_control driver API hooks */
static const struct clock_control_driver_api aspeed_clk_api = {
	.on = aspeed_clock_control_on,
	.off = aspeed_clock_control_off,
	.get_rate = aspeed_clock_control_get_rate,
};

/* Instantiate one clock controller per enabled devicetree instance */
#define ASPEED_CLOCK_INIT(n)                                                                       \
	static const struct clock_aspeed_config clock_aspeed_cfg_##n = {                           \
		.syscon = DEVICE_DT_GET(DT_NODELABEL(syscon)),                                     \
	};                                                                                         \
	DEVICE_DT_INST_DEFINE(n, NULL, NULL, NULL, &clock_aspeed_cfg_##n, PRE_KERNEL_1,            \
			      CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &aspeed_clk_api);

DT_INST_FOREACH_STATUS_OKAY(ASPEED_CLOCK_INIT)
``` | /content/code_sandbox/drivers/clock_control/clock_control_ast10x0.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,385 |
```unknown
# STM32 MCU clock control driver config
menuconfig CLOCK_CONTROL_STM32_CUBE
bool "STM32 Reset & Clock Control"
depends on SOC_FAMILY_STM32
select USE_STM32_LL_UTILS
select USE_STM32_LL_RCC if (SOC_SERIES_STM32MP1X || SOC_SERIES_STM32H7X || \
SOC_SERIES_STM32H7RSX || SOC_SERIES_STM32F4X || SOC_SERIES_STM32F7X)
select RUNTIME_NMI if ($(dt_nodelabel_enabled,clk_hse) && \
$(dt_nodelabel_has_prop,clk_hse,css-enabled))
help
Enable driver for Reset & Clock Control subsystem found
in STM32 family of MCUs
if CLOCK_CONTROL_STM32_CUBE
DT_STM32_HSE_CLOCK := $(dt_nodelabel_path,clk_hse)
DT_STM32_HSE_CLOCK_FREQ := $(dt_node_int_prop_int,$(DT_STM32_HSE_CLOCK),clock-frequency)
config CLOCK_STM32_HSE_CLOCK
int "HSE clock value"
default "$(DT_STM32_HSE_CLOCK_FREQ)" if "$(dt_nodelabel_enabled,clk_hse)"
default 8000000
help
Value of external high-speed clock (HSE). This symbol could be optionally
configured using device tree by setting "clock-frequency" value of clk_hse
node. For instance:
&clk_hse{
status = "okay";
clock-frequency = <DT_FREQ_M(25)>;
};
Note: Device tree configuration is overridden when current symbol is set:
CONFIG_CLOCK_STM32_HSE_CLOCK=32000000
config CLOCK_STM32_MUX
bool "STM32 clock mux driver"
default y
depends on DT_HAS_ST_STM32_CLOCK_MUX_ENABLED
help
	  Enable driver for STM32 clock muxes, which do not match an
	  existing clock hardware block but allow selecting a clock
	  for a specific domain, for instance the per_ck clock on STM32H7
	  or the CLK48 clock.
# Micro-controller Clock output configuration options
choice
prompt "STM32 MCO1 Clock Source"
default CLOCK_STM32_MCO1_SRC_NOCLOCK
config CLOCK_STM32_MCO1_SRC_NOCLOCK
bool "NOCLOCK"
help
MCO1 output disabled, no clock on MCO1
config CLOCK_STM32_MCO1_SRC_EXT_HSE
bool "EXT_HSE"
depends on SOC_STM32F10X_CONNECTIVITY_LINE_DEVICE
help
Use EXT_HSE as source of MCO1
config CLOCK_STM32_MCO1_SRC_LSE
bool "LSE"
depends on SOC_SERIES_STM32F4X || \
SOC_SERIES_STM32F7X || \
SOC_SERIES_STM32L4X || \
SOC_SERIES_STM32H7X || \
SOC_SERIES_STM32H7RSX || \
SOC_SERIES_STM32H5X || \
SOC_SERIES_STM32U5X
help
Use LSE as source of MCO1
config CLOCK_STM32_MCO1_SRC_HSE
bool "HSE"
depends on SOC_SERIES_STM32F1X || \
SOC_SERIES_STM32F4X || \
SOC_SERIES_STM32F7X || \
SOC_SERIES_STM32L4X || \
SOC_SERIES_STM32H7X || \
SOC_SERIES_STM32H7RSX || \
SOC_SERIES_STM32H5X || \
SOC_SERIES_STM32U5X
help
Use HSE as source of MCO1
config CLOCK_STM32_MCO1_SRC_LSI
bool "LSI"
depends on SOC_SERIES_STM32L4X || \
SOC_SERIES_STM32U5X
help
Use LSI as source of MCO1
config CLOCK_STM32_MCO1_SRC_MSI
bool "MSI"
depends on SOC_SERIES_STM32L4X
help
Use MSI as source of MCO1
config CLOCK_STM32_MCO1_SRC_MSIK
bool "MSIK"
depends on SOC_SERIES_STM32U5X
help
Use MSIK as source of MCO1
config CLOCK_STM32_MCO1_SRC_MSIS
bool "MSIS"
depends on SOC_SERIES_STM32U5X
help
Use MSIS as source of MCO1
config CLOCK_STM32_MCO1_SRC_HSI
bool "HSI"
depends on SOC_SERIES_STM32F1X || \
SOC_SERIES_STM32F4X || \
SOC_SERIES_STM32F7X || \
SOC_SERIES_STM32H7X || \
SOC_SERIES_STM32H7RSX || \
SOC_SERIES_STM32H5X
help
Use HSI as source of MCO1
config CLOCK_STM32_MCO1_SRC_HSI16
bool "HSI16"
depends on SOC_SERIES_STM32L4X || \
SOC_SERIES_STM32U5X
help
Use HSI16 as source of MCO1
config CLOCK_STM32_MCO1_SRC_HSI48
bool "HSI48"
depends on SOC_SERIES_STM32L4X || \
SOC_SERIES_STM32H7X || \
SOC_SERIES_STM32H7RSX || \
SOC_SERIES_STM32H5X || \
SOC_SERIES_STM32U5X
help
Use HSI48 as source of MCO1
config CLOCK_STM32_MCO1_SRC_PLLCLK
bool "PLLCLK"
depends on SOC_SERIES_STM32F4X || \
SOC_SERIES_STM32F7X || \
SOC_SERIES_STM32L4X || \
SOC_SERIES_STM32U5X
help
Use PLLCLK as source of MCO1
config CLOCK_STM32_MCO1_SRC_PLLQCLK
bool "PLLQ"
depends on SOC_SERIES_STM32H7X || \
SOC_SERIES_STM32H7RSX || \
SOC_SERIES_STM32H5X
help
Use PLLQ as source of MCO1
config CLOCK_STM32_MCO1_SRC_PLLCLK_DIV2
bool "PLLCLK_DIV2"
depends on SOC_SERIES_STM32F1X
help
Use PLLCLK/2 as source of MCO1
config CLOCK_STM32_MCO1_SRC_PLL2CLK
bool "PLL2CLK"
depends on SOC_STM32F10X_CONNECTIVITY_LINE_DEVICE
help
Use PLL2CLK as source of MCO1
config CLOCK_STM32_MCO1_SRC_PLLI2SCLK
bool "PLLI2SCLK"
depends on SOC_STM32F10X_CONNECTIVITY_LINE_DEVICE
help
Use PLLI2SCLK as source of MCO1
config CLOCK_STM32_MCO1_SRC_PLLI2SCLK_DIV2
bool "PLLI2SCLK_DIV2"
depends on SOC_STM32F10X_CONNECTIVITY_LINE_DEVICE
help
Use PLLI2SCLK/2 as source of MCO1
config CLOCK_STM32_MCO1_SRC_SYSCLK
bool "SYSCLK"
depends on SOC_SERIES_STM32F1X || \
SOC_SERIES_STM32L4X || \
SOC_SERIES_STM32U5X
help
Use SYSCLK as source of MCO1
endchoice
config CLOCK_STM32_MCO1_DIV
int "MCO1 prescaler"
depends on !CLOCK_STM32_MCO1_SRC_NOCLOCK && (\
SOC_SERIES_STM32F4X || \
SOC_SERIES_STM32F7X || \
SOC_SERIES_STM32L4X || \
SOC_SERIES_STM32H7X || \
SOC_SERIES_STM32H7RSX || \
SOC_SERIES_STM32H5X || \
SOC_SERIES_STM32U5X \
)
default 1
range 1 5 if SOC_SERIES_STM32F4X || SOC_SERIES_STM32F7X
range 1 15 if SOC_SERIES_STM32H7X || SOC_SERIES_STM32H7RSX || SOC_SERIES_STM32H5X
range 1 16 if SOC_SERIES_STM32L4X || SOC_SERIES_STM32U5X
help
Prescaler for MCO1 output clock
choice
prompt "STM32 MCO2 Clock Source"
default CLOCK_STM32_MCO2_SRC_NOCLOCK
config CLOCK_STM32_MCO2_SRC_NOCLOCK
bool "NOCLOCK"
help
MCO2 output disabled, no clock on MCO2
config CLOCK_STM32_MCO2_SRC_SYSCLK
bool "SYSCLK"
depends on SOC_SERIES_STM32F4X || \
SOC_SERIES_STM32F7X || \
SOC_SERIES_STM32H7X || \
SOC_SERIES_STM32H7RSX || \
SOC_SERIES_STM32H5X
help
Use SYSCLK as source of MCO2
config CLOCK_STM32_MCO2_SRC_PLLI2S
bool "PLLI2S"
depends on SOC_SERIES_STM32F4X || SOC_SERIES_STM32F7X
help
Use PLLI2S as source of MCO2
config CLOCK_STM32_MCO2_SRC_HSE
bool "HSE"
depends on SOC_SERIES_STM32F4X || \
SOC_SERIES_STM32F7X || \
SOC_SERIES_STM32H7X || \
SOC_SERIES_STM32H7RSX || \
SOC_SERIES_STM32H5X
help
Use HSE as source of MCO2
config CLOCK_STM32_MCO2_SRC_LSI
bool "LSI"
depends on SOC_SERIES_STM32H7X || \
SOC_SERIES_STM32H7RSX || \
SOC_SERIES_STM32H5X
help
Use LSI as source of MCO2
config CLOCK_STM32_MCO2_SRC_CSI
bool "CSI"
depends on SOC_SERIES_STM32H7X || \
SOC_SERIES_STM32H7RSX || \
SOC_SERIES_STM32H5X
help
Use CSI as source of MCO2
config CLOCK_STM32_MCO2_SRC_PLLCLK
bool "PLLCLK"
depends on SOC_SERIES_STM32F4X || SOC_SERIES_STM32F7X
help
Use PLLCLK as source of MCO2
config CLOCK_STM32_MCO2_SRC_PLLPCLK
bool "PLLPCLK"
depends on SOC_SERIES_STM32H7X || \
SOC_SERIES_STM32H7RSX || \
SOC_SERIES_STM32H5X
help
	  Use PLLPCLK as source of MCO2
config CLOCK_STM32_MCO2_SRC_PLL2PCLK
bool "PLL2PCLK"
depends on SOC_SERIES_STM32H7X || \
SOC_SERIES_STM32H7RSX || \
SOC_SERIES_STM32H5X
help
	  Use PLL2PCLK as source of MCO2
endchoice
config CLOCK_STM32_MCO2_DIV
int "MCO2 prescaler"
depends on !CLOCK_STM32_MCO2_SRC_NOCLOCK && (\
SOC_SERIES_STM32F4X || \
SOC_SERIES_STM32F7X || \
SOC_SERIES_STM32H5X || \
SOC_SERIES_STM32H7X || \
SOC_SERIES_STM32H7RSX \
)
default 1
range 1 5 if SOC_SERIES_STM32F4X || SOC_SERIES_STM32F7X
range 1 15 if SOC_SERIES_STM32H7X || SOC_SERIES_STM32H7RSX || SOC_SERIES_STM32H5X
help
Prescaler for MCO2 output clock
endif # CLOCK_CONTROL_STM32_CUBE
``` | /content/code_sandbox/drivers/clock_control/Kconfig.stm32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,730 |
```objective-c
/*
*
*/
#ifndef LITEX_MMCM_H
#define LITEX_MMCM_H
#include <zephyr/types.h>
/* Common values */
#define PICOS_IN_SEC 1000000000000
#define BITS_PER_BYTE 8
/* MMCM specific numbers */
#define CLKOUT_MAX 7
#define DELAY_TIME_MAX 63
#define PHASE_MUX_MAX 7
#define HIGH_LOW_TIME_REG_MAX 63
#define PHASE_MUX_RES_FACTOR 8
/* DRP registers index */
#define DRP_RESET 0
#define DRP_LOCKED 1
#define DRP_READ 2
#define DRP_WRITE 3
#define DRP_DRDY 4
#define DRP_ADR 5
#define DRP_DAT_W 6
#define DRP_DAT_R 7
/* Base address */
#define DRP_BASE DT_REG_ADDR_BY_IDX(MMCM, 0)
/* Register address (one named reg entry per DRP control register) */
#define DRP_ADDR_RESET DT_REG_ADDR_BY_NAME(MMCM, drp_reset)
#define DRP_ADDR_LOCKED DT_REG_ADDR_BY_NAME(MMCM, drp_locked)
#define DRP_ADDR_READ DT_REG_ADDR_BY_NAME(MMCM, drp_read)
#define DRP_ADDR_WRITE DT_REG_ADDR_BY_NAME(MMCM, drp_write)
#define DRP_ADDR_DRDY DT_REG_ADDR_BY_NAME(MMCM, drp_drdy)
#define DRP_ADDR_ADR DT_REG_ADDR_BY_NAME(MMCM, drp_adr)
#define DRP_ADDR_DAT_W DT_REG_ADDR_BY_NAME(MMCM, drp_dat_w)
#define DRP_ADDR_DAT_R DT_REG_ADDR_BY_NAME(MMCM, drp_dat_r)
/* Devicetree global defines: timeouts and divider/multiplier/VCO limits
 * taken from the MMCM node's litex,* properties.
 */
#define LOCK_TIMEOUT DT_PROP(MMCM, litex_lock_timeout)
#define DRDY_TIMEOUT DT_PROP(MMCM, litex_drdy_timeout)
#define DIVCLK_DIVIDE_MIN DT_PROP(MMCM, litex_divclk_divide_min)
#define DIVCLK_DIVIDE_MAX DT_PROP(MMCM, litex_divclk_divide_max)
#define CLKFBOUT_MULT_MIN DT_PROP(MMCM, litex_clkfbout_mult_min)
#define CLKFBOUT_MULT_MAX DT_PROP(MMCM, litex_clkfbout_mult_max)
#define VCO_FREQ_MIN DT_PROP(MMCM, litex_vco_freq_min)
#define VCO_FREQ_MAX DT_PROP(MMCM, litex_vco_freq_max)
#define CLKOUT_DIVIDE_MIN DT_PROP(MMCM, litex_clkout_divide_min)
#define CLKOUT_DIVIDE_MAX DT_PROP(MMCM, litex_clkout_divide_max)
#define VCO_MARGIN DT_PROP(MMCM, litex_vco_margin)
#define CLKOUT_INIT(N) \
BUILD_ASSERT(CLKOUT_DUTY_DEN(N) > 0 && \
CLKOUT_DUTY_NUM(N) > 0 && \
CLKOUT_DUTY_NUM(N) <= CLKOUT_DUTY_DEN(N), \
"Invalid default duty"); \
BUILD_ASSERT(CLKOUT_ID(N) < NCLKOUT, "Invalid CLKOUT index"); \
lcko = &ldev->clkouts[N]; \
lcko->id = CLKOUT_ID(N); \
\
lcko->clkout_div = clkout_div; \
lcko->def.freq = CLKOUT_FREQ(N); \
lcko->def.phase = CLKOUT_PHASE(N); \
lcko->def.duty.num = CLKOUT_DUTY_NUM(N); \
lcko->def.duty.den = CLKOUT_DUTY_DEN(N); \
lcko->margin.m = CLKOUT_MARGIN(N); \
lcko->margin.exp = CLKOUT_MARGIN_EXP(N);
/* Devicetree clkout defines */
#define CLKOUT_EXIST(N) DT_NODE_HAS_STATUS(DT_NODELABEL(clk##N), okay)
#define CLKOUT_ID(N) DT_REG_ADDR(DT_NODELABEL(clk##N))
#define CLKOUT_FREQ(N) DT_PROP(DT_NODELABEL(clk##N), \
litex_clock_frequency)
#define CLKOUT_PHASE(N) DT_PROP(DT_NODELABEL(clk##N), \
litex_clock_phase)
#define CLKOUT_DUTY_NUM(N) DT_PROP(DT_NODELABEL(clk##N), \
litex_clock_duty_num)
#define CLKOUT_DUTY_DEN(N) DT_PROP(DT_NODELABEL(clk##N), \
litex_clock_duty_den)
#define CLKOUT_MARGIN(N) DT_PROP(DT_NODELABEL(clk##N), \
litex_clock_margin)
#define CLKOUT_MARGIN_EXP(N) DT_PROP(DT_NODELABEL(clk##N), \
litex_clock_margin_exp)
/* Register values */
#define FULL_REG_16 0xFFFF
#define ZERO_REG 0x0
#define KEEP_IN_MUL_REG1 0xF000
#define KEEP_IN_MUL_REG2 0xFF3F
#define KEEP_IN_DIV 0xC000
#define REG1_FREQ_MASK 0xF000
#define REG2_FREQ_MASK 0x803F
#define REG1_DUTY_MASK 0xF000
#define REG2_DUTY_MASK 0xFF7F
#define REG1_PHASE_MASK 0x1FFF
#define REG2_PHASE_MASK 0xFCC0
#define FILT1_MASK 0x66FF
#define FILT2_MASK 0x666F
#define LOCK1_MASK 0xFC00
#define LOCK23_MASK 0x8000
/* Control bits extraction masks */
#define HL_TIME_MASK 0x3F
#define FRAC_MASK 0x7
#define EDGE_MASK 0x1
#define NO_CNT_MASK 0x1
#define FRAC_EN_MASK 0x1
#define PHASE_MUX_MASK 0x7
/* Bit groups start position in DRP registers */
#define HIGH_TIME_POS 6
#define LOW_TIME_POS 0
#define PHASE_MUX_POS 13
#define FRAC_POS 12
#define FRAC_EN_POS 11
#define FRAC_WF_R_POS 10
#define EDGE_POS 7
#define NO_CNT_POS 6
#define EDGE_DIVREG_POS 13
#define NO_CNT_DIVREG_POS 12
#define DELAY_TIME_POS 0
/* MMCM Register addresses */
#define POWER_REG 0x28
#define DIV_REG 0x16
#define LOCK_REG1 0x18
#define LOCK_REG2 0x19
#define LOCK_REG3 0x1A
#define FILT_REG1 0x4E
#define FILT_REG2 0x4F
#define CLKOUT0_REG1 0x08
#define CLKOUT0_REG2 0x09
#define CLKOUT1_REG1 0x0A
#define CLKOUT1_REG2 0x0B
#define CLKOUT2_REG1 0x0C
#define CLKOUT2_REG2 0x0D
#define CLKOUT3_REG1 0x0E
#define CLKOUT3_REG2 0x0F
#define CLKOUT4_REG1 0x10
#define CLKOUT4_REG2 0x11
#define CLKOUT5_REG1 0x06
#define CLKOUT5_REG2 0x07
#define CLKOUT6_REG1 0x12
#define CLKOUT6_REG2 0x13
#define CLKFBOUT_REG1 0x14
#define CLKFBOUT_REG2 0x15
/* Basic structure for DRP registers */
struct litex_drp_reg {
	uint32_t addr; /* MMIO address of the DRP interface register */
	uint32_t size; /* access size (presumably in bytes) -- TODO confirm */
};
/* Inclusive [min, max] bounds for an MMCM parameter */
struct litex_clk_range {
	uint32_t min;
	uint32_t max;
};
/* Duty cycle expressed as a num/den fraction */
struct clk_duty {
	uint32_t num;
	uint32_t den;
};
/* Default CLKOUT settings loaded from devicetree (see CLKOUT_* macros) */
struct litex_clk_default {
	struct clk_duty duty;	/* default duty cycle */
	int phase;		/* default phase (presumably degrees -- TODO confirm) */
	uint32_t freq;		/* default frequency (presumably Hz -- TODO confirm) */
};
/* Global MMCM parameters shared by all outputs */
struct litex_clk_glob_params {
	uint64_t freq;	/* presumably the VCO frequency -- TODO confirm */
	uint32_t div;	/* global input divider (DIVCLK) */
	uint32_t mul;	/* global feedback multiplier (CLKFBOUT) */
};
/* Divider configuration bits group */
struct litex_clk_div_params {
	uint8_t high_time;	/* counter value for the clock-high period */
	uint8_t low_time;	/* counter value for the clock-low period */
	uint8_t no_cnt;		/* counter-bypass flag */
	uint8_t edge;		/* duty-cycle edge adjustment flag */
};
/* Phase configuration bits group */
struct litex_clk_phase_params {
	uint8_t phase_mux;	/* fine phase select, presumably 1/8-VCO-period steps
				 * (PHASE_MUX_RES_FACTOR) -- TODO confirm
				 */
	uint8_t delay_time;	/* coarse delay counter */
	uint8_t mx;
};
/* Fractional configuration bits group */
struct litex_clk_frac_params {
	uint8_t frac_en;	/* fractional-divide enable bit */
	uint8_t frac;		/* fractional part of the divider */
	uint8_t phase_mux_f;
	uint8_t frac_wf_r;	/* waveform bits -- presumably rising/falling edge
				 * select per Xilinx DRP naming; TODO confirm
				 */
	uint8_t frac_wf_f;
};
/* Settings of a single CLKOUT (either current or to-be-applied) */
struct litex_clk_params {
	struct clk_duty duty;
	int phase;
	uint32_t freq;
	uint32_t period_off;	/* presumably period offset for phase setting -- TODO confirm */
	uint8_t div;		/* output divider */
};
/* Poll limits taken from the litex,lock-timeout / litex,drdy-timeout DT props */
struct litex_clk_timeout {
	uint32_t lock;	/* MMCM lock wait limit */
	uint32_t drdy;	/* DRP ready (DRDY) wait limit */
};
/* Basic structure for MMCM reg addresses */
struct litex_clk_clkout_addr {
	uint8_t reg1;	/* DRP address of CLKOUTn_REG1 */
	uint8_t reg2;	/* DRP address of CLKOUTn_REG2 */
};
/* Structure for all MMCM regs */
/* Register-address pairs for every possible output, indexed by CLKOUT id */
struct litex_clk_regs_addr {
	struct litex_clk_clkout_addr clkout[CLKOUT_MAX];
};
struct litex_clk_clkout_margin {
	uint32_t m;	/* margin factor scaled to integer */
	uint32_t exp;	/* decimal scaling exponent applied to 'm' (see DT margin_exp) */
};
/* Driver-wide state for the whole MMCM device */
struct litex_clk_device {
	uint32_t *base;				/* MMIO base pointer */
	/*struct clk_hw clk_hw;*/
	struct litex_clk_clkout *clkouts;	/* array of clock outputs */
	struct litex_clk_timeout timeout;	/* timeouts for wait functions*/
	struct litex_clk_glob_params g_config;	/* general MMCM settings */
	struct litex_clk_glob_params ts_g_config;/* settings to set*/
	struct litex_clk_range divclk;		/* divclk_divide_range */
	struct litex_clk_range clkfbout;	/* clkfbout_mult_frange */
	struct litex_clk_range vco;		/* vco_freq_range */
	uint8_t *update_clkout;			/* which clkout needs update */
	uint32_t vco_margin;
	uint32_t nclkout;			/* number of outputs in clkouts[] */
};
/* Per-output state for one MMCM CLKOUT */
struct litex_clk_clkout {
	uint32_t *base;				/* MMIO base pointer */
	struct litex_clk_device *ldev;		/* global data */
	struct litex_clk_default def;		/* DTS defaults */
	struct litex_clk_params config;		/* real CLKOUT settings */
	struct litex_clk_params ts_config;	/* CLKOUT settings to set */
	struct litex_clk_div_params div;	/* CLKOUT configuration groups*/
	struct litex_clk_phase_params phase;
	struct litex_clk_frac_params frac;
	struct litex_clk_range clkout_div;	/* clkout_divide_range */
	struct litex_clk_clkout_margin margin;
	uint32_t id;				/* CLKOUT index (from DT reg) */
};
#endif /* LITEX_MMCM_H */
``` | /content/code_sandbox/drivers/clock_control/clock_control_litex.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,333 |
```c
/*
*
*/
#include <zephyr/device.h>
#include <zephyr/drivers/clock_control/clock_control_adsp.h>
#include <zephyr/drivers/clock_control.h>
/**
 * @brief Set the ADSP CPU clock frequency.
 *
 * @param clk  Clock controller device (unused).
 * @param sys  Clock control subsystem identifier (unused).
 * @param rate Requested frequency index, encoded in the pointer value.
 *
 * @return Result of adsp_clock_set_cpu_freq() (0 on success).
 */
static int cavs_clock_ctrl_set_rate(const struct device *clk,
				    clock_control_subsys_t sys,
				    clock_control_subsys_rate_t rate)
{
	/* The rate argument carries an index, not a real pointer; cast
	 * through uintptr_t so the narrowing conversion is well-defined
	 * regardless of pointer width.
	 */
	uint32_t freq_idx = (uint32_t)(uintptr_t)rate;

	ARG_UNUSED(clk);
	ARG_UNUSED(sys);

	return adsp_clock_set_cpu_freq(freq_idx);
}
/**
 * @brief Driver init hook.
 *
 * @param dev Clock controller device (unused).
 *
 * @return Always 0.
 */
static int cavs_clock_ctrl_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	/* Nothing to do. All initialisation should've been handled
	 * by SOC level driver.
	 */
	return 0;
}
/* Only set_rate is provided; the remaining clock_control ops stay NULL. */
static const struct clock_control_driver_api cavs_clock_api = {
	.set_rate = cavs_clock_ctrl_set_rate
};
/* Register the controller for the devicetree 'clkctl' node. */
DEVICE_DT_DEFINE(DT_NODELABEL(clkctl), cavs_clock_ctrl_init, NULL,
		 NULL, NULL, POST_KERNEL,
		 CONFIG_CLOCK_CONTROL_INIT_PRIORITY, &cavs_clock_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_adsp.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 202 |
```c
/*
*
*
*/
#include <soc.h>
#include <stm32_ll_bus.h>
#include <stm32_ll_pwr.h>
#include <stm32_ll_rcc.h>
#include <stm32_ll_utils.h>
#include <stm32_ll_system.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include "clock_stm32_ll_mco.h"
/* Macros to fill up prescaler values */
#define z_hsi_divider(v) LL_RCC_HSI_DIV_ ## v
#define hsi_divider(v) z_hsi_divider(v)
#define z_ahb_prescaler(v) LL_RCC_SYSCLK_DIV_ ## v
#define ahb_prescaler(v) z_ahb_prescaler(v)
#define z_apb1_prescaler(v) LL_RCC_APB1_DIV_ ## v
#define apb1_prescaler(v) z_apb1_prescaler(v)
#define z_apb2_prescaler(v) LL_RCC_APB2_DIV_ ## v
#define apb2_prescaler(v) z_apb2_prescaler(v)
#define z_apb3_prescaler(v) LL_RCC_APB3_DIV_ ## v
#define apb3_prescaler(v) z_apb3_prescaler(v)
#define PLL1_ID 1
#define PLL2_ID 2
#define PLL3_ID 3
/* Derive a bus frequency by applying its prescaler to the parent clock. */
static uint32_t get_bus_clock(uint32_t clock, uint32_t prescaler)
{
	uint32_t bus_freq = clock / prescaler;

	return bus_freq;
}
__unused
/** @brief returns the pll source frequency of given pll_id
 *
 * @param pll_id PLL1_ID, PLL2_ID or PLL3_ID
 * @return source frequency in Hz (HSI, HSE or CSI per devicetree);
 *         asserts and returns 0 if the PLL has no configured source.
 */
static uint32_t get_pllsrc_frequency(size_t pll_id)
{
	if ((IS_ENABLED(STM32_PLL_SRC_HSI) && pll_id == PLL1_ID) ||
	    (IS_ENABLED(STM32_PLL2_SRC_HSI) && pll_id == PLL2_ID) ||
	    (IS_ENABLED(STM32_PLL3_SRC_HSI) && pll_id == PLL3_ID)) {
		return STM32_HSI_FREQ;
	} else if ((IS_ENABLED(STM32_PLL_SRC_HSE) && pll_id == PLL1_ID) ||
		   (IS_ENABLED(STM32_PLL2_SRC_HSE) && pll_id == PLL2_ID) ||
		   (IS_ENABLED(STM32_PLL3_SRC_HSE) && pll_id == PLL3_ID)) {
		return STM32_HSE_FREQ;
	} else if ((IS_ENABLED(STM32_PLL_SRC_CSI) && pll_id == PLL1_ID) ||
		   (IS_ENABLED(STM32_PLL2_SRC_CSI) && pll_id == PLL2_ID) ||
		   (IS_ENABLED(STM32_PLL3_SRC_CSI) && pll_id == PLL3_ID)) {
		return STM32_CSI_FREQ;
	}
	__ASSERT(0, "No PLL Source configured");
	return 0;
}
/**
 * @brief Return the SYSCLK frequency at driver startup.
 *
 * Reads the currently selected system clock source from RCC and maps it
 * to its nominal frequency; asserts on an unexpected source.
 */
static uint32_t get_startup_frequency(void)
{
	switch (LL_RCC_GetSysClkSource()) {
	case LL_RCC_SYS_CLKSOURCE_STATUS_CSI:
		return STM32_CSI_FREQ;
	case LL_RCC_SYS_CLKSOURCE_STATUS_HSI:
		return STM32_HSI_FREQ;
	case LL_RCC_SYS_CLKSOURCE_STATUS_HSE:
		return STM32_HSE_FREQ;
	case LL_RCC_SYS_CLKSOURCE_STATUS_PLL1:
		/* Note: returns the PLL *source* frequency, not the PLL
		 * output; the caller combines it with prescaler math.
		 */
		return get_pllsrc_frequency(PLL1_ID);
	default:
		__ASSERT(0, "Unexpected startup freq");
		return 0;
	}
}
__unused
/* Compute a PLL output frequency from its source clock and divider chain:
 * (source / M) * N / OUTDIV.
 */
static uint32_t get_pllout_frequency(uint32_t pllsrc_freq,
				     int pllm_div,
				     int plln_mul,
				     int pllout_div)
{
	uint32_t vco_input_freq;

	__ASSERT_NO_MSG(pllm_div && pllout_div);

	vco_input_freq = pllsrc_freq / pllm_div;

	return vco_input_freq * plln_mul / pllout_div;
}
/**
 * @brief Return the configured SYSCLK frequency in Hz.
 *
 * Resolved at compile time from the devicetree SYSCLK source selection;
 * asserts and returns 0 if no source is configured.
 */
static uint32_t get_sysclk_frequency(void)
{
#if defined(STM32_SYSCLK_SRC_PLL)
	return get_pllout_frequency(get_pllsrc_frequency(PLL1_ID),
				    STM32_PLL_M_DIVISOR,
				    STM32_PLL_N_MULTIPLIER,
				    STM32_PLL_R_DIVISOR);
#elif defined(STM32_SYSCLK_SRC_CSI)
	return STM32_CSI_FREQ;
#elif defined(STM32_SYSCLK_SRC_HSE)
	return STM32_HSE_FREQ;
#elif defined(STM32_SYSCLK_SRC_HSI)
	return STM32_HSI_FREQ;
#else
	__ASSERT(0, "No SYSCLK Source configured");
	return 0;
#endif
}
/** @brief Verifies clock is part of active clock configuration
 *
 * @param src_clk STM32_SRC_* identifier of the requested source clock
 * @return 0 when the source is enabled in devicetree, -ENOTSUP otherwise
 */
static int enabled_clock(uint32_t src_clk)
{
	int available;

	switch (src_clk) {
	case STM32_SRC_SYSCLK:
		available = 1;
		break;
	case STM32_SRC_HSE:
		available = IS_ENABLED(STM32_HSE_ENABLED);
		break;
	case STM32_SRC_HSI:
		available = IS_ENABLED(STM32_HSI_ENABLED);
		break;
	case STM32_SRC_HSI48:
		available = IS_ENABLED(STM32_HSI48_ENABLED);
		break;
	case STM32_SRC_LSE:
		available = IS_ENABLED(STM32_LSE_ENABLED);
		break;
	case STM32_SRC_LSI:
		available = IS_ENABLED(STM32_LSI_ENABLED);
		break;
	case STM32_SRC_CSI:
		available = IS_ENABLED(STM32_CSI_ENABLED);
		break;
	case STM32_SRC_PLL1_P:
		available = IS_ENABLED(STM32_PLL_P_ENABLED);
		break;
	case STM32_SRC_PLL1_Q:
		available = IS_ENABLED(STM32_PLL_Q_ENABLED);
		break;
	case STM32_SRC_PLL1_R:
		available = IS_ENABLED(STM32_PLL_R_ENABLED);
		break;
	case STM32_SRC_PLL2_P:
		available = IS_ENABLED(STM32_PLL2_P_ENABLED);
		break;
	case STM32_SRC_PLL2_Q:
		available = IS_ENABLED(STM32_PLL2_Q_ENABLED);
		break;
	case STM32_SRC_PLL2_R:
		available = IS_ENABLED(STM32_PLL2_R_ENABLED);
		break;
	case STM32_SRC_PLL3_P:
		available = IS_ENABLED(STM32_PLL3_P_ENABLED);
		break;
	case STM32_SRC_PLL3_Q:
		available = IS_ENABLED(STM32_PLL3_Q_ENABLED);
		break;
	case STM32_SRC_PLL3_R:
		available = IS_ENABLED(STM32_PLL3_R_ENABLED);
		break;
	default:
		available = 0;
		break;
	}

	return available ? 0 : -ENOTSUP;
}
/**
 * @brief Enable a peripheral clock on its bus.
 *
 * @param dev        RCC device (unused).
 * @param sub_system Pointer to a struct stm32_pclken giving bus offset
 *                   and enable-bit mask.
 *
 * @return 0 on success, -ENOTSUP for an invalid peripheral bus.
 */
static inline int stm32_clock_control_on(const struct device *dev,
					 clock_control_subsys_t sub_system)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);
	volatile int temp;
	ARG_UNUSED(dev);
	if (IN_RANGE(pclken->bus, STM32_PERIPH_BUS_MIN, STM32_PERIPH_BUS_MAX) == 0) {
		/* Attempt to toggle a wrong periph clock bit */
		return -ENOTSUP;
	}
	sys_set_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + pclken->bus,
		     pclken->enr);
	/* Delay after enabling the clock, to allow it to become active */
	temp = sys_read32(DT_REG_ADDR(DT_NODELABEL(rcc)) + pclken->bus);
	UNUSED(temp);
	return 0;
}
/**
 * @brief Disable a peripheral clock on its bus.
 *
 * @param dev        RCC device (unused).
 * @param sub_system Pointer to a struct stm32_pclken giving bus offset
 *                   and enable-bit mask.
 *
 * @return 0 on success, -ENOTSUP for an invalid peripheral bus.
 */
static inline int stm32_clock_control_off(const struct device *dev,
					  clock_control_subsys_t sub_system)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);
	ARG_UNUSED(dev);
	if (IN_RANGE(pclken->bus, STM32_PERIPH_BUS_MIN, STM32_PERIPH_BUS_MAX) == 0) {
		/* Attempt to toggle a wrong periph clock bit */
		return -ENOTSUP;
	}
	sys_clear_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + pclken->bus,
		       pclken->enr);
	return 0;
}
/**
 * @brief Select the kernel (source) clock of a peripheral.
 *
 * @param dev        RCC device (unused).
 * @param sub_system Pointer to a struct stm32_pclken whose 'bus' holds
 *                   the requested source clock and whose 'enr' encodes
 *                   the RCC selection register/shift/value.
 * @param data       Unused.
 *
 * @return 0 on success, negative if the source clock is not enabled.
 */
static inline int stm32_clock_control_configure(const struct device *dev,
						clock_control_subsys_t sub_system,
						void *data)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);
	int err;
	ARG_UNUSED(dev);
	ARG_UNUSED(data);
	err = enabled_clock(pclken->bus);
	if (err < 0) {
		/* Attempt to configure a src clock not available or not valid */
		return err;
	}
	sys_set_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + STM32_CLOCK_REG_GET(pclken->enr),
		     STM32_CLOCK_VAL_GET(pclken->enr) << STM32_CLOCK_SHIFT_GET(pclken->enr));
	return 0;
}
/**
 * @brief Report the rate of a bus clock or source clock.
 *
 * @param dev  RCC device (unused).
 * @param sys  Pointer to a struct stm32_pclken identifying the clock.
 * @param rate Output: frequency in Hz.
 *
 * @return 0 on success, -ENOTSUP for unknown or disabled clocks.
 */
static int stm32_clock_control_get_subsys_rate(const struct device *dev,
					       clock_control_subsys_t sys,
					       uint32_t *rate)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sys);
	/*
	 * Get AHB Clock (= SystemCoreClock = SYSCLK/prescaler)
	 * SystemCoreClock is preferred to CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC
	 * since it will be updated after clock configuration and hence
	 * more likely to contain actual clock speed
	 */
	uint32_t ahb_clock = SystemCoreClock;
	uint32_t apb1_clock = get_bus_clock(ahb_clock, STM32_APB1_PRESCALER);
	uint32_t apb2_clock = get_bus_clock(ahb_clock, STM32_APB2_PRESCALER);
	uint32_t apb3_clock = get_bus_clock(ahb_clock, STM32_APB3_PRESCALER);
	ARG_UNUSED(dev);
	switch (pclken->bus) {
	case STM32_CLOCK_BUS_AHB1:
	case STM32_CLOCK_BUS_AHB2:
	case STM32_CLOCK_BUS_AHB4:
		*rate = ahb_clock;
		break;
	case STM32_CLOCK_BUS_APB1:
	case STM32_CLOCK_BUS_APB1_2:
		*rate = apb1_clock;
		break;
	case STM32_CLOCK_BUS_APB2:
		*rate = apb2_clock;
		break;
	case STM32_CLOCK_BUS_APB3:
		*rate = apb3_clock;
		break;
	case STM32_SRC_SYSCLK:
		*rate = get_sysclk_frequency();
		break;
#if defined(STM32_HSI_ENABLED)
	case STM32_SRC_HSI:
		*rate = STM32_HSI_FREQ;
		break;
#endif /* STM32_HSI_ENABLED */
#if defined(STM32_CSI_ENABLED)
	case STM32_SRC_CSI:
		*rate = STM32_CSI_FREQ;
		break;
#endif /* STM32_CSI_ENABLED */
#if defined(STM32_HSE_ENABLED)
	case STM32_SRC_HSE:
		*rate = STM32_HSE_FREQ;
		break;
#endif /* STM32_HSE_ENABLED */
#if defined(STM32_LSE_ENABLED)
	case STM32_SRC_LSE:
		*rate = STM32_LSE_FREQ;
		break;
#endif /* STM32_LSE_ENABLED */
#if defined(STM32_LSI_ENABLED)
	case STM32_SRC_LSI:
		*rate = STM32_LSI_FREQ;
		break;
#endif /* STM32_LSI_ENABLED */
#if defined(STM32_HSI48_ENABLED)
	case STM32_SRC_HSI48:
		*rate = STM32_HSI48_FREQ;
		break;
#endif /* STM32_HSI48_ENABLED */
#if defined(STM32_PLL_ENABLED)
	case STM32_SRC_PLL1_P:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL1_ID),
					     STM32_PLL_M_DIVISOR,
					     STM32_PLL_N_MULTIPLIER,
					     STM32_PLL_P_DIVISOR);
		break;
	case STM32_SRC_PLL1_Q:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL1_ID),
					     STM32_PLL_M_DIVISOR,
					     STM32_PLL_N_MULTIPLIER,
					     STM32_PLL_Q_DIVISOR);
		break;
	case STM32_SRC_PLL1_R:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL1_ID),
					     STM32_PLL_M_DIVISOR,
					     STM32_PLL_N_MULTIPLIER,
					     STM32_PLL_R_DIVISOR);
		break;
#endif /* STM32_PLL_ENABLED */
#if defined(STM32_PLL2_ENABLED)
	case STM32_SRC_PLL2_P:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL2_ID),
					     STM32_PLL2_M_DIVISOR,
					     STM32_PLL2_N_MULTIPLIER,
					     STM32_PLL2_P_DIVISOR);
		break;
	case STM32_SRC_PLL2_Q:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL2_ID),
					     STM32_PLL2_M_DIVISOR,
					     STM32_PLL2_N_MULTIPLIER,
					     STM32_PLL2_Q_DIVISOR);
		break;
	case STM32_SRC_PLL2_R:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL2_ID),
					     STM32_PLL2_M_DIVISOR,
					     STM32_PLL2_N_MULTIPLIER,
					     STM32_PLL2_R_DIVISOR);
		break;
#endif /* STM32_PLL2_ENABLED */
#if defined(STM32_PLL3_ENABLED)
	case STM32_SRC_PLL3_P:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL3_ID),
					     STM32_PLL3_M_DIVISOR,
					     STM32_PLL3_N_MULTIPLIER,
					     STM32_PLL3_P_DIVISOR);
		break;
	case STM32_SRC_PLL3_Q:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL3_ID),
					     STM32_PLL3_M_DIVISOR,
					     STM32_PLL3_N_MULTIPLIER,
					     STM32_PLL3_Q_DIVISOR);
		break;
	case STM32_SRC_PLL3_R:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL3_ID),
					     STM32_PLL3_M_DIVISOR,
					     STM32_PLL3_N_MULTIPLIER,
					     STM32_PLL3_R_DIVISOR);
		break;
#endif /* STM32_PLL3_ENABLED */
	default:
		return -ENOTSUP;
	}
	return 0;
}
/* Zephyr clock_control API vtable for the STM32H5 RCC */
static const struct clock_control_driver_api stm32_clock_control_api = {
	.on = stm32_clock_control_on,
	.off = stm32_clock_control_off,
	.get_rate = stm32_clock_control_get_subsys_rate,
	.configure = stm32_clock_control_configure,
};
__unused
/* Select the LL PLL input-range constant matching the VCO input frequency
 * (pll source / M divider). Returns -ERANGE outside the 1..16 MHz window.
 */
static int get_vco_input_range(uint32_t m_div, uint32_t *range, size_t pll_id)
{
	uint32_t vco_in_freq = get_pllsrc_frequency(pll_id) / m_div;

	if (vco_in_freq < MHZ(1) || vco_in_freq > MHZ(16)) {
		return -ERANGE;
	}

	if (vco_in_freq <= MHZ(2)) {
		*range = LL_RCC_PLLINPUTRANGE_1_2;
	} else if (vco_in_freq <= MHZ(4)) {
		*range = LL_RCC_PLLINPUTRANGE_2_4;
	} else if (vco_in_freq <= MHZ(8)) {
		*range = LL_RCC_PLLINPUTRANGE_4_8;
	} else {
		*range = LL_RCC_PLLINPUTRANGE_8_16;
	}

	return 0;
}
__unused
/* Map a VCO input range to the matching VCO output range: only the
 * 1-2 MHz input range uses the medium VCO range, everything else is wide.
 */
static uint32_t get_vco_output_range(uint32_t vco_input_range)
{
	return (vco_input_range == LL_RCC_PLLINPUTRANGE_1_2) ?
	       LL_RCC_PLLVCORANGE_MEDIUM : LL_RCC_PLLVCORANGE_WIDE;
}
/**
 * @brief Set regulator voltage scaling to match the target HCLK.
 *
 * @param hclk_freq Target HCLK frequency in Hz.
 *
 * NOTE(review): thresholds (100/150/200 MHz, scale 3..0) presumably
 * follow the H5 datasheet VOS limits -- confirm against the datasheet.
 */
static void set_regu_voltage(uint32_t hclk_freq)
{
	if (hclk_freq <= MHZ(100)) {
		LL_PWR_SetRegulVoltageScaling(LL_PWR_REGU_VOLTAGE_SCALE3);
	} else if (hclk_freq <= MHZ(150)) {
		LL_PWR_SetRegulVoltageScaling(LL_PWR_REGU_VOLTAGE_SCALE2);
	} else if (hclk_freq <= MHZ(200)) {
		LL_PWR_SetRegulVoltageScaling(LL_PWR_REGU_VOLTAGE_SCALE1);
	} else {
		LL_PWR_SetRegulVoltageScaling(LL_PWR_REGU_VOLTAGE_SCALE0);
	}
	/* Busy-wait until the new voltage scaling is ready */
	while (LL_PWR_IsActiveFlag_VOS() == 0) {
	}
}
__unused
/**
 * @brief Switch SYSCLK to HSI, enabling HSI first if needed.
 *
 * Guarantees a valid SYSCLK source before a PLL the system may
 * currently run from is disabled/reconfigured; also resets the AHB
 * prescaler to 1.
 */
static void clock_switch_to_hsi(void)
{
	/* Enable HSI if not enabled */
	if (LL_RCC_HSI_IsReady() != 1) {
		/* Enable HSI */
		LL_RCC_HSI_Enable();
		while (LL_RCC_HSI_IsReady() != 1) {
		/* Wait for HSI ready */
		}
	}
	/* Set HSI as SYSCLK source */
	LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_HSI);
	while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_HSI) {
	}
	LL_RCC_SetAHBPrescaler(LL_RCC_SYSCLK_DIV_1);
}
__unused
/**
 * @brief Configure and start all devicetree-enabled PLLs (PLL1/2/3).
 *
 * @return 0 on success, -ENOTSUP for an invalid PLL source,
 *         -ERANGE if the VCO input frequency is out of range.
 */
static int set_up_plls(void)
{
#if defined(STM32_PLL_ENABLED) || defined(STM32_PLL2_ENABLED) || \
	defined(STM32_PLL3_ENABLED)
	int r;
	uint32_t vco_input_range;
	uint32_t vco_output_range;
#endif
#if defined(STM32_PLL_ENABLED)
	/*
	 * Switch to HSI and disable the PLL before configuration.
	 * (Switching to HSI makes sure we have a SYSCLK source in
	 * case we're currently running from the PLL we're about to
	 * turn off and reconfigure.)
	 */
	if (LL_RCC_GetSysClkSource() == LL_RCC_SYS_CLKSOURCE_STATUS_PLL1) {
		clock_switch_to_hsi();
	}
	LL_RCC_PLL1_Disable();
	/* Configure PLL source : Can be HSE, CSI or HSI */
	if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		/* Main PLL configuration and activation */
		LL_RCC_PLL1_SetSource(LL_RCC_PLL1SOURCE_HSE);
	} else if (IS_ENABLED(STM32_PLL_SRC_CSI)) {
		/* Main PLL configuration and activation */
		LL_RCC_PLL1_SetSource(LL_RCC_PLL1SOURCE_CSI);
	} else if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		/* Main PLL configuration and activation */
		LL_RCC_PLL1_SetSource(LL_RCC_PLL1SOURCE_HSI);
	} else {
		return -ENOTSUP;
	}
	r = get_vco_input_range(STM32_PLL_M_DIVISOR, &vco_input_range, PLL1_ID);
	if (r < 0) {
		return r;
	}
	vco_output_range = get_vco_output_range(vco_input_range);
	LL_RCC_PLL1_SetM(STM32_PLL_M_DIVISOR);
	/* Set VCO Input before enabling the PLL, depends on the freq of the PLL1 */
	LL_RCC_PLL1_SetVCOInputRange(vco_input_range);
	/* Select VCO freq range before enabling the PLL, depends on the freq of the PLL1 */
	LL_RCC_PLL1_SetVCOOutputRange(vco_output_range);
	LL_RCC_PLL1_SetN(STM32_PLL_N_MULTIPLIER);
	/* Integer mode only: fractional part disabled */
	LL_RCC_PLL1FRACN_Disable();
	if (IS_ENABLED(STM32_PLL_P_ENABLED)) {
		LL_RCC_PLL1_SetP(STM32_PLL_P_DIVISOR);
		LL_RCC_PLL1P_Enable();
	}
	if (IS_ENABLED(STM32_PLL_Q_ENABLED)) {
		LL_RCC_PLL1_SetQ(STM32_PLL_Q_DIVISOR);
		LL_RCC_PLL1Q_Enable();
	}
	if (IS_ENABLED(STM32_PLL_R_ENABLED)) {
		LL_RCC_PLL1_SetR(STM32_PLL_R_DIVISOR);
		LL_RCC_PLL1R_Enable();
	}
	LL_RCC_PLL1_Enable();
	while (LL_RCC_PLL1_IsReady() != 1U) {
	}
#else
	/* Init PLL source to None */
	LL_RCC_PLL1_SetSource(LL_RCC_PLL1SOURCE_NONE);
#endif /* STM32_PLL_ENABLED */
#if defined(STM32_PLL2_ENABLED)
	/* Configure PLL2 source */
	if (IS_ENABLED(STM32_PLL2_SRC_HSE)) {
		LL_RCC_PLL2_SetSource(LL_RCC_PLL2SOURCE_HSE);
	} else if (IS_ENABLED(STM32_PLL2_SRC_CSI)) {
		LL_RCC_PLL2_SetSource(LL_RCC_PLL2SOURCE_CSI);
	} else if (IS_ENABLED(STM32_PLL2_SRC_HSI)) {
		LL_RCC_PLL2_SetSource(LL_RCC_PLL2SOURCE_HSI);
	} else {
		return -ENOTSUP;
	}
	r = get_vco_input_range(STM32_PLL2_M_DIVISOR, &vco_input_range, PLL2_ID);
	if (r < 0) {
		return r;
	}
	vco_output_range = get_vco_output_range(vco_input_range);
	LL_RCC_PLL2_SetM(STM32_PLL2_M_DIVISOR);
	/* Set VCO Input before enabling the PLL, depends on the freq of the PLL2 */
	LL_RCC_PLL2_SetVCOInputRange(vco_input_range);
	/* Select VCO freq range before enabling the PLL, depends on the freq of the PLL2 */
	LL_RCC_PLL2_SetVCOOutputRange(vco_output_range);
	LL_RCC_PLL2_SetN(STM32_PLL2_N_MULTIPLIER);
	LL_RCC_PLL2FRACN_Disable();
	if (IS_ENABLED(STM32_PLL2_P_ENABLED)) {
		LL_RCC_PLL2_SetP(STM32_PLL2_P_DIVISOR);
		LL_RCC_PLL2P_Enable();
	}
	if (IS_ENABLED(STM32_PLL2_Q_ENABLED)) {
		LL_RCC_PLL2_SetQ(STM32_PLL2_Q_DIVISOR);
		LL_RCC_PLL2Q_Enable();
	}
	if (IS_ENABLED(STM32_PLL2_R_ENABLED)) {
		LL_RCC_PLL2_SetR(STM32_PLL2_R_DIVISOR);
		LL_RCC_PLL2R_Enable();
	}
	LL_RCC_PLL2_Enable();
	while (LL_RCC_PLL2_IsReady() != 1U) {
	}
#else
	/* Init PLL2 source to None */
	LL_RCC_PLL2_SetSource(LL_RCC_PLL2SOURCE_NONE);
#endif /* STM32_PLL2_ENABLED */
#if defined(RCC_CR_PLL3ON)
#if defined(STM32_PLL3_ENABLED)
	/* Configure PLL3 source */
	if (IS_ENABLED(STM32_PLL3_SRC_HSE)) {
		LL_RCC_PLL3_SetSource(LL_RCC_PLL3SOURCE_HSE);
	} else if (IS_ENABLED(STM32_PLL3_SRC_CSI)) {
		LL_RCC_PLL3_SetSource(LL_RCC_PLL3SOURCE_CSI);
	} else if (IS_ENABLED(STM32_PLL3_SRC_HSI)) {
		LL_RCC_PLL3_SetSource(LL_RCC_PLL3SOURCE_HSI);
	} else {
		return -ENOTSUP;
	}
	r = get_vco_input_range(STM32_PLL3_M_DIVISOR, &vco_input_range, PLL3_ID);
	if (r < 0) {
		return r;
	}
	vco_output_range = get_vco_output_range(vco_input_range);
	LL_RCC_PLL3_SetM(STM32_PLL3_M_DIVISOR);
	/* Set VCO Input before enabling the PLL, depends on the freq of the PLL3 */
	LL_RCC_PLL3_SetVCOInputRange(vco_input_range);
	/* Select VCO freq range before enabling the PLL, depends on the freq of the PLL3 */
	LL_RCC_PLL3_SetVCOOutputRange(vco_output_range);
	LL_RCC_PLL3_SetN(STM32_PLL3_N_MULTIPLIER);
	LL_RCC_PLL3FRACN_Disable();
	if (IS_ENABLED(STM32_PLL3_P_ENABLED)) {
		LL_RCC_PLL3_SetP(STM32_PLL3_P_DIVISOR);
		LL_RCC_PLL3P_Enable();
	}
	if (IS_ENABLED(STM32_PLL3_Q_ENABLED)) {
		LL_RCC_PLL3_SetQ(STM32_PLL3_Q_DIVISOR);
		LL_RCC_PLL3Q_Enable();
	}
	if (IS_ENABLED(STM32_PLL3_R_ENABLED)) {
		LL_RCC_PLL3_SetR(STM32_PLL3_R_DIVISOR);
		LL_RCC_PLL3R_Enable();
	}
	LL_RCC_PLL3_Enable();
	while (LL_RCC_PLL3_IsReady() != 1U) {
	}
#else
	/* Init PLL3 source to None */
	LL_RCC_PLL3_SetSource(LL_RCC_PLL3SOURCE_NONE);
#endif /* STM32_PLL3_ENABLED */
#endif /* (RCC_CR_PLL3ON) */
	return 0;
}
/**
 * @brief Enable all fixed oscillators selected in devicetree
 * (HSE, HSI, LSE, CSI, LSI, HSI48) and wait for each to be ready.
 */
static void set_up_fixed_clock_sources(void)
{
	if (IS_ENABLED(STM32_HSE_ENABLED)) {
		/* Check if need to enable HSE bypass feature or not */
		if (IS_ENABLED(STM32_HSE_BYPASS)) {
			LL_RCC_HSE_EnableBypass();
		} else {
			LL_RCC_HSE_DisableBypass();
		}
		/* Enable HSE */
		LL_RCC_HSE_Enable();
		while (LL_RCC_HSE_IsReady() != 1) {
		/* Wait for HSE ready */
		}
	}
	if (IS_ENABLED(STM32_HSI_ENABLED)) {
		if (IS_ENABLED(STM32_PLL_SRC_HSI) ||
		    IS_ENABLED(STM32_PLL2_SRC_HSI) || IS_ENABLED(STM32_PLL3_SRC_HSI)) {
			/* HSI calibration */
			LL_RCC_HSI_SetCalibTrimming(RCC_HSICALIBRATION_DEFAULT);
		}
		/* Enable HSI if not enabled */
		if (LL_RCC_HSI_IsReady() != 1) {
			/* Enable HSI */
			LL_RCC_HSI_Enable();
			while (LL_RCC_HSI_IsReady() != 1) {
			/* Wait for HSI ready */
			}
		}
		/* HSI divider configuration */
		LL_RCC_HSI_SetDivider(hsi_divider(STM32_HSI_DIVISOR));
	}
	if (IS_ENABLED(STM32_LSE_ENABLED)) {
		/* LSE lives in the backup domain: unlock it first */
		if (!LL_PWR_IsEnabledBkUpAccess()) {
			/* Enable write access to Backup domain */
			LL_PWR_EnableBkUpAccess();
			while (!LL_PWR_IsEnabledBkUpAccess()) {
			/* Wait for Backup domain access */
			}
		}
		/* Configure driving capability before enabling the LSE oscillator */
		LL_RCC_LSE_SetDriveCapability(STM32_LSE_DRIVING << RCC_BDCR_LSEDRV_Pos);
		if (IS_ENABLED(STM32_LSE_BYPASS)) {
			/* Configure LSE bypass */
			LL_RCC_LSE_EnableBypass();
		}
		/* Enable LSE Oscillator */
		LL_RCC_LSE_Enable();
		/* Wait for LSE ready */
		while (!LL_RCC_LSE_IsReady()) {
		}
		LL_PWR_DisableBkUpAccess();
	}
	if (IS_ENABLED(STM32_CSI_ENABLED)) {
		if (IS_ENABLED(STM32_PLL_SRC_CSI) ||
		    IS_ENABLED(STM32_PLL2_SRC_CSI) || IS_ENABLED(STM32_PLL3_SRC_CSI)) {
			/* CSI calibration */
			LL_RCC_CSI_SetCalibTrimming(RCC_CSICALIBRATION_DEFAULT);
		}
		/* Enable CSI */
		LL_RCC_CSI_Enable();
		/* Wait till CSI is ready */
		while (LL_RCC_CSI_IsReady() != 1) {
		}
	}
	if (IS_ENABLED(STM32_LSI_ENABLED)) {
		/* Enable LSI oscillator */
		LL_RCC_LSI_Enable();
		while (LL_RCC_LSI_IsReady() != 1) {
		}
	}
	if (IS_ENABLED(STM32_HSI48_ENABLED)) {
		LL_RCC_HSI48_Enable();
		while (LL_RCC_HSI48_IsReady() != 1) {
		}
	}
}
/**
 * @brief Initialize the RCC: oscillators, PLLs, prescalers and SYSCLK.
 *
 * @param dev RCC device (unused).
 *
 * @return 0 on success, negative errno if the configured PLL or
 *         SYSCLK source is unsupported or out of range.
 */
int stm32_clock_control_init(const struct device *dev)
{
	uint32_t old_hclk_freq;
	int r;
	ARG_UNUSED(dev);
	/* Current hclk value */
	old_hclk_freq = __LL_RCC_CALC_HCLK_FREQ(get_startup_frequency(), LL_RCC_GetAHBPrescaler());
	/* Set voltage regulator to comply with targeted system frequency */
	set_regu_voltage(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);
	/* Set flash latency */
	/* If freq increases, set flash latency before any clock setting */
	if (old_hclk_freq < CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) {
		LL_SetFlashLatency(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);
	}
	/* Set up individual enabled clocks */
	set_up_fixed_clock_sources();
	/* Set up PLLs */
	r = set_up_plls();
	if (r < 0) {
		return r;
	}
	/* Set peripheral buses prescalers */
	LL_RCC_SetAHBPrescaler(ahb_prescaler(STM32_AHB_PRESCALER));
	LL_RCC_SetAPB1Prescaler(apb1_prescaler(STM32_APB1_PRESCALER));
	LL_RCC_SetAPB2Prescaler(apb2_prescaler(STM32_APB2_PRESCALER));
	LL_RCC_SetAPB3Prescaler(apb3_prescaler(STM32_APB3_PRESCALER));
	if (IS_ENABLED(STM32_SYSCLK_SRC_PLL)) {
		/* Set PLL1 as System Clock Source */
		LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_PLL1);
		while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_PLL1) {
		}
	} else if (IS_ENABLED(STM32_SYSCLK_SRC_HSE)) {
		/* Set HSE as SYSCLK source */
		LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_HSE);
		while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_HSE) {
		}
	} else if (IS_ENABLED(STM32_SYSCLK_SRC_CSI)) {
		/* Set CSI as SYSCLK source */
		LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_CSI);
		while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_CSI) {
		}
	} else if (IS_ENABLED(STM32_SYSCLK_SRC_HSI)) {
		/* Set HSI as SYSCLK source */
		LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_HSI);
		while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_HSI) {
		}
	} else {
		return -ENOTSUP;
	}
	/* Set FLASH latency */
	/* If freq not increased, set flash latency after all clock setting */
	if (old_hclk_freq >= CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) {
		LL_SetFlashLatency(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);
	}
	/* Update CMSIS variable */
	SystemCoreClock = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
	/* configure MCO1/MCO2 based on Kconfig */
	stm32_clock_control_mco_init();
	return 0;
}
/**
 * @brief RCC device, note that priority is intentionally set so that
 * the device init runs just after SOC init.
 *
 * NOTE(review): the value used is CONFIG_CLOCK_CONTROL_INIT_PRIORITY
 * (normally 1); keep the Kconfig default in sync with this comment.
 */
DEVICE_DT_DEFINE(DT_NODELABEL(rcc),
		 stm32_clock_control_init,
		 NULL,
		 NULL, NULL,
		 PRE_KERNEL_1,
		 CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		 &stm32_clock_control_api);
``` | /content/code_sandbox/drivers/clock_control/clock_stm32_ll_h5.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6,639 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_CLOCK_CONTROL_CLOCK_STM32_LL_MCO_H_
#define ZEPHYR_DRIVERS_CLOCK_CONTROL_CLOCK_STM32_LL_MCO_H_
#include <stm32_ll_utils.h>
#if CONFIG_CLOCK_STM32_MCO1_SRC_NOCLOCK
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_NOCLOCK
#elif CONFIG_CLOCK_STM32_MCO1_SRC_EXT_HSE
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_EXT_HSE
#elif CONFIG_CLOCK_STM32_MCO1_SRC_LSE
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_LSE
#elif CONFIG_CLOCK_STM32_MCO1_SRC_HSE
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_HSE
#elif CONFIG_CLOCK_STM32_MCO1_SRC_LSI
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_LSI
#elif CONFIG_CLOCK_STM32_MCO1_SRC_MSI
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_MSI
#elif CONFIG_CLOCK_STM32_MCO1_SRC_MSIK
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_MSIK
#elif CONFIG_CLOCK_STM32_MCO1_SRC_MSIS
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_MSIS
#elif CONFIG_CLOCK_STM32_MCO1_SRC_HSI
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_HSI
#elif CONFIG_CLOCK_STM32_MCO1_SRC_HSI16
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_HSI
#elif CONFIG_CLOCK_STM32_MCO1_SRC_HSI48
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_HSI48
#elif CONFIG_CLOCK_STM32_MCO1_SRC_PLLCLK
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_PLLCLK
#elif CONFIG_CLOCK_STM32_MCO1_SRC_PLLQCLK
#if (CONFIG_SOC_SERIES_STM32G0X || CONFIG_SOC_SERIES_STM32WLX)
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_PLLQCLK
#elif (CONFIG_SOC_SERIES_STM32H5X || \
CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H7RSX)
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_PLL1QCLK
#else
#error "PLLQCLK is not a valid clock source on your SOC"
#endif
#elif CONFIG_CLOCK_STM32_MCO1_SRC_PLLCLK_DIV2
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_PLLCLK_DIV_2
#elif CONFIG_CLOCK_STM32_MCO1_SRC_PLL2CLK
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_PLL2CLK
#elif CONFIG_CLOCK_STM32_MCO1_SRC_PLLI2SCLK
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_PLLI2SCLK
#elif CONFIG_CLOCK_STM32_MCO1_SRC_PLLI2SCLK_DIV2
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_PLLI2SCLK_DIV2
#elif CONFIG_CLOCK_STM32_MCO1_SRC_SYSCLK
#define MCO1_SOURCE LL_RCC_MCO1SOURCE_SYSCLK
#endif
#if CONFIG_CLOCK_STM32_MCO2_SRC_SYSCLK
#define MCO2_SOURCE LL_RCC_MCO2SOURCE_SYSCLK
#elif CONFIG_CLOCK_STM32_MCO2_SRC_PLLI2S
#define MCO2_SOURCE LL_RCC_MCO2SOURCE_PLLI2S
#elif CONFIG_CLOCK_STM32_MCO2_SRC_HSE
#define MCO2_SOURCE LL_RCC_MCO2SOURCE_HSE
#elif CONFIG_CLOCK_STM32_MCO2_SRC_LSI
#define MCO2_SOURCE LL_RCC_MCO2SOURCE_LSI
#elif CONFIG_CLOCK_STM32_MCO2_SRC_CSI
#define MCO2_SOURCE LL_RCC_MCO2SOURCE_CSI
#elif CONFIG_CLOCK_STM32_MCO2_SRC_PLLCLK
#define MCO2_SOURCE LL_RCC_MCO2SOURCE_PLLCLK
#elif CONFIG_CLOCK_STM32_MCO2_SRC_PLLPCLK
#define MCO2_SOURCE LL_RCC_MCO2SOURCE_PLL1PCLK
#elif CONFIG_CLOCK_STM32_MCO2_SRC_PLL2PCLK
#define MCO2_SOURCE LL_RCC_MCO2SOURCE_PLL2PCLK
#endif
#define fn_mco1_prescaler(v) LL_RCC_MCO1_DIV_ ## v
#define mco1_prescaler(v) fn_mco1_prescaler(v)
#define fn_mco2_prescaler(v) LL_RCC_MCO2_DIV_ ## v
#define mco2_prescaler(v) fn_mco2_prescaler(v)
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Configure MCO1/MCO2 outputs from the Kconfig selection.
 * MCO configuration doesn't activate the requested clock source,
 * so please make sure the clock source was enabled beforehand.
 */
__unused
static inline void stm32_clock_control_mco_init(void)
{
#ifndef CONFIG_CLOCK_STM32_MCO1_SRC_NOCLOCK
#ifdef CONFIG_SOC_SERIES_STM32F1X
	/* The F1 LL API takes no prescaler argument */
	LL_RCC_ConfigMCO(MCO1_SOURCE);
#else
	LL_RCC_ConfigMCO(MCO1_SOURCE,
			 mco1_prescaler(CONFIG_CLOCK_STM32_MCO1_DIV));
#endif
#endif /* CONFIG_CLOCK_STM32_MCO1_SRC_NOCLOCK */
#ifndef CONFIG_CLOCK_STM32_MCO2_SRC_NOCLOCK
	LL_RCC_ConfigMCO(MCO2_SOURCE,
			 mco2_prescaler(CONFIG_CLOCK_STM32_MCO2_DIV));
#endif /* CONFIG_CLOCK_STM32_MCO2_SRC_NOCLOCK */
}
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_DRIVERS_CLOCK_CONTROL_CLOCK_STM32_LL_MCO_H_ */
``` | /content/code_sandbox/drivers/clock_control/clock_stm32_ll_mco.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,240 |
```unknown
config CLOCK_CONTROL_NXP_S32
bool "NXP S32 clock control driver"
default y
depends on DT_HAS_NXP_S32_CLOCK_ENABLED
help
Enable support for NXP S32 clock control driver.
if CLOCK_CONTROL_NXP_S32
config CLOCK_CONTROL_NXP_S32_CLOCK_CONFIG_IDX
int
default 0
help
This option specifies the zero-based index of the clock configuration
used to initialize the SoC clocks.
endif # CLOCK_CONTROL_NXP_S32
``` | /content/code_sandbox/drivers/clock_control/Kconfig.nxp_s32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 107 |
```c
/*
*
*
*/
#include <soc.h>
#include <stm32_ll_bus.h>
#include <stm32_ll_rcc.h>
#include <stm32_ll_pwr.h>
#include <stm32_ll_utils.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include "clock_stm32_ll_common.h"
#if defined(STM32_PLL_ENABLED)
/* Macros to fill up multiplication and division factors values */
#define z_pll_mul(v) LL_RCC_PLL_MUL_ ## v
#define pll_mul(v) z_pll_mul(v)
#define z_pll_div(v) LL_RCC_PLL_DIV_ ## v
#define pll_div(v) z_pll_div(v)
/**
 * @brief Map the enabled Kconfig PLL source to its LL constant.
 *
 * @return LL_RCC_PLLSOURCE_* value; asserts (and returns 0) if no
 *         supported source is enabled.
 */
__unused
static uint32_t get_pll_source(void)
{
	uint32_t source = 0;

	if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		source = LL_RCC_PLLSOURCE_HSI;
	} else if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		source = LL_RCC_PLLSOURCE_HSE;
	} else {
		__ASSERT(0, "Invalid source");
	}

	return source;
}
/**
 * @brief Return the frequency (Hz) of the selected PLL input clock.
 *
 * @return HSI or HSE frequency depending on the enabled PLL source;
 *         asserts (and returns 0) if no supported source is enabled.
 */
__unused
uint32_t get_pllsrc_frequency(void)
{
	uint32_t freq = 0;

	if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		freq = STM32_HSI_FREQ;
	} else if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		freq = STM32_HSE_FREQ;
	} else {
		__ASSERT(0, "Invalid source");
	}

	return freq;
}
/**
 * @brief Set up pll configuration
 *
 * Programs the PLL source, multiplier and divisor for the SYS clock
 * domain from the Kconfig-derived STM32_PLL_* values (the pll_mul/
 * pll_div macros paste them into LL constants).
 */
__unused
void config_pll_sysclock(void)
{
	LL_RCC_PLL_ConfigDomain_SYS(get_pll_source(),
				    pll_mul(STM32_PLL_MULTIPLIER),
				    pll_div(STM32_PLL_DIVISOR));
}
/**
 * @brief Return pllout frequency
 *
 * Computed by the LL helper macro from the PLL input frequency and the
 * configured multiplier/divisor.
 */
__unused
uint32_t get_pllout_frequency(void)
{
	return __LL_RCC_CALC_PLLCLK_FREQ(get_pllsrc_frequency(),
					 pll_mul(STM32_PLL_MULTIPLIER),
					 pll_div(STM32_PLL_DIVISOR));
}
#endif /* defined(STM32_PLL_ENABLED) */
/**
 * @brief Set up voltage regulator voltage
 *
 * Selects the lowest regulator range that supports the requested core
 * clock: range 3 up to 4.2 MHz, range 2 up to 16 MHz, range 1 above.
 *
 * @param hclk_freq target HCLK frequency in Hz
 */
void config_regulator_voltage(uint32_t hclk_freq)
{
	if (hclk_freq <= MHZ(4.2)) {
		LL_PWR_SetRegulVoltageScaling(LL_PWR_REGU_VOLTAGE_SCALE3);
	} else if (hclk_freq <= MHZ(16)) {
		LL_PWR_SetRegulVoltageScaling(LL_PWR_REGU_VOLTAGE_SCALE2);
	} else {
		LL_PWR_SetRegulVoltageScaling(LL_PWR_REGU_VOLTAGE_SCALE1);
	}
	/* Busy-wait until the voltage-scaling flag deasserts, i.e. the
	 * regulator has settled on the new range.
	 */
	while (LL_PWR_IsActiveFlag_VOS() == 1) {
	}
}
/**
 * @brief Activate default clocks
 *
 * Enables bus clocks that are unconditionally needed: the SYSCFG clock
 * (only when EXTI, USB device, or the L0 RNG entropy driver is in use)
 * and the PWR interface clock.
 */
void config_enable_default_clocks(void)
{
#if defined(CONFIG_EXTI_STM32) || defined(CONFIG_USB_DC_STM32) || \
	(defined(CONFIG_SOC_SERIES_STM32L0X) && \
	defined(CONFIG_ENTROPY_STM32_RNG))
	/* Enable System Configuration Controller clock. */
	LL_APB2_GRP1_EnableClock(LL_APB2_GRP1_PERIPH_SYSCFG);
#endif
	/* Enable the power interface clock. */
	LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_PWR);
}
``` | /content/code_sandbox/drivers/clock_control/clock_stm32l0_l1.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 697 |
```unknown
# Clock control driver option for Analog Devices MAX32 family SoCs.
config CLOCK_CONTROL_MAX32
	bool "MAX32 Clock Control Driver"
	default y
	depends on DT_HAS_ADI_MAX32_GCR_ENABLED
	help
	  Enable clock control support for Analog Devices MAX32xxx/MAX78xxx SoC series.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.max32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 53 |
```c
/*
*
*/
#define DT_DRV_COMPAT espressif_esp32_rtc
#define CPU_RESET_REASON RTC_SW_CPU_RESET
#if defined(CONFIG_SOC_SERIES_ESP32)
#define DT_CPU_COMPAT espressif_xtensa_lx6
#undef CPU_RESET_REASON
#define CPU_RESET_REASON SW_CPU_RESET
#include <zephyr/dt-bindings/clock/esp32_clock.h>
#include <esp32/rom/rtc.h>
#include <soc/dport_reg.h>
#include <soc/i2s_reg.h>
#elif defined(CONFIG_SOC_SERIES_ESP32S2)
#define DT_CPU_COMPAT espressif_xtensa_lx7
#include <zephyr/dt-bindings/clock/esp32s2_clock.h>
#include <esp32s2/rom/rtc.h>
#include <soc/dport_reg.h>
#include <soc/i2s_reg.h>
#elif defined(CONFIG_SOC_SERIES_ESP32S3)
#define DT_CPU_COMPAT espressif_xtensa_lx7
#include <zephyr/dt-bindings/clock/esp32s3_clock.h>
#include <esp32s3/rom/rtc.h>
#include <soc/dport_reg.h>
#elif defined(CONFIG_SOC_SERIES_ESP32C2)
#define DT_CPU_COMPAT espressif_riscv
#include <zephyr/dt-bindings/clock/esp32c2_clock.h>
#include <esp32c2/rom/rtc.h>
#elif defined(CONFIG_SOC_SERIES_ESP32C3)
#define DT_CPU_COMPAT espressif_riscv
#include <zephyr/dt-bindings/clock/esp32c3_clock.h>
#include <esp32c3/rom/rtc.h>
#elif defined(CONFIG_SOC_SERIES_ESP32C6)
#define DT_CPU_COMPAT espressif_riscv
#include <zephyr/dt-bindings/clock/esp32c6_clock.h>
#include <soc/lp_clkrst_reg.h>
#include <soc/regi2c_dig_reg.h>
#include <regi2c_ctrl.h>
#include <esp32c6/rom/rtc.h>
#include <soc/dport_access.h>
#include <hal/clk_tree_ll.h>
#include <hal/usb_serial_jtag_ll.h>
#include <esp_private/esp_pmu.h>
#include <ocode_init.h>
#endif
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/esp32_clock_control.h>
#include <esp_rom_caps.h>
#include <esp_rom_sys.h>
#include <esp_rom_uart.h>
#include <soc/periph_defs.h>
#include <soc/rtc.h>
#include <hal/clk_gate_ll.h>
#include <esp_private/periph_ctrl.h>
#include <esp_private/esp_clk.h>
#include <esp_cpu.h>
#include <hal/regi2c_ctrl_ll.h>
#include <hal/clk_tree_hal.h>
#include <esp_private/esp_clk_tree_common.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
/* Check whether the most recent reset of core 0 was a CPU-local reset
 * (software reset or one of the CPU watchdogs) rather than a full
 * chip/system reset.
 */
static bool reset_reason_is_cpu_reset(void)
{
	soc_reset_reason_t reason = esp_rom_get_reset_reason(0);
	bool cpu_only = (reason == RESET_REASON_CPU0_MWDT0) ||
			(reason == RESET_REASON_CPU0_SW) ||
			(reason == RESET_REASON_CPU0_RTC_WDT);

#if !defined(CONFIG_SOC_SERIES_ESP32) && !defined(CONFIG_SOC_SERIES_ESP32C2)
	cpu_only = cpu_only || (reason == RESET_REASON_CPU0_MWDT1);
#endif

	return cpu_only;
}
#if defined(CONFIG_SOC_SERIES_ESP32C6)
/* ESP32-C6 variant: gate peripheral clocks after a full chip reset.
 * CPU-only resets (SW/WDT) leave previously enabled clocks untouched;
 * LP-domain clocks are additionally gated only after power-on,
 * brown-out or system-WDT resets.
 */
static void esp32_clock_perip_init(void)
{
	soc_reset_reason_t rst_reason = esp_rom_get_reset_reason(0);
	/* HP-domain peripherals: skip gating when only the CPU was reset. */
	if ((rst_reason != RESET_REASON_CPU0_MWDT0) && (rst_reason != RESET_REASON_CPU0_MWDT1) &&
	    (rst_reason != RESET_REASON_CPU0_SW) && (rst_reason != RESET_REASON_CPU0_RTC_WDT)) {
		periph_ll_disable_clk_set_rst(PERIPH_UART1_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_I2C0_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_RMT_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_LEDC_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_TIMG1_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_TWAI0_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_TWAI1_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_I2S1_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_PCNT_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_ETM_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_MCPWM0_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_PARLIO_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_GDMA_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_SPI2_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_TEMPSENSOR_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_UHCI0_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_SARADC_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_SDIO_SLAVE_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_RSA_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_AES_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_SHA_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_ECC_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_HMAC_MODULE);
		periph_ll_disable_clk_set_rst(PERIPH_DS_MODULE);
		/* Gate miscellaneous PCR clock enables (tick, trace,
		 * retention, memory/PVT monitors, clock-out matrix).
		 */
		REG_CLR_BIT(PCR_CTRL_TICK_CONF_REG, PCR_TICK_ENABLE);
		REG_CLR_BIT(PCR_TRACE_CONF_REG, PCR_TRACE_CLK_EN);
		REG_CLR_BIT(PCR_RETENTION_CONF_REG, PCR_RETENTION_CLK_EN);
		REG_CLR_BIT(PCR_MEM_MONITOR_CONF_REG, PCR_MEM_MONITOR_CLK_EN);
		REG_CLR_BIT(PCR_PVT_MONITOR_CONF_REG, PCR_PVT_MONITOR_CLK_EN);
		REG_CLR_BIT(PCR_PVT_MONITOR_FUNC_CLK_CONF_REG, PCR_PVT_MONITOR_FUNC_CLK_EN);
		WRITE_PERI_REG(PCR_CTRL_CLK_OUT_EN_REG, 0);
		usb_serial_jtag_ll_enable_bus_clock(false);
	}
	/* LP-domain peripherals: gate only on cold-boot-like resets. */
	if ((rst_reason == RESET_REASON_CHIP_POWER_ON) ||
	    (rst_reason == RESET_REASON_CHIP_BROWN_OUT) ||
	    (rst_reason == RESET_REASON_SYS_RTC_WDT) ||
	    (rst_reason == RESET_REASON_SYS_SUPER_WDT)) {
		periph_ll_disable_clk_set_rst(PERIPH_LP_I2C0_MODULE);
		CLEAR_PERI_REG_MASK(LPPERI_CLK_EN_REG, LPPERI_RNG_CK_EN);
		CLEAR_PERI_REG_MASK(LPPERI_CLK_EN_REG, LPPERI_LP_UART_CK_EN);
		CLEAR_PERI_REG_MASK(LPPERI_CLK_EN_REG, LPPERI_OTP_DBG_CK_EN);
		CLEAR_PERI_REG_MASK(LPPERI_CLK_EN_REG, LPPERI_LP_EXT_I2C_CK_EN);
		CLEAR_PERI_REG_MASK(LPPERI_CLK_EN_REG, LPPERI_LP_CPU_CK_EN);
		WRITE_PERI_REG(LP_CLKRST_LP_CLK_PO_EN_REG, 0);
	}
}
#else
/* Gate and reset unused peripheral clocks to reduce power consumption.
 * The disable masks depend on the reset reason: after a CPU-only reset
 * the complement of the current enable registers is used, so clocks
 * that were already running are left untouched.
 */
static void esp32_clock_perip_init(void)
{
	uint32_t common_perip_clk;
	uint32_t hwcrypto_perip_clk;
	uint32_t wifi_bt_sdio_clk;
#if !defined(CONFIG_SOC_SERIES_ESP32)
	uint32_t common_perip_clk1;
#endif
	/* For reason that only reset CPU, do not disable the clocks
	 * that have been enabled before reset.
	 */
	if (reset_reason_is_cpu_reset()) {
		/* Disable only what is currently disabled: complement of
		 * the enable registers.
		 */
#if defined(CONFIG_SOC_SERIES_ESP32C2) || \
	defined(CONFIG_SOC_SERIES_ESP32C3) || \
	defined(CONFIG_SOC_SERIES_ESP32S3)
		common_perip_clk = ~READ_PERI_REG(SYSTEM_PERIP_CLK_EN0_REG);
		hwcrypto_perip_clk = ~READ_PERI_REG(SYSTEM_PERIP_CLK_EN1_REG);
		wifi_bt_sdio_clk = ~READ_PERI_REG(SYSTEM_WIFI_CLK_EN_REG);
#else /* CONFIG_SOC_SERIES_ESP32 || CONFIG_SOC_SERIES_ESP32S2 */
		common_perip_clk = ~DPORT_READ_PERI_REG(DPORT_PERIP_CLK_EN_REG);
		hwcrypto_perip_clk = ~DPORT_READ_PERI_REG(DPORT_PERI_CLK_EN_REG);
		wifi_bt_sdio_clk = ~DPORT_READ_PERI_REG(DPORT_WIFI_CLK_EN_REG);
#endif
#if defined(CONFIG_SOC_SERIES_ESP32S2)
		hwcrypto_perip_clk = ~DPORT_READ_PERI_REG(DPORT_PERIP_CLK_EN1_REG);
#endif
	} else {
		/* Full reset: explicit per-SoC list of peripherals to gate. */
		common_perip_clk =
#if defined(CONFIG_SOC_SERIES_ESP32C2)
			SYSTEM_SPI2_CLK_EN |
#if ESP_CONSOLE_UART_NUM != 0
			SYSTEM_UART_CLK_EN |
#endif
#if ESP_CONSOLE_UART_NUM != 1
			SYSTEM_UART1_CLK_EN |
#endif
			SYSTEM_LEDC_CLK_EN |
			SYSTEM_I2C_EXT0_CLK_EN |
			SYSTEM_LEDC_CLK_EN;
#elif (defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32S3))
			SYSTEM_WDG_CLK_EN |
			SYSTEM_I2S0_CLK_EN |
#if ESP_CONSOLE_UART_NUM != 0
			SYSTEM_UART_CLK_EN |
#endif
#if ESP_CONSOLE_UART_NUM != 1
			SYSTEM_UART1_CLK_EN |
#endif
#if defined(CONFIG_SOC_SERIES_ESP32S3)
#if ESP_CONSOLE_UART_NUM != 2
			SYSTEM_UART2_CLK_EN |
#endif
			SYSTEM_USB_CLK_EN |
			SYSTEM_PCNT_CLK_EN |
			SYSTEM_LEDC_CLK_EN |
			SYSTEM_PWM0_CLK_EN |
			SYSTEM_PWM1_CLK_EN |
			SYSTEM_PWM2_CLK_EN |
			SYSTEM_PWM3_CLK_EN |
#endif /* CONFIG_SOC_SERIES_ESP32S3 */
			SYSTEM_SPI2_CLK_EN |
			SYSTEM_I2C_EXT0_CLK_EN |
			SYSTEM_UHCI0_CLK_EN |
			SYSTEM_RMT_CLK_EN |
			SYSTEM_LEDC_CLK_EN |
			SYSTEM_TIMERGROUP1_CLK_EN |
			SYSTEM_SPI3_CLK_EN |
			SYSTEM_SPI4_CLK_EN |
			SYSTEM_TWAI_CLK_EN |
			SYSTEM_I2S1_CLK_EN |
			SYSTEM_SPI2_DMA_CLK_EN |
			SYSTEM_SPI3_DMA_CLK_EN;
#else /* CONFIG_SOC_SERIES_ESP32 || CONFIG_SOC_SERIES_ESP32S2 */
			DPORT_WDG_CLK_EN |
			DPORT_PCNT_CLK_EN |
			DPORT_LEDC_CLK_EN |
			DPORT_TIMERGROUP1_CLK_EN |
			DPORT_PWM0_CLK_EN |
			DPORT_TWAI_CLK_EN |
			DPORT_PWM1_CLK_EN |
			DPORT_PWM2_CLK_EN |
#if defined(CONFIG_SOC_SERIES_ESP32S2)
			DPORT_I2S0_CLK_EN |
			DPORT_SPI2_CLK_EN |
			DPORT_I2C_EXT0_CLK_EN |
			DPORT_UHCI0_CLK_EN |
			DPORT_RMT_CLK_EN |
			DPORT_SPI3_CLK_EN |
			DPORT_PWM0_CLK_EN |
			DPORT_TWAI_CLK_EN |
			DPORT_I2S1_CLK_EN |
			DPORT_SPI2_DMA_CLK_EN |
			DPORT_SPI3_DMA_CLK_EN |
#endif /* CONFIG_SOC_SERIES_ESP32S2 */
			DPORT_PWM3_CLK_EN;
#endif
#if !defined(CONFIG_SOC_SERIES_ESP32)
		common_perip_clk1 = 0;
#endif
		hwcrypto_perip_clk =
#if defined(CONFIG_SOC_SERIES_ESP32)
			DPORT_PERI_EN_AES |
			DPORT_PERI_EN_SHA |
			DPORT_PERI_EN_RSA |
			DPORT_PERI_EN_SECUREBOOT;
#endif /* CONFIG_SOC_SERIES_ESP32 */
#if defined(CONFIG_SOC_SERIES_ESP32S2)
			DPORT_CRYPTO_AES_CLK_EN |
			DPORT_CRYPTO_SHA_CLK_EN |
			DPORT_CRYPTO_RSA_CLK_EN;
#endif /* CONFIG_SOC_SERIES_ESP32S2 */
#if defined(CONFIG_SOC_SERIES_ESP32C2)
			SYSTEM_CRYPTO_SHA_CLK_EN;
#endif
#if (defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32S3))
			SYSTEM_CRYPTO_AES_CLK_EN |
			SYSTEM_CRYPTO_SHA_CLK_EN |
			SYSTEM_CRYPTO_RSA_CLK_EN;
#endif /* CONFIG_SOC_SERIES_ESP32C3 || CONFIG_SOC_SERIES_ESP32S3 */
		wifi_bt_sdio_clk =
#if defined(CONFIG_SOC_SERIES_ESP32C2)
			SYSTEM_WIFI_CLK_WIFI_EN |
			SYSTEM_WIFI_CLK_BT_EN_M |
			SYSTEM_WIFI_CLK_UNUSED_BIT5 |
			SYSTEM_WIFI_CLK_UNUSED_BIT12;
#elif (defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32S3))
			SYSTEM_WIFI_CLK_WIFI_EN |
			SYSTEM_WIFI_CLK_BT_EN_M |
			SYSTEM_WIFI_CLK_I2C_CLK_EN |
#if defined(CONFIG_SOC_SERIES_ESP32S3)
			SYSTEM_WIFI_CLK_SDIO_HOST_EN |
#endif /* CONFIG_SOC_SERIES_ESP32S3 */
			SYSTEM_WIFI_CLK_UNUSED_BIT12;
#else /* CONFIG_SOC_SERIES_ESP32 || CONFIG_SOC_SERIES_ESP32S2 */
			DPORT_WIFI_CLK_WIFI_EN |
			DPORT_WIFI_CLK_BT_EN_M |
			DPORT_WIFI_CLK_UNUSED_BIT5 |
			DPORT_WIFI_CLK_UNUSED_BIT12 |
			DPORT_WIFI_CLK_SDIOSLAVE_EN |
			DPORT_WIFI_CLK_SDIO_HOST_EN |
			DPORT_WIFI_CLK_EMAC_EN;
#endif /* CONFIG_SOC_SERIES_ESP32C3 */
	}
	/* Reset peripherals like I2C, SPI, UART, I2S and bring them to known state */
	common_perip_clk |=
#if defined(CONFIG_SOC_SERIES_ESP32C2)
		SYSTEM_SPI2_CLK_EN |
#if ESP_CONSOLE_UART_NUM != 0
		SYSTEM_UART_CLK_EN |
#endif
#if ESP_CONSOLE_UART_NUM != 1
		SYSTEM_UART1_CLK_EN |
#endif
		SYSTEM_I2C_EXT0_CLK_EN;
#elif (defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32S3))
		SYSTEM_I2S0_CLK_EN |
#if ESP_CONSOLE_UART_NUM != 0
		SYSTEM_UART_CLK_EN |
#endif
#if ESP_CONSOLE_UART_NUM != 1
		SYSTEM_UART1_CLK_EN |
#endif
#if defined(CONFIG_SOC_SERIES_ESP32S3)
#if ESP_CONSOLE_UART_NUM != 2
		SYSTEM_UART2_CLK_EN |
#endif
		SYSTEM_USB_CLK_EN |
#endif
		SYSTEM_SPI2_CLK_EN |
		SYSTEM_I2C_EXT0_CLK_EN |
		SYSTEM_UHCI0_CLK_EN |
		SYSTEM_RMT_CLK_EN |
		SYSTEM_UHCI1_CLK_EN |
		SYSTEM_SPI3_CLK_EN |
		SYSTEM_SPI4_CLK_EN |
		SYSTEM_I2C_EXT1_CLK_EN |
		SYSTEM_I2S1_CLK_EN |
		SYSTEM_SPI2_DMA_CLK_EN |
		SYSTEM_SPI3_DMA_CLK_EN;
#else
		DPORT_I2S0_CLK_EN |
		DPORT_SPI2_CLK_EN |
		DPORT_I2C_EXT0_CLK_EN |
		DPORT_UHCI0_CLK_EN |
		DPORT_RMT_CLK_EN |
		DPORT_UHCI1_CLK_EN |
		DPORT_SPI3_CLK_EN |
		DPORT_I2C_EXT1_CLK_EN |
#if ESP_CONSOLE_UART_NUM != 0
		DPORT_UART_CLK_EN |
#endif
#if ESP_CONSOLE_UART_NUM != 1
		DPORT_UART1_CLK_EN |
#endif
#if defined(CONFIG_SOC_SERIES_ESP32)
		DPORT_SPI_DMA_CLK_EN |
#if ESP_CONSOLE_UART_NUM != 2
		DPORT_UART2_CLK_EN |
#endif
#endif /* CONFIG_SOC_SERIES_ESP32 */
#if defined(CONFIG_SOC_SERIES_ESP32S2)
		DPORT_USB_CLK_EN |
		DPORT_SPI2_DMA_CLK_EN |
		DPORT_SPI3_DMA_CLK_EN |
#endif /* CONFIG_SOC_SERIES_ESP32S2 */
		DPORT_I2S1_CLK_EN;
#endif /* CONFIG_SOC_SERIES_ESP32C3 */
#if !defined(CONFIG_SOC_SERIES_ESP32)
	common_perip_clk1 = 0;
#endif
#if defined(CONFIG_SOC_SERIES_ESP32)
	/* Keep SPI0/SPI1 (flash) clock out of the disable mask. */
	common_perip_clk &= ~DPORT_SPI01_CLK_EN;
#if defined(CONFIG_SPIRAM_SPEED_80M)
	/*
	 * 80MHz SPIRAM uses SPI2/SPI3 as well; it's initialized before this is called. Because it
	 * is used in a weird mode where clock to the peripheral is disabled but reset is also
	 * disabled, it 'hangs' in a state where it outputs a continuous 80MHz signal. Mask its bit
	 * here because we should not modify that state, regardless of what we calculated earlier.
	 */
	common_perip_clk &= ~DPORT_SPI2_CLK_EN;
	common_perip_clk &= ~DPORT_SPI3_CLK_EN;
#endif
#endif /* CONFIG_SOC_SERIES_ESP32 */
	/* Change I2S clock to audio PLL first. Because if I2S uses 160MHz clock,
	 * the current is not reduced when disable I2S clock.
	 */
#if defined(CONFIG_SOC_SERIES_ESP32)
	DPORT_SET_PERI_REG_MASK(I2S_CLKM_CONF_REG(0), I2S_CLKA_ENA);
	DPORT_SET_PERI_REG_MASK(I2S_CLKM_CONF_REG(1), I2S_CLKA_ENA);
#endif /* CONFIG_SOC_SERIES_ESP32 */
#if defined(CONFIG_SOC_SERIES_ESP32S2)
	REG_SET_FIELD(I2S_CLKM_CONF_REG(0), I2S_CLK_SEL, I2S_CLK_AUDIO_PLL);
	REG_SET_FIELD(I2S_CLKM_CONF_REG(1), I2S_CLK_SEL, I2S_CLK_AUDIO_PLL);
#endif /* CONFIG_SOC_SERIES_ESP32S2 */
	/* Disable some peripheral clocks. */
#if defined(CONFIG_SOC_SERIES_ESP32C2) || \
	defined(CONFIG_SOC_SERIES_ESP32C3) || \
	defined(CONFIG_SOC_SERIES_ESP32S3)
	CLEAR_PERI_REG_MASK(SYSTEM_PERIP_CLK_EN0_REG, common_perip_clk);
	SET_PERI_REG_MASK(SYSTEM_PERIP_RST_EN0_REG, common_perip_clk);
	CLEAR_PERI_REG_MASK(SYSTEM_PERIP_CLK_EN1_REG, common_perip_clk1);
	SET_PERI_REG_MASK(SYSTEM_PERIP_RST_EN1_REG, common_perip_clk1);
#else /* CONFIG_SOC_SERIES_ESP32 || CONFIG_SOC_SERIES_ESP32S2 */
	DPORT_CLEAR_PERI_REG_MASK(DPORT_PERIP_CLK_EN_REG, common_perip_clk);
	DPORT_SET_PERI_REG_MASK(DPORT_PERIP_RST_EN_REG, common_perip_clk);
#endif /* CONFIG_SOC_SERIES_ESP32C3 || CONFIG_SOC_SERIES_ESP32S3 */
#if defined(CONFIG_SOC_SERIES_ESP32S2)
	DPORT_CLEAR_PERI_REG_MASK(DPORT_PERIP_CLK_EN1_REG, common_perip_clk1);
	DPORT_SET_PERI_REG_MASK(DPORT_PERIP_RST_EN1_REG, common_perip_clk1);
#endif
	/* Disable hardware crypto clocks. */
#if defined(CONFIG_SOC_SERIES_ESP32C2) || \
	defined(CONFIG_SOC_SERIES_ESP32C3) || \
	defined(CONFIG_SOC_SERIES_ESP32S3)
	CLEAR_PERI_REG_MASK(SYSTEM_PERIP_CLK_EN1_REG, hwcrypto_perip_clk);
	SET_PERI_REG_MASK(SYSTEM_PERIP_RST_EN1_REG, hwcrypto_perip_clk);
#elif defined(CONFIG_SOC_SERIES_ESP32)
	DPORT_CLEAR_PERI_REG_MASK(DPORT_PERI_CLK_EN_REG, hwcrypto_perip_clk);
	DPORT_SET_PERI_REG_MASK(DPORT_PERI_RST_EN_REG, hwcrypto_perip_clk);
#elif defined(CONFIG_SOC_SERIES_ESP32S2)
	DPORT_CLEAR_PERI_REG_MASK(DPORT_PERIP_CLK_EN1_REG, hwcrypto_perip_clk);
	DPORT_SET_PERI_REG_MASK(DPORT_PERIP_RST_EN1_REG, hwcrypto_perip_clk);
#endif /* CONFIG_SOC_SERIES_ESP32C3 || CONFIG_SOC_SERIES_ESP32S3 */
#if defined(CONFIG_SOC_SERIES_ESP32S3)
	/* Force clear backup dma reset signal. This is a fix to the backup dma
	 * implementation in the ROM, the reset signal was not cleared when the
	 * backup dma was started, which caused the backup dma operation to fail.
	 */
	CLEAR_PERI_REG_MASK(SYSTEM_PERIP_RST_EN1_REG, SYSTEM_PERI_BACKUP_RST);
#endif /* CONFIG_SOC_SERIES_ESP32S3 */
	/* Disable WiFi/BT/SDIO clocks. */
#if defined(CONFIG_SOC_SERIES_ESP32C2) || \
	defined(CONFIG_SOC_SERIES_ESP32C3) || \
	defined(CONFIG_SOC_SERIES_ESP32S3)
	CLEAR_PERI_REG_MASK(SYSTEM_WIFI_CLK_EN_REG, wifi_bt_sdio_clk);
	SET_PERI_REG_MASK(SYSTEM_WIFI_CLK_EN_REG, SYSTEM_WIFI_CLK_EN);
#else /* CONFIG_SOC_SERIES_ESP32 || CONFIG_SOC_SERIES_ESP32S2 */
	DPORT_CLEAR_PERI_REG_MASK(DPORT_WIFI_CLK_EN_REG, wifi_bt_sdio_clk);
#endif /* CONFIG_SOC_SERIES_ESP32C3 || CONFIG_SOC_SERIES_ESP32S3 */
#if defined(CONFIG_SOC_SERIES_ESP32S2)
	/* Enable WiFi MAC and POWER clocks */
	DPORT_SET_PERI_REG_MASK(DPORT_WIFI_CLK_EN_REG, DPORT_WIFI_CLK_WIFI_EN);
#endif
#if defined(CONFIG_SOC_SERIES_ESP32C2) || \
	defined(CONFIG_SOC_SERIES_ESP32C3) || \
	defined(CONFIG_SOC_SERIES_ESP32S3)
	/* Set WiFi light sleep clock source to RTC slow clock */
	REG_SET_FIELD(SYSTEM_BT_LPCK_DIV_INT_REG, SYSTEM_BT_LPCK_DIV_NUM, 0);
	CLEAR_PERI_REG_MASK(SYSTEM_BT_LPCK_DIV_FRAC_REG, SYSTEM_LPCLK_SEL_8M);
	SET_PERI_REG_MASK(SYSTEM_BT_LPCK_DIV_FRAC_REG, SYSTEM_LPCLK_SEL_RTC_SLOW);
#elif defined(CONFIG_SOC_SERIES_ESP32S2)
	/* Set WiFi light sleep clock source to RTC slow clock */
	DPORT_REG_SET_FIELD(DPORT_BT_LPCK_DIV_INT_REG, DPORT_BT_LPCK_DIV_NUM, 0);
	DPORT_CLEAR_PERI_REG_MASK(DPORT_BT_LPCK_DIV_FRAC_REG, DPORT_LPCLK_SEL_8M);
	DPORT_SET_PERI_REG_MASK(DPORT_BT_LPCK_DIV_FRAC_REG, DPORT_LPCLK_SEL_RTC_SLOW);
#endif
	/* Enable RNG clock. */
	periph_module_enable(PERIPH_RNG_MODULE);
#if defined(CONFIG_SOC_SERIES_ESP32C2) || \
	defined(CONFIG_SOC_SERIES_ESP32C3) || \
	defined(CONFIG_SOC_SERIES_ESP32S3)
	periph_module_enable(PERIPH_TIMG0_MODULE);
#endif
}
#endif
/* Report whether the peripheral module identified by @p sys currently
 * has its clock enabled.
 */
static enum clock_control_status clock_control_esp32_get_status(const struct device *dev,
								clock_control_subsys_t sys)
{
	const periph_module_t module = (periph_module_t)sys;
	uint32_t en_reg = periph_ll_get_clk_en_reg(module);
	uint32_t en_mask = periph_ll_get_clk_en_mask(module);

	ARG_UNUSED(dev);

	return DPORT_GET_PERI_REG_MASK(en_reg, en_mask) ? CLOCK_CONTROL_STATUS_ON
							: CLOCK_CONTROL_STATUS_OFF;
}
/* Enable the clock of the peripheral module identified by @p sys.
 * Returns -EALREADY when the clock is already running, except right
 * after a CPU-only reset, where re-enabling is forced.
 */
static int clock_control_esp32_on(const struct device *dev, clock_control_subsys_t sys)
{
	bool is_on = clock_control_esp32_get_status(dev, sys) == CLOCK_CONTROL_STATUS_ON;

	if (is_on && !reset_reason_is_cpu_reset()) {
		return -EALREADY;
	}

	periph_module_enable((periph_module_t)sys);
	return 0;
}
/* Gate the clock of the peripheral module identified by @p sys.
 * Turning off an already-stopped clock is a no-op and still succeeds.
 */
static int clock_control_esp32_off(const struct device *dev, clock_control_subsys_t sys)
{
	if (clock_control_esp32_get_status(dev, sys) == CLOCK_CONTROL_STATUS_ON) {
		periph_module_disable((periph_module_t)sys);
	}

	return 0;
}
/* Report the current rate (Hz) of the requested clock subsystem into
 * @p rate. Any subsystem other than the RTC fast/slow clocks reports
 * the CPU frequency. Always returns 0.
 */
static int clock_control_esp32_get_rate(const struct device *dev, clock_control_subsys_t sys,
					uint32_t *rate)
{
	const int subsys = (int)sys;

	ARG_UNUSED(dev);

	if (subsys == ESP32_CLOCK_CONTROL_SUBSYS_RTC_FAST) {
		*rate = esp_clk_tree_lp_fast_get_freq_hz(ESP_CLK_TREE_SRC_FREQ_PRECISION_APPROX);
	} else if (subsys == ESP32_CLOCK_CONTROL_SUBSYS_RTC_SLOW) {
		*rate = clk_hal_lp_slow_get_freq_hz();
	} else {
		*rate = clk_hal_cpu_get_freq_hz();
	}

	return 0;
}
/* Select and calibrate the RTC slow clock source.
 *
 * For 32 kHz crystal / external sources, rtc_clk_cal() is used to verify
 * the oscillator actually started (a calibration timeout returns 0);
 * the attempt is retried up to three times before failing. The final
 * calibration value is stored via esp_clk_slowclk_cal_set().
 *
 * @param slow_clk requested slow clock source selector
 * @return 0 on success, -ENODEV if a 32 kHz XTAL was requested but never
 *         produced a valid calibration
 */
static int esp32_select_rtc_slow_clk(uint8_t slow_clk)
{
#if !defined(CONFIG_SOC_SERIES_ESP32C6)
	soc_rtc_slow_clk_src_t rtc_slow_clk_src = slow_clk & RTC_CNTL_ANA_CLK_RTC_SEL_V;
#else
	soc_rtc_slow_clk_src_t rtc_slow_clk_src = slow_clk;
#endif
	uint32_t cal_val = 0;
	/* number of times to repeat 32k XTAL calibration
	 * before giving up and switching to the internal RC
	 */
	int retry_32k_xtal = 3;
	do {
#if defined(CONFIG_SOC_SERIES_ESP32C2)
		if (rtc_slow_clk_src == ESP32_RTC_SLOW_CLK_SRC_OSC_SLOW) {
			/* external clock needs to be connected to PIN0 before it can
			 * be used. Here we use rtc_clk_cal function to count
			 * the number of main XTAL cycles in the given number of
			 * external clock cycles. If the ext clk has not started up,
			 * calibration will time out, returning 0.
			 */
			LOG_DBG("waiting for external clock by pin0 to start up");
			rtc_clk_32k_enable_external();
#else
		if (rtc_slow_clk_src == ESP32_RTC_SLOW_CLK_SRC_XTAL32K) {
			/* 32k XTAL oscillator needs to be enabled and running before it can
			 * be used. Hardware doesn't have a direct way of checking if the
			 * oscillator is running. Here we use rtc_clk_cal function to count
			 * the number of main XTAL cycles in the given number of 32k XTAL
			 * oscillator cycles. If the 32k XTAL has not started up, calibration
			 * will time out, returning 0.
			 */
			LOG_DBG("waiting for 32k oscillator to start up");
			if (slow_clk == ESP32_RTC_SLOW_CLK_SRC_XTAL32K) {
				rtc_clk_32k_enable(true);
			} else if (slow_clk == ESP32_RTC_SLOW_CLK_32K_EXT_OSC) {
				rtc_clk_32k_enable_external();
			}
#endif
			/* When CONFIG_RTC_CLK_CAL_CYCLES is set to 0, clock calibration will not be
			 * performed at startup.
			 */
			if (CONFIG_RTC_CLK_CAL_CYCLES > 0) {
#if defined(CONFIG_SOC_SERIES_ESP32C2)
				cal_val = rtc_clk_cal(RTC_CAL_32K_OSC_SLOW,
						      CONFIG_RTC_CLK_CAL_CYCLES);
#else
				cal_val = rtc_clk_cal(RTC_CAL_32K_XTAL, CONFIG_RTC_CLK_CAL_CYCLES);
#endif
				if (cal_val == 0) {
					if (retry_32k_xtal-- > 0) {
						continue;
					}
					LOG_ERR("32 kHz XTAL not found");
					return -ENODEV;
				}
			}
#if defined(CONFIG_SOC_SERIES_ESP32C6)
		} else if (rtc_slow_clk_src == SOC_RTC_SLOW_CLK_SRC_RC32K) {
			rtc_clk_rc32k_enable(true);
		}
#else
		} else if (rtc_slow_clk_src == SOC_RTC_SLOW_CLK_SRC_RC_FAST_D256) {
			rtc_clk_8m_enable(true, true);
		}
#endif
		/* Commit the source selection, then measure the period. */
		rtc_clk_slow_src_set(rtc_slow_clk_src);
		if (CONFIG_RTC_CLK_CAL_CYCLES > 0) {
			cal_val = rtc_clk_cal(RTC_CAL_RTC_MUX, CONFIG_RTC_CLK_CAL_CYCLES);
		} else {
			/* No calibration: derive the period from the nominal rate. */
			const uint64_t cal_dividend = (1ULL << RTC_CLK_CAL_FRACT) * 1000000ULL;
			cal_val = (uint32_t)(cal_dividend / rtc_clk_slow_freq_get_hz());
		}
	} while (cal_val == 0);
	/* NOTE(review): cal_val is uint32_t — %u would match the format exactly. */
	LOG_DBG("RTC_SLOW_CLK calibration value: %d", cal_val);
	esp_clk_slowclk_cal_set(cal_val);
	return 0;
}
/* Program the XTAL and CPU frequencies requested in @p cpu_cfg and keep
 * the ROM UART console usable across the frequency change.
 *
 * @param cpu_cfg requested clock source plus CPU and XTAL frequencies (MHz)
 * @return 0 on success, -EINVAL if the requested CPU frequency cannot be
 *         produced from the configured clock source
 */
static int esp32_cpu_clock_configure(const struct esp32_cpu_clock_config *cpu_cfg)
{
	rtc_cpu_freq_config_t old_config;
	rtc_cpu_freq_config_t new_config;
	rtc_clk_config_t rtc_clk_cfg = RTC_CLK_CONFIG_DEFAULT();
	/* NOTE(review): only assigned in the console-baudrate path below;
	 * on ESP32-C2/C6 builds this variable is unused — consider guarding
	 * the declaration to avoid an unused-variable warning.
	 */
	uint32_t uart_clock_src_hz;
	bool ret;
	rtc_clk_cfg.xtal_freq = cpu_cfg->xtal_freq;
	rtc_clk_cfg.cpu_freq_mhz = cpu_cfg->cpu_freq;
	/* Let the console TX FIFO drain before clocks change underneath it. */
	esp_rom_uart_tx_wait_idle(ESP_CONSOLE_UART_NUM);
#if defined(CONFIG_SOC_SERIES_ESP32C6)
	rtc_clk_modem_clock_domain_active_state_icg_map_preinit();
	REG_SET_FIELD(LP_CLKRST_FOSC_CNTL_REG, LP_CLKRST_FOSC_DFREQ, rtc_clk_cfg.clk_8m_dfreq);
	REGI2C_WRITE_MASK(I2C_DIG_REG, I2C_DIG_REG_SCK_DCAP, rtc_clk_cfg.slow_clk_dcap);
	REG_SET_FIELD(LP_CLKRST_RC32K_CNTL_REG, LP_CLKRST_RC32K_DFREQ, rtc_clk_cfg.rc32k_dfreq);
#else
	REG_SET_FIELD(RTC_CNTL_REG, RTC_CNTL_SCK_DCAP, rtc_clk_cfg.slow_clk_dcap);
	REG_SET_FIELD(RTC_CNTL_CLK_CONF_REG, RTC_CNTL_CK8M_DFREQ, rtc_clk_cfg.clk_8m_dfreq);
#endif
#if defined(CONFIG_SOC_SERIES_ESP32)
	REG_SET_FIELD(RTC_CNTL_CLK_CONF_REG, RTC_CNTL_CK8M_DIV_SEL, rtc_clk_cfg.clk_8m_div - 1);
#elif defined(CONFIG_SOC_SERIES_ESP32C6)
	clk_ll_rc_fast_tick_conf();
#else
	/* Configure 150k clock division */
	rtc_clk_divider_set(rtc_clk_cfg.clk_rtc_clk_div);
	/* Configure 8M clock division */
	rtc_clk_8m_divider_set(rtc_clk_cfg.clk_8m_clk_div);
#endif
#if !defined(CONFIG_SOC_SERIES_ESP32C6)
	/* Reset (disable) i2c internal bus for all regi2c registers */
	regi2c_ctrl_ll_i2c_reset();
	/* Enable the internal bus used to configure BBPLL */
	regi2c_ctrl_ll_i2c_bbpll_enable();
#endif
#if defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32)
	regi2c_ctrl_ll_i2c_apll_enable();
#endif
#if !defined(CONFIG_SOC_SERIES_ESP32S2)
	rtc_clk_xtal_freq_update(rtc_clk_cfg.xtal_freq);
#endif
#if defined(CONFIG_SOC_SERIES_ESP32C6)
	/* On ESP32C6, MSPI source clock's default HS divider leads to 120MHz,
	 * which is unusable before calibration. Therefore, before switching
	 * SOC_ROOT_CLK to HS, we need to set MSPI source clock HS divider
	 * to make it run at 80MHz after the switch. PLL = 480MHz, so divider is 6.
	 */
	clk_ll_mspi_fast_set_hs_divider(6);
#else
	rtc_clk_apb_freq_update(rtc_clk_cfg.xtal_freq * MHZ(1));
#endif
	/* Set CPU frequency */
	rtc_clk_cpu_freq_get_config(&old_config);
	ret = rtc_clk_cpu_freq_mhz_to_config(rtc_clk_cfg.cpu_freq_mhz, &new_config);
	if (!ret || (new_config.source != cpu_cfg->clk_src)) {
		LOG_ERR("invalid CPU frequency value");
		return -EINVAL;
	}
	rtc_clk_cpu_freq_set_config(&new_config);
	/* Re-calculate the ccount to make time calculation correct. */
	esp_cpu_set_cycle_count((uint64_t)esp_cpu_get_cycle_count() * rtc_clk_cfg.cpu_freq_mhz /
				old_config.freq_mhz);
#if !defined(CONFIG_SOC_SERIES_ESP32C2) && !defined(CONFIG_SOC_SERIES_ESP32C6)
#if ESP_ROM_UART_CLK_IS_XTAL
	uart_clock_src_hz = (uint32_t)rtc_clk_xtal_freq_get() * MHZ(1);
#else
	uart_clock_src_hz = esp_clk_apb_freq();
#endif
#if !defined(ESP_CONSOLE_UART_NONE)
	/* Re-program the console baud rate for the new UART source clock. */
	esp_rom_uart_set_clock_baudrate(ESP_CONSOLE_UART_NUM, uart_clock_src_hz,
					ESP_CONSOLE_UART_BAUDRATE);
#endif
#endif
	return 0;
}
/**
 * @brief Runtime (re)configuration of a clock subsystem.
 *
 * Supported subsystems:
 * - ESP32_CLOCK_CONTROL_SUBSYS_RTC_FAST: switch the RTC fast clock source.
 * - ESP32_CLOCK_CONTROL_SUBSYS_RTC_SLOW: switch and calibrate the RTC
 *   slow clock source.
 * - ESP32_CLOCK_CONTROL_SUBSYS_CPU: reconfigure XTAL/CPU frequency.
 *
 * @param dev  clock control device (unused)
 * @param sys  subsystem identifier
 * @param data pointer to a struct esp32_clock_config with the new settings;
 *             CPU frequencies may be mutated in place (normalized to MHz)
 * @return 0 on success, negative errno on failure
 */
static int clock_control_esp32_configure(const struct device *dev, clock_control_subsys_t sys,
					 void *data)
{
	struct esp32_clock_config *new_cfg = data;
	int ret = 0;

	/* Fix: the previous revision read dev->config into an unused local,
	 * triggering -Wunused-variable; dev is genuinely unused here.
	 */
	ARG_UNUSED(dev);

	switch ((int)sys) {
	case ESP32_CLOCK_CONTROL_SUBSYS_RTC_FAST:
		rtc_clk_fast_src_set(new_cfg->rtc.rtc_fast_clock_src);
		break;
	case ESP32_CLOCK_CONTROL_SUBSYS_RTC_SLOW:
		ret = esp32_select_rtc_slow_clk(new_cfg->rtc.rtc_slow_clock_src);
		break;
	case ESP32_CLOCK_CONTROL_SUBSYS_CPU:
		/* Accept frequencies given in Hz or MHz: normalize to MHz. */
		new_cfg->cpu.xtal_freq = new_cfg->cpu.xtal_freq > MHZ(1)
						 ? new_cfg->cpu.xtal_freq / MHZ(1)
						 : new_cfg->cpu.xtal_freq;
		new_cfg->cpu.cpu_freq = new_cfg->cpu.cpu_freq > MHZ(1)
						? new_cfg->cpu.cpu_freq / MHZ(1)
						: new_cfg->cpu.cpu_freq;
		ret = esp32_cpu_clock_configure(&new_cfg->cpu);
		break;
	default:
		LOG_ERR("Unsupported subsystem %d", (int)sys);
		return -EINVAL;
	}
	return ret;
}
/**
 * @brief One-time clock controller initialization.
 *
 * Runs the SoC RTC init sequence, programs the CPU/XTAL frequencies and
 * the RTC fast/slow clock sources from devicetree, then gates unused
 * peripheral clocks.
 *
 * @param dev clock control device
 * @return 0 on success, negative errno on failure
 */
static int clock_control_esp32_init(const struct device *dev)
{
	const struct esp32_clock_config *cfg = dev->config;
	/* Fix: `ret` was declared bool, which coerced negative errno codes
	 * from the helpers to `true` and made this function return 1 instead
	 * of the real error code. It must be int to propagate errors.
	 */
	int ret;
	soc_reset_reason_t rst_reas;

	rst_reas = esp_rom_get_reset_reason(0);
#if defined(CONFIG_SOC_SERIES_ESP32C6)
	pmu_init();
	if (rst_reas == RESET_REASON_CHIP_POWER_ON) {
		esp_ocode_calib_init();
	}
#else /* CONFIG_SOC_SERIES_ESP32C6 */
	rtc_config_t rtc_cfg = RTC_CONFIG_DEFAULT();
#if !defined(CONFIG_SOC_SERIES_ESP32)
	if (rst_reas == RESET_REASON_CHIP_POWER_ON
#if SOC_EFUSE_HAS_EFUSE_RST_BUG
	    || rst_reas == RESET_REASON_CORE_EFUSE_CRC
#endif /* SOC_EFUSE_HAS_EFUSE_RST_BUG */
	) {
		rtc_cfg.cali_ocode = 1;
	}
#endif /* !CONFIG_SOC_SERIES_ESP32 */
	rtc_init(rtc_cfg);
#endif /* CONFIG_SOC_SERIES_ESP32C6 */

	ret = esp32_cpu_clock_configure(&cfg->cpu);
	if (ret) {
		LOG_ERR("Failed to configure CPU clock");
		return ret;
	}

	rtc_clk_fast_src_set(cfg->rtc.rtc_fast_clock_src);

	ret = esp32_select_rtc_slow_clk(cfg->rtc.rtc_slow_clock_src);
	if (ret) {
		LOG_ERR("Failed to configure RTC clock");
		return ret;
	}

	esp32_clock_perip_init();
	return 0;
}
/* Zephyr clock_control driver API hooks. */
static const struct clock_control_driver_api clock_control_esp32_api = {
	.on = clock_control_esp32_on,
	.off = clock_control_esp32_off,
	.get_rate = clock_control_esp32_get_rate,
	.get_status = clock_control_esp32_get_status,
	.configure = clock_control_esp32_configure,
};
/* CPU clock settings read from the CPU node in devicetree, in MHz. */
static const struct esp32_cpu_clock_config esp32_cpu_clock_config0 = {
	.clk_src = DT_PROP(DT_INST(0, DT_CPU_COMPAT), clock_source),
	.cpu_freq = (DT_PROP(DT_INST(0, DT_CPU_COMPAT), clock_frequency) / MHZ(1)),
	.xtal_freq = ((DT_PROP(DT_INST(0, DT_CPU_COMPAT), xtal_freq)) / MHZ(1)),
};
/* RTC fast/slow clock sources read from the rtc node in devicetree. */
static const struct esp32_rtc_clock_config esp32_rtc_clock_config0 = {
	.rtc_fast_clock_src = DT_PROP(DT_INST(0, espressif_esp32_rtc), fast_clk_src),
	.rtc_slow_clock_src = DT_PROP(DT_INST(0, espressif_esp32_rtc), slow_clk_src),
};
static const struct esp32_clock_config esp32_clock_config0 = {
	.cpu = esp32_cpu_clock_config0,
	.rtc = esp32_rtc_clock_config0
};
/* Device registered at PRE_KERNEL_1 with the clock control init priority. */
DEVICE_DT_DEFINE(DT_NODELABEL(rtc),
		 clock_control_esp32_init,
		 NULL,
		 NULL,
		 &esp32_clock_config0,
		 PRE_KERNEL_1,
		 CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		 &clock_control_esp32_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_esp32.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 8,121 |
```c
/*
*
*
*/
#include <soc.h>
#include <stm32_ll_bus.h>
#include <stm32_ll_rcc.h>
#include <stm32_ll_utils.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include "clock_stm32_ll_common.h"
/**
 * @brief Activate default clocks
 *
 * On this series only the PWR interface clock is enabled here.
 */
void config_enable_default_clocks(void)
{
	/* Enable the power interface clock */
	LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_PWR);
}
``` | /content/code_sandbox/drivers/clock_control/clock_stm32c0.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 124 |
```unknown
# Clock controller driver configuration options
config CLOCK_CONTROL_NRF_FORCE_ALT
bool
depends on SOC_COMPATIBLE_NRF
help
This option can be enabled to force an alternative implementation
of the clock control driver.
menuconfig CLOCK_CONTROL_NRF
bool "NRF Clock controller support"
default y
depends on DT_HAS_NORDIC_NRF_CLOCK_ENABLED
select NRFX_CLOCK if !CLOCK_CONTROL_NRF_FORCE_ALT
select ONOFF
help
Enable support for the Nordic Semiconductor nRFxx series SoC clock
driver.
if CLOCK_CONTROL_NRF
config CLOCK_CONTROL_NRF_SHELL
bool "Shell commands"
depends on SHELL
choice CLOCK_CONTROL_NRF_SOURCE
prompt "32KHz clock source"
default CLOCK_CONTROL_NRF_K32SRC_XTAL
config CLOCK_CONTROL_NRF_K32SRC_RC
bool "RC Oscillator"
config CLOCK_CONTROL_NRF_K32SRC_XTAL
select NRFX_CLOCK_LFXO_TWO_STAGE_ENABLED if (!SOC_SERIES_BSIM_NRFXX && \
!CLOCK_CONTROL_NRF_FORCE_ALT)
bool "Crystal Oscillator"
config CLOCK_CONTROL_NRF_K32SRC_SYNTH
depends on !SOC_SERIES_NRF91X
bool "Synthesized from HFCLK"
config CLOCK_CONTROL_NRF_K32SRC_EXT_LOW_SWING
depends on SOC_SERIES_NRF52X
select NRFX_CLOCK_LFXO_TWO_STAGE_ENABLED if !CLOCK_CONTROL_NRF_FORCE_ALT
bool "External low swing"
config CLOCK_CONTROL_NRF_K32SRC_EXT_FULL_SWING
depends on SOC_SERIES_NRF52X
select NRFX_CLOCK_LFXO_TWO_STAGE_ENABLED if !CLOCK_CONTROL_NRF_FORCE_ALT
bool "External full swing"
endchoice
config CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION
bool "LF clock calibration"
depends on !SOC_SERIES_NRF91X && CLOCK_CONTROL_NRF_K32SRC_RC
default y if !SOC_NRF53_CPUNET_ENABLE
help
	  If calibration is disabled while the RC oscillator is used as the
	  low-frequency clock source, the accuracy of the low-frequency clock
	  will degrade. Disable at your own risk.
if CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION
config CLOCK_CONTROL_NRF_DRIVER_CALIBRATION
bool
depends on !CLOCK_CONTROL_NRF_FORCE_ALT
depends on MULTITHREADING
default y
help
Enabling indicates that calibration is performed by the clock control driver.
config CLOCK_CONTROL_NRF_CALIBRATION_LF_ALWAYS_ON
bool "LF clock is always on"
default y if NRF_RTC_TIMER
help
If RTC is used as system timer then LF clock is always on and handling
can be simplified.
config CLOCK_CONTROL_NRF_CALIBRATION_PERIOD
int "Calibration opportunity period in milliseconds"
default 4000
help
	  Periodically, a calibration action is performed. The action includes
	  a temperature measurement followed by clock calibration. Calibration
	  may be skipped if the temperature change (compared to the measurement
	  at the previous calibration) did not exceed
	  CLOCK_CONTROL_NRF_CALIBRATION_TEMP_DIFF and the number of consecutive
	  skips did not exceed CLOCK_CONTROL_NRF_CALIBRATION_MAX_SKIP.
config CLOCK_CONTROL_NRF_CALIBRATION_MAX_SKIP
int "Maximum number of calibration skips"
default 1
range 0 $(UINT8_MAX)
help
	  Calibration is skipped when the temperature change since the last
	  calibration was less than the configured threshold. If the number of
	  consecutive skips reaches the configured value, calibration is
	  performed unconditionally. Set to 0 to perform calibration
	  periodically regardless of temperature change.
config CLOCK_CONTROL_NRF_CALIBRATION_TEMP_DIFF
int "Temperature change triggering calibration (in 0.25 degree units)"
default 2
help
Calibration is triggered if the temperature has changed by at least
this amount since the last calibration.
if CLOCK_CONTROL_NRF_DRIVER_CALIBRATION
config CLOCK_CONTROL_NRF_CALIBRATION_DEBUG
bool "Calibration instrumentation"
help
Enables retrieving debug information like number of performed or
skipped calibrations.
config CLOCK_CONTROL_NRF_USES_TEMP_SENSOR
bool
depends on HAS_HW_NRF_TEMP
default y if CLOCK_CONTROL_NRF_CALIBRATION_MAX_SKIP > 0 && \
CLOCK_CONTROL_NRF_CALIBRATION_TEMP_DIFF > 0
select TEMP_NRF5
select SENSOR
endif # CLOCK_CONTROL_NRF_DRIVER_CALIBRATION
endif # CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION
choice CLOCK_CONTROL_NRF_ACCURACY_PPM
prompt "32KHz clock accuracy"
default CLOCK_CONTROL_NRF_K32SRC_500PPM if CLOCK_CONTROL_NRF_K32SRC_RC && SOC_COMPATIBLE_NRF52X
default CLOCK_CONTROL_NRF_K32SRC_250PPM if CLOCK_CONTROL_NRF_K32SRC_RC
default CLOCK_CONTROL_NRF_K32SRC_50PPM
config CLOCK_CONTROL_NRF_K32SRC_500PPM
bool "251 ppm to 500 ppm"
config CLOCK_CONTROL_NRF_K32SRC_250PPM
bool "151 ppm to 250 ppm"
config CLOCK_CONTROL_NRF_K32SRC_150PPM
bool "101 ppm to 150 ppm"
config CLOCK_CONTROL_NRF_K32SRC_100PPM
bool "76 ppm to 100 ppm"
config CLOCK_CONTROL_NRF_K32SRC_75PPM
bool "51 ppm to 75 ppm"
config CLOCK_CONTROL_NRF_K32SRC_50PPM
bool "31 ppm to 50 ppm"
config CLOCK_CONTROL_NRF_K32SRC_30PPM
bool "21 ppm to 30 ppm"
config CLOCK_CONTROL_NRF_K32SRC_20PPM
bool "0 ppm to 20 ppm"
endchoice
config CLOCK_CONTROL_NRF_ACCURACY
int
default 500 if CLOCK_CONTROL_NRF_K32SRC_500PPM
default 250 if CLOCK_CONTROL_NRF_K32SRC_250PPM
default 150 if CLOCK_CONTROL_NRF_K32SRC_150PPM
default 100 if CLOCK_CONTROL_NRF_K32SRC_100PPM
default 75 if CLOCK_CONTROL_NRF_K32SRC_75PPM
default 50 if CLOCK_CONTROL_NRF_K32SRC_50PPM
default 30 if CLOCK_CONTROL_NRF_K32SRC_30PPM
default 20 if CLOCK_CONTROL_NRF_K32SRC_20PPM
endif # CLOCK_CONTROL_NRF
``` | /content/code_sandbox/drivers/clock_control/Kconfig.nrf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,352 |
```unknown
config CLOCK_CONTROL_NRF_AUXPLL
bool "nRF Auxiliary PLL driver"
default y
depends on DT_HAS_NORDIC_NRF_AUXPLL_ENABLED
help
Driver for nRF Auxiliary PLL.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.nrf_auxpll | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 45 |
```objective-c
/*
*
*
*/
#ifndef ZEPHYR_DRIVERS_CLOCK_CONTROL_STM32_LL_CLOCK_H_
#define ZEPHYR_DRIVERS_CLOCK_CONTROL_STM32_LL_CLOCK_H_

#include <stdint.h>

#include <zephyr/device.h>

#include <stm32_ll_utils.h>

/*
 * Macros to fill up multiplication and division factors values.
 * Each z_*(v) helper pastes the numeric value onto the corresponding
 * LL_RCC_* enumerator name; the lowercase wrapper exists so that a macro
 * argument is fully expanded before token pasting.
 */
#define z_pllm(v) LL_RCC_PLLM_DIV_ ## v
#define pllm(v) z_pllm(v)

#define z_pllp(v) LL_RCC_PLLP_DIV_ ## v
#define pllp(v) z_pllp(v)

#define z_pllq(v) LL_RCC_PLLQ_DIV_ ## v
#define pllq(v) z_pllq(v)

#define z_pllr(v) LL_RCC_PLLR_DIV_ ## v
#define pllr(v) z_pllr(v)

#define z_plli2s_m(v) LL_RCC_PLLI2SM_DIV_ ## v
#define plli2sm(v) z_plli2s_m(v)

#define z_plli2s_r(v) LL_RCC_PLLI2SR_DIV_ ## v
#define plli2sr(v) z_plli2s_r(v)

#ifdef __cplusplus
extern "C" {
#endif

#if defined(STM32_PLL_ENABLED)
/* Configure the main PLL used on the system clock path. */
void config_pll_sysclock(void);
/* Main PLL output frequency, in Hz. */
uint32_t get_pllout_frequency(void);
/* Frequency of the clock feeding the main PLL, in Hz. */
uint32_t get_pllsrc_frequency(void);
#endif
#if defined(STM32_PLL2_ENABLED)
/* Configure the secondary PLL (PLL2). */
void config_pll2(void);
#endif
#if defined(STM32_PLLI2S_ENABLED)
/* Configure the I2S PLL. */
void config_plli2s(void);
#endif
/* Enable the always-needed oscillators/buses for this SoC. */
void config_enable_default_clocks(void);
/* Adjust the regulator voltage scaling for the given HCLK frequency. */
void config_regulator_voltage(uint32_t hclk_freq);

/* functions exported to the soc power.c */
int stm32_clock_control_init(const struct device *dev);
void stm32_clock_control_standby_exit(void);

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_DRIVERS_CLOCK_CONTROL_STM32_LL_CLOCK_H_ */
``` | /content/code_sandbox/drivers/clock_control/clock_stm32_ll_common.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 399 |
```unknown
# ESP32 Clock Driver configuration options
config CLOCK_CONTROL_ESP32
bool "ESP32 Clock driver"
default y
depends on DT_HAS_ESPRESSIF_ESP32_RTC_ENABLED
help
Enable support for ESP32 clock driver.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.esp32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 51 |
```c
/*
*
*/
#define DT_DRV_COMPAT pwm_clock
#include <stdint.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pwm.h>
#include <zephyr/dt-bindings/pwm/pwm.h>
#include <zephyr/kernel.h>
#define LOG_LEVEL CONFIG_CLOCK_CONTROL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control_pwm);
BUILD_ASSERT(CONFIG_CLOCK_CONTROL_PWM_INIT_PRIORITY > CONFIG_PWM_INIT_PRIORITY,
"PWM must have a higher priority than PWM clock control");
#define NUM_PWM_CLOCKS 1
struct clock_control_pwm_config {
const struct pwm_dt_spec pwm_dt;
const uint16_t pwm_on_delay;
};
struct clock_control_pwm_data {
uint32_t clock_frequency;
bool is_enabled;
};
/*
 * Start the PWM-derived clock.
 *
 * With no target frequency configured, output a 50% duty signal at the
 * devicetree period; otherwise translate the target frequency into PWM
 * cycles. After programming, wait pwm_on_delay microseconds for the output
 * to settle. Returns 0 on success or a negative errno from the PWM API.
 */
static int clock_control_pwm_on(const struct device *dev, clock_control_subsys_t sys)
{
	struct clock_control_pwm_data *data = dev->data;
	const struct clock_control_pwm_config *config = dev->config;
	const struct pwm_dt_spec *spec = &config->pwm_dt;
	int err;

	if ((int)sys >= NUM_PWM_CLOCKS) {
		return -EINVAL;
	}

	if (data->clock_frequency == 0) {
		/* No explicit rate requested: 50% duty at the DT period. */
		err = pwm_set_dt(spec, spec->period, spec->period / 2);
	} else {
		uint64_t pwm_hz;
		uint32_t period;

		err = pwm_get_cycles_per_sec(spec->dev, spec->channel, &pwm_hz);
		if (err) {
			return err;
		}

		if (pwm_hz % data->clock_frequency > 0) {
			LOG_WRN("Target clock frequency cannot be expressed in PWM clock ticks");
		}

		period = pwm_hz / data->clock_frequency;
		err = pwm_set_cycles(spec->dev, spec->channel, period, period / 2,
				     spec->flags);
	}

	if (err) {
		return err;
	}

	/* Give the output time to stabilize before reporting it enabled. */
	k_busy_wait(config->pwm_on_delay);
	data->is_enabled = true;

	return 0;
}
/*
 * Report the clock rate in Hz: the explicitly configured frequency when one
 * is set, otherwise the rate implied by the devicetree PWM period.
 */
static int clock_control_pwm_get_rate(const struct device *dev, clock_control_subsys_t sys,
				      uint32_t *rate)
{
	const struct clock_control_pwm_config *config = dev->config;
	struct clock_control_pwm_data *data = dev->data;

	if ((int)sys >= NUM_PWM_CLOCKS) {
		return -EINVAL;
	}

	*rate = (data->clock_frequency > 0) ? data->clock_frequency
					    : NSEC_PER_SEC / config->pwm_dt.period;

	return 0;
}
/*
 * Change the clock rate, re-programming the PWM output.
 * Returns -EALREADY when the clock is already running at the requested rate.
 */
static int clock_control_pwm_set_rate(const struct device *dev, clock_control_subsys_rate_t sys,
				      clock_control_subsys_rate_t rate)
{
	struct clock_control_pwm_data *data = dev->data;
	const uint32_t new_hz = (uint32_t)rate;

	if ((int)sys >= NUM_PWM_CLOCKS) {
		return -EINVAL;
	}

	if (data->is_enabled && data->clock_frequency == new_hz) {
		return -EALREADY;
	}

	data->clock_frequency = new_hz;

	return clock_control_pwm_on(dev, sys);
}
/*
 * Driver init: only verifies that the underlying PWM device is ready.
 * The output itself is started on demand via clock_control_on/set_rate.
 */
static int clock_control_pwm_init(const struct device *dev)
{
	const struct clock_control_pwm_config *config = dev->config;

	return device_is_ready(config->pwm_dt.dev) ? 0 : -ENODEV;
}
/* clock_control API table: no .off handler — the clock cannot be stopped. */
static const struct clock_control_driver_api clock_control_pwm_api = {
	.on = clock_control_pwm_on,
	.get_rate = clock_control_pwm_get_rate,
	.set_rate = clock_control_pwm_set_rate,
};
/*
 * Instantiate one PWM-based clock-control device per enabled devicetree
 * instance. The BUILD_ASSERTs reject nodes with more than one PWM and a
 * pwm-on-delay that does not fit the 16-bit config field.
 */
#define PWM_CLOCK_INIT(i)                                                      \
                                                                               \
	BUILD_ASSERT(DT_INST_PROP_LEN(i, pwms) <= 1,                           \
		     "One PWM per clock control node is supported");           \
                                                                               \
	BUILD_ASSERT(DT_INST_PROP(i, pwm_on_delay) <= UINT16_MAX,              \
		     "Maximum pwm-on-delay is 65535 usec");                    \
                                                                               \
	static const struct clock_control_pwm_config clock_control_pwm_config_##i = { \
		.pwm_dt = PWM_DT_SPEC_INST_GET(i),                             \
		.pwm_on_delay = DT_INST_PROP(i, pwm_on_delay),                 \
	};                                                                     \
                                                                               \
	static struct clock_control_pwm_data clock_control_pwm_data_##i = {    \
		.clock_frequency = DT_INST_PROP_OR(i, clock_frequency, 0),     \
	};                                                                     \
                                                                               \
	DEVICE_DT_INST_DEFINE(i, clock_control_pwm_init, NULL, &clock_control_pwm_data_##i, \
			      &clock_control_pwm_config_##i, POST_KERNEL,      \
			      CONFIG_CLOCK_CONTROL_PWM_INIT_PRIORITY, &clock_control_pwm_api);

DT_INST_FOREACH_STATUS_OKAY(PWM_CLOCK_INIT)
``` | /content/code_sandbox/drivers/clock_control/clock_control_pwm.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,057 |
```unknown
config CLOCK_CONTROL_RENESAS_RA
bool "Renesas RA series clock generation circuit driver"
default y
depends on DT_HAS_RENESAS_RA_CLOCK_GENERATION_CIRCUIT_ENABLED
help
Enable Renesas RA series clock generation circuit driver.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.renesas_ra | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 57 |
```c
/*
* an affiliate of Cypress Semiconductor Corporation
*
*/
/**
* @brief Clock control driver for Infineon CAT1 MCU family.
*/
#include <zephyr/drivers/clock_control.h>
#include <cyhal_clock.h>
#include <cyhal_utils.h>
#include <cyhal_clock_impl.h>
/* Dependency ordinal of the first clock source referenced by node N. */
#define GET_CLK_SOURCE_ORD(N) DT_DEP_ORD(DT_CLOCKS_CTLR_BY_IDX(DT_NODELABEL(N), 0))

/* Enumeration of clocks enabled in the devicetree; used to index the clock
 * info table. Only enabled nodes get an enumerator, so the table is dense.
 */
enum {
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_imo), okay)
	INFINEON_CAT1_CLOCK_IMO,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_iho), okay)
	INFINEON_CAT1_CLOCK_IHO,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux0), okay)
	INFINEON_CAT1_CLOCK_PATHMUX0,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux1), okay)
	INFINEON_CAT1_CLOCK_PATHMUX1,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux2), okay)
	INFINEON_CAT1_CLOCK_PATHMUX2,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux3), okay)
	INFINEON_CAT1_CLOCK_PATHMUX3,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux4), okay)
	INFINEON_CAT1_CLOCK_PATHMUX4,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf0), okay)
	INFINEON_CAT1_CLOCK_HF0,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf1), okay)
	INFINEON_CAT1_CLOCK_HF1,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf2), okay)
	INFINEON_CAT1_CLOCK_HF2,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf3), okay)
	INFINEON_CAT1_CLOCK_HF3,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf4), okay)
	INFINEON_CAT1_CLOCK_HF4,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf5), okay)
	INFINEON_CAT1_CLOCK_HF5,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf6), okay)
	INFINEON_CAT1_CLOCK_HF6,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf7), okay)
	INFINEON_CAT1_CLOCK_HF7,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf8), okay)
	INFINEON_CAT1_CLOCK_HF8,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf9), okay)
	INFINEON_CAT1_CLOCK_HF9,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf10), okay)
	INFINEON_CAT1_CLOCK_HF10,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf11), okay)
	INFINEON_CAT1_CLOCK_HF11,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf12), okay)
	INFINEON_CAT1_CLOCK_HF12,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf13), okay)
	INFINEON_CAT1_CLOCK_HF13,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_fast), okay)
	INFINEON_CAT1_CLOCK_FAST,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_slow), okay)
	INFINEON_CAT1_CLOCK_SLOW,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_peri), okay)
	INFINEON_CAT1_CLOCK_PERI,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(pll0), okay)
	INFINEON_CAT1_CLOCK_PLL0,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(pll1), okay)
	INFINEON_CAT1_CLOCK_PLL1,
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(fll0), okay)
	INFINEON_CAT1_CLOCK_FLL0,
#endif

	/* Count of enabled clock */
	INFINEON_CAT1_ENABLED_CLOCK_COUNT
}; /* infineon_cat1_clock_info_name_t */

/* Clock info structure */
struct infineon_cat1_clock_info_t {
	cyhal_clock_t obj;   /* Hal Clock object */
	uint32_t dt_ord;     /* Device tree node's dependency ordinal */
};

/* Lookup table which presents clock objects (cyhal_clock_t) correspondence
 * to ordinal number of device tree clock nodes. The HAL objects themselves
 * are populated at init time; only dt_ord is statically initialized.
 */
static struct infineon_cat1_clock_info_t
	clock_info_table[INFINEON_CAT1_ENABLED_CLOCK_COUNT] = {

	/* We always have IMO */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_imo), okay)
	[INFINEON_CAT1_CLOCK_IMO] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_imo)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_iho), okay)
	[INFINEON_CAT1_CLOCK_IHO] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_iho)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux0), okay)
	[INFINEON_CAT1_CLOCK_PATHMUX0] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(path_mux0)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux1), okay)
	[INFINEON_CAT1_CLOCK_PATHMUX1] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(path_mux1)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux2), okay)
	[INFINEON_CAT1_CLOCK_PATHMUX2] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(path_mux2)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux3), okay)
	[INFINEON_CAT1_CLOCK_PATHMUX3] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(path_mux3)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux4), okay)
	[INFINEON_CAT1_CLOCK_PATHMUX4] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(path_mux4)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf0), okay)
	[INFINEON_CAT1_CLOCK_HF0] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_hf0)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf1), okay)
	[INFINEON_CAT1_CLOCK_HF1] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_hf1)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf2), okay)
	[INFINEON_CAT1_CLOCK_HF2] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_hf2)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf3), okay)
	[INFINEON_CAT1_CLOCK_HF3] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_hf3)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf4), okay)
	[INFINEON_CAT1_CLOCK_HF4] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_hf4)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf5), okay)
	[INFINEON_CAT1_CLOCK_HF5] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_hf5)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf6), okay)
	[INFINEON_CAT1_CLOCK_HF6] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_hf6)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf7), okay)
	[INFINEON_CAT1_CLOCK_HF7] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_hf7)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf8), okay)
	[INFINEON_CAT1_CLOCK_HF8] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_hf8)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf9), okay)
	[INFINEON_CAT1_CLOCK_HF9] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_hf9)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf10), okay)
	[INFINEON_CAT1_CLOCK_HF10] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_hf10)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf11), okay)
	[INFINEON_CAT1_CLOCK_HF11] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_hf11)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf12), okay)
	[INFINEON_CAT1_CLOCK_HF12] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_hf12)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf13), okay)
	[INFINEON_CAT1_CLOCK_HF13] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_hf13)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_fast), okay)
	[INFINEON_CAT1_CLOCK_FAST] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_fast)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_slow), okay)
	[INFINEON_CAT1_CLOCK_SLOW] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_slow)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_peri), okay)
	[INFINEON_CAT1_CLOCK_PERI] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(clk_peri)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(pll0), okay)
	[INFINEON_CAT1_CLOCK_PLL0] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(pll0)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(pll1), okay)
	[INFINEON_CAT1_CLOCK_PLL1] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(pll1)) },
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(fll0), okay)
	[INFINEON_CAT1_CLOCK_FLL0] = { .dt_ord = DT_DEP_ORD(DT_NODELABEL(fll0)) },
#endif
};
/*
 * Reserve @p reserve_obj into @p clock_obj and route @p clock_source_obj
 * into it. Returns CY_RSLT_SUCCESS or the first failing HAL status.
 *
 * Fix: the original carried ARG_UNUSED(clock_source_obj) even though the
 * parameter is passed to cyhal_clock_set_source() — misleading dead code,
 * removed.
 */
static cy_rslt_t _configure_path_mux(cyhal_clock_t *clock_obj,
				     cyhal_clock_t *clock_source_obj,
				     const cyhal_clock_t *reserve_obj)
{
	cy_rslt_t rslt;

	rslt = cyhal_clock_reserve(clock_obj, reserve_obj);
	if (rslt == CY_RSLT_SUCCESS) {
		rslt = cyhal_clock_set_source(clock_obj, clock_source_obj);
	}

	return rslt;
}
/*
 * Reserve an HF clock, set its source and divider, then enable it.
 * Stops at the first HAL call that fails and returns its status.
 */
static cy_rslt_t _configure_clk_hf(cyhal_clock_t *clock_obj,
				   cyhal_clock_t *clock_source_obj,
				   const cyhal_clock_t *reserve_obj,
				   uint32_t clock_div)
{
	cy_rslt_t status = cyhal_clock_reserve(clock_obj, reserve_obj);

	if (status == CY_RSLT_SUCCESS) {
		status = cyhal_clock_set_source(clock_obj, clock_source_obj);
	}
	if (status == CY_RSLT_SUCCESS) {
		status = cyhal_clock_set_divider(clock_obj, clock_div);
	}
	if (status == CY_RSLT_SUCCESS) {
		status = cyhal_clock_set_enabled(clock_obj, true, true);
	}

	return status;
}
/*
 * Reserve an FLL/PLL, program its target frequency and enable it.
 * Stops at the first HAL call that fails and returns its status.
 */
static cy_rslt_t _configure_clk_frequency_and_enable(cyhal_clock_t *clock_obj,
						     cyhal_clock_t *clock_source_obj,
						     const cyhal_clock_t *reserve_obj,
						     uint32_t frequency)
{
	/* Source selection is handled by the reserved path; the parameter is
	 * kept only for signature symmetry with the other helpers.
	 */
	ARG_UNUSED(clock_source_obj);

	cy_rslt_t status = cyhal_clock_reserve(clock_obj, reserve_obj);

	if (status == CY_RSLT_SUCCESS) {
		status = cyhal_clock_set_frequency(clock_obj, frequency, NULL);
	}
	if (status == CY_RSLT_SUCCESS) {
		status = cyhal_clock_set_enabled(clock_obj, true, true);
	}

	return status;
}
/*
 * Look up the HAL clock object whose devicetree dependency ordinal matches
 * @p dt_ord. Returns NULL when no enabled clock node has that ordinal.
 */
static cyhal_clock_t *_get_hal_obj_from_ord(uint32_t dt_ord)
{
	for (uint32_t idx = 0U; idx < INFINEON_CAT1_ENABLED_CLOCK_COUNT; idx++) {
		if (clock_info_table[idx].dt_ord == dt_ord) {
			return &clock_info_table[idx].obj;
		}
	}

	return NULL;
}
#if DT_NODE_HAS_STATUS(DT_NODELABEL(dpll_hp), okay)
/* Default (weak) clock startup error handler: hang so the failure is
 * observable under a debugger. Applications may override it.
 */
__WEAK void cycfg_ClockStartupError(uint32_t error)
{
	(void)error; /* Suppress the compiler warning */
	while (1) {
	}
}

/* Configure and enable the DPLL-HP (high-precision PLL) path 0.
 * NOTE(review): the loop-filter/trim constants below look like values
 * exported from a Device Configurator run — confirm against the board's
 * generated cycfg before changing any of them.
 */
void Cy_SysClk_Dpll_Hp0_Init(void)
{
#define CY_CFG_SYSCLK_PLL_ERROR 3

	static cy_stc_dpll_hp_config_t srss_0_clock_0_pll500m_0_hp_pllConfig = {
		.pDiv = 0,
		.nDiv = 15,
		.kDiv = 1,
		.nDivFract = 0,
		.freqModeSel = CY_SYSCLK_DPLL_HP_CLK50MHZ_1US_CNT_VAL,
		.ivrTrim = 0x8U,
		.clkrSel = 0x1U,
		.alphaCoarse = 0xCU,
		.betaCoarse = 0x5U,
		.flockThresh = 0x3U,
		.flockWait = 0x6U,
		.flockLkThres = 0x7U,
		.flockLkWait = 0x4U,
		.alphaExt = 0x14U,
		.betaExt = 0x14U,
		.lfEn = 0x1U,
		.dcEn = 0x1U,
		.outputMode = CY_SYSCLK_FLLPLL_OUTPUT_AUTO,
	};
	static cy_stc_pll_manual_config_t srss_0_clock_0_pll500m_0_pllConfig = {
		.hpPllCfg = &srss_0_clock_0_pll500m_0_hp_pllConfig,
	};

#if !defined(CY_PDL_TZ_ENABLED)
	/* Already running (e.g. configured by a secure image): nothing to do. */
	if (Cy_SysClk_PllIsEnabled(SRSS_DPLL_HP_0_PATH_NUM)) {
		return;
	}
#endif
	Cy_SysClk_PllDisable(SRSS_DPLL_HP_0_PATH_NUM);

	if (CY_SYSCLK_SUCCESS !=
	    Cy_SysClk_PllManualConfigure(SRSS_DPLL_HP_0_PATH_NUM,
					 &srss_0_clock_0_pll500m_0_pllConfig)) {
		cycfg_ClockStartupError(CY_CFG_SYSCLK_PLL_ERROR);
	}
	if (CY_SYSCLK_SUCCESS != Cy_SysClk_PllEnable(SRSS_DPLL_HP_0_PATH_NUM, 10000u)) {
		cycfg_ClockStartupError(CY_CFG_SYSCLK_PLL_ERROR);
	}
}
#endif
static int clock_control_infineon_cat1_init(const struct device *dev)
{
ARG_UNUSED(dev);
cy_rslt_t rslt;
cyhal_clock_t *clock_obj = NULL;
cyhal_clock_t *clock_source_obj = NULL;
__attribute__((unused)) uint32 frequency;
uint32 clock_div;
/* Configure IMO */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_imo), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_IMO].obj;
if (cyhal_clock_get(clock_obj, &CYHAL_CLOCK_RSC_IMO)) {
return -EIO;
}
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_iho), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_IHO].obj;
if (cyhal_clock_get(clock_obj, &CYHAL_CLOCK_RSC_IHO)) {
return -EIO;
}
#endif
#if !DT_NODE_HAS_STATUS(DT_NODELABEL(clk_imo), okay) && \
!DT_NODE_HAS_STATUS(DT_NODELABEL(clk_iho), okay)
#error "IMO clock or IHO clock must be enabled"
#endif
/* Configure the PathMux[0] to source defined in tree device 'path_mux0' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux0), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_PATHMUX0].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(path_mux0));
if (_configure_path_mux(clock_obj, clock_source_obj, &CYHAL_CLOCK_PATHMUX[0])) {
return -EIO;
}
#endif
/* Configure the PathMux[1] to source defined in tree device 'path_mux1' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux1), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_PATHMUX1].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(path_mux1));
if (_configure_path_mux(clock_obj, clock_source_obj, &CYHAL_CLOCK_PATHMUX[1])) {
return -EIO;
}
#endif
/* Configure the PathMux[2] to source defined in tree device 'path_mux2' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux2), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_PATHMUX2].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(path_mux2));
if (_configure_path_mux(clock_obj, clock_source_obj, &CYHAL_CLOCK_PATHMUX[2])) {
return -EIO;
}
#endif
/* Configure the PathMux[3] to source defined in tree device 'path_mux3' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux3), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_PATHMUX3].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(path_mux3));
if (_configure_path_mux(clock_obj, clock_source_obj, &CYHAL_CLOCK_PATHMUX[3])) {
return -EIO;
}
#endif
/* Configure the PathMux[4] to source defined in tree device 'path_mux4' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(path_mux4), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_PATHMUX4].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(path_mux4));
if (_configure_path_mux(clock_obj, clock_source_obj, &CYHAL_CLOCK_PATHMUX[4])) {
return -EIO;
}
#endif
/* Configure FLL0 */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(fll0), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_FLL0].obj;
frequency = DT_PROP(DT_NODELABEL(fll0), clock_frequency);
rslt = _configure_clk_frequency_and_enable(clock_obj, clock_source_obj,
&CYHAL_CLOCK_FLL, frequency);
if (rslt) {
return -EIO;
}
#endif
/* Configure PLL0 */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(pll0), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_PLL0].obj;
frequency = DT_PROP(DT_NODELABEL(pll0), clock_frequency);
rslt = _configure_clk_frequency_and_enable(clock_obj, clock_source_obj,
&CYHAL_CLOCK_PLL[0], frequency);
if (rslt) {
return -EIO;
}
#endif
/* Configure PLL1 */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(pll1), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_PLL1].obj;
frequency = DT_PROP(DT_NODELABEL(pll1), clock_frequency);
rslt = _configure_clk_frequency_and_enable(clock_obj, clock_source_obj,
&CYHAL_CLOCK_PLL[1], frequency);
if (rslt) {
return -EIO;
}
#endif
/* Configure the HF[0] to source defined in tree device 'clk_hf0' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf0), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_HF0].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(clk_hf0));
clock_div = DT_PROP(DT_NODELABEL(clk_hf0), clock_div);
if (_configure_clk_hf(clock_obj, clock_source_obj, &CYHAL_CLOCK_HF[0], clock_div)) {
return -EIO;
}
#endif
/* Configure the HF[1] to source defined in tree device 'clk_hf1' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf1), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_HF1].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(clk_hf1));
clock_div = DT_PROP(DT_NODELABEL(clk_hf1), clock_div);
if (_configure_clk_hf(clock_obj, clock_source_obj, &CYHAL_CLOCK_HF[1], clock_div)) {
return -EIO;
}
#endif
/* Configure the HF[2] to source defined in tree device 'clk_hf2' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf2), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_HF2].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(clk_hf2));
clock_div = DT_PROP(DT_NODELABEL(clk_hf2), clock_div);
if (_configure_clk_hf(clock_obj, clock_source_obj, &CYHAL_CLOCK_HF[2], clock_div)) {
return -EIO;
}
#endif
/* Configure the HF[3] to source defined in tree device 'clk_hf3' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf3), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_HF3].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(clk_hf3));
clock_div = DT_PROP(DT_NODELABEL(clk_hf3), clock_div);
if (_configure_clk_hf(clock_obj, clock_source_obj, &CYHAL_CLOCK_HF[3], clock_div)) {
return -EIO;
}
#endif
/* Configure the HF[4] to source defined in tree device 'clk_hf4' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf4), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_HF4].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(clk_hf4));
clock_div = DT_PROP(DT_NODELABEL(clk_hf4), clock_div);
if (_configure_clk_hf(clock_obj, clock_source_obj, &CYHAL_CLOCK_HF[4], clock_div)) {
return -EIO;
}
#endif
/* Configure the HF[5] to source defined in tree device 'clk_hf5' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf5), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_HF5].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(clk_hf5));
clock_div = DT_PROP(DT_NODELABEL(clk_hf5), clock_div);
if (_configure_clk_hf(clock_obj, clock_source_obj, &CYHAL_CLOCK_HF[5], clock_div)) {
return -EIO;
}
#endif
/* Configure the HF[6] to source defined in tree device 'clk_hf6' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf6), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_HF6].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(clk_hf6));
clock_div = DT_PROP(DT_NODELABEL(clk_hf6), clock_div);
if (_configure_clk_hf(clock_obj, clock_source_obj, &CYHAL_CLOCK_HF[6], clock_div)) {
return -EIO;
}
#endif
/* Configure the HF[7] to source defined in tree device 'clk_hf7' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf7), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_HF7].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(clk_hf7));
clock_div = DT_PROP(DT_NODELABEL(clk_hf7), clock_div);
if (_configure_clk_hf(clock_obj, clock_source_obj, &CYHAL_CLOCK_HF[7], clock_div)) {
return -EIO;
}
#endif
/* Configure the HF[8] to source defined in tree device 'clk_hf8' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf8), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_HF8].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(clk_hf8));
clock_div = DT_PROP(DT_NODELABEL(clk_hf8), clock_div);
if (_configure_clk_hf(clock_obj, clock_source_obj, &CYHAL_CLOCK_HF[8], clock_div)) {
return -EIO;
}
#endif
/* Configure the HF[9] to source defined in tree device 'clk_hf9' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf9), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_HF9].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(clk_hf9));
clock_div = DT_PROP(DT_NODELABEL(clk_hf9), clock_div);
if (_configure_clk_hf(clock_obj, clock_source_obj, &CYHAL_CLOCK_HF[9], clock_div)) {
return -EIO;
}
#endif
/* Configure the HF[10] to source defined in tree device 'clk_hf10' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf10), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_HF10].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(clk_hf10));
clock_div = DT_PROP(DT_NODELABEL(clk_hf10), clock_div);
if (_configure_clk_hf(clock_obj, clock_source_obj, &CYHAL_CLOCK_HF[10], clock_div)) {
return -EIO;
}
#endif
/* Configure the HF[11] to source defined in tree device 'clk_hf11' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf11), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_HF11].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(clk_hf11));
clock_div = DT_PROP(DT_NODELABEL(clk_hf11), clock_div);
if (_configure_clk_hf(clock_obj, clock_source_obj, &CYHAL_CLOCK_HF[11], clock_div)) {
return -EIO;
}
#endif
/* Configure the HF[12] to source defined in tree device 'clk_hf12' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf12), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_HF12].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(clk_hf12));
clock_div = DT_PROP(DT_NODELABEL(clk_hf12), clock_div);
if (_configure_clk_hf(clock_obj, clock_source_obj, &CYHAL_CLOCK_HF[12], clock_div)) {
return -EIO;
}
#endif
/* Configure the HF[13] to source defined in tree device 'clk_hf13' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_hf13), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_HF13].obj;
clock_source_obj = _get_hal_obj_from_ord(GET_CLK_SOURCE_ORD(clk_hf13));
clock_div = DT_PROP(DT_NODELABEL(clk_hf13), clock_div);
if (_configure_clk_hf(clock_obj, clock_source_obj, &CYHAL_CLOCK_HF[13], clock_div)) {
return -EIO;
}
#endif
/* Configure the clock fast to source defined in tree device 'clk_fast' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_fast), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_FAST].obj;
clock_div = DT_PROP(DT_NODELABEL(clk_fast), clock_div);
rslt = cyhal_clock_reserve(clock_obj, &CYHAL_CLOCK_FAST);
if (rslt == CY_RSLT_SUCCESS) {
rslt = cyhal_clock_set_divider(clock_obj, clock_div);
}
if (rslt) {
return -EIO;
}
#endif
/* Configure the clock peri to source defined in tree device 'clk_peri' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_peri), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_PERI].obj;
clock_div = DT_PROP(DT_NODELABEL(clk_peri), clock_div);
rslt = cyhal_clock_reserve(clock_obj, &CYHAL_CLOCK_PERI);
if (rslt == CY_RSLT_SUCCESS) {
rslt = cyhal_clock_set_divider(clock_obj, clock_div);
}
if (rslt) {
return -EIO;
}
#endif
/* Configure the clock slow to source defined in tree device 'clk_slow' node */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_slow), okay)
clock_obj = &clock_info_table[INFINEON_CAT1_CLOCK_SLOW].obj;
clock_div = DT_PROP(DT_NODELABEL(clk_slow), clock_div);
rslt = cyhal_clock_reserve(clock_obj, &CYHAL_CLOCK_SLOW);
if (rslt == CY_RSLT_SUCCESS) {
rslt = cyhal_clock_set_divider(clock_obj, clock_div);
}
if (rslt) {
return -EIO;
}
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(dpll_hp), okay)
Cy_SysClk_Dpll_Hp0_Init();
SystemCoreClockUpdate();
#endif
return (int) rslt;
}
/*
 * Shared .on/.off stub. The CAT1 clock tree is configured once at boot;
 * runtime gating of individual clocks is not implemented.
 */
static int clock_control_infineon_cat_on_off(const struct device *dev,
					     clock_control_subsys_t sys)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(sys);

	/* On/off functionality is not supported */
	return -ENOSYS;
}
/* clock_control API table: both on and off return -ENOSYS (see stub above). */
static const struct clock_control_driver_api clock_control_infineon_cat1_api = {
	.on = clock_control_infineon_cat_on_off,
	.off = clock_control_infineon_cat_on_off
};

/*
 * Register the driver on the root oscillator node so the whole tree is set
 * up at PRE_KERNEL_1.
 * NOTE(review): if both clk_imo and clk_iho are enabled, the same init
 * function runs once per device — confirm the HAL tolerates re-reserving
 * already-configured clocks in that case.
 */
#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_imo), okay)
DEVICE_DT_DEFINE(DT_NODELABEL(clk_imo),
		 clock_control_infineon_cat1_init,
		 NULL,
		 NULL,
		 NULL,
		 PRE_KERNEL_1,
		 CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		 &clock_control_infineon_cat1_api);
#endif

#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_iho), okay)
DEVICE_DT_DEFINE(DT_NODELABEL(clk_iho),
		 clock_control_infineon_cat1_init,
		 NULL,
		 NULL,
		 NULL,
		 PRE_KERNEL_1,
		 CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		 &clock_control_infineon_cat1_api);
#endif
``` | /content/code_sandbox/drivers/clock_control/clock_control_ifx_cat1.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 7,044 |
```c
/*
*/
#define DT_DRV_COMPAT nordic_nrf_auxpll
#include <errno.h>
#include <stdint.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/util.h>
#include <zephyr/toolchain.h>
#include <hal/nrf_auxpll.h>
/* maximum lock time in ms, >10x time observed experimentally */
#define AUXPLL_LOCK_TIME_MAX_MS 20
/* lock wait step in ms */
#define AUXPLL_LOCK_WAIT_STEP_MS 1

/* Static (devicetree-derived) configuration for one AUXPLL instance. */
struct clock_control_nrf_auxpll_config {
	NRF_AUXPLL_Type *auxpll;          /* AUXPLL register block */
	uint32_t ref_clk_hz;              /* reference clock frequency, Hz */
	uint32_t ficr_ctune;              /* address of the FICR CTUNE trim byte (read via sys_read8) */
	nrf_auxpll_config_t cfg;          /* HAL configuration applied at init */
	uint16_t frequency;               /* fractional frequency control word */
	nrf_auxpll_ctrl_outsel_t out_div; /* output divider selection */
};
/*
 * Start the AUXPLL and poll for lock, sleeping AUXPLL_LOCK_WAIT_STEP_MS
 * between checks for up to AUXPLL_LOCK_TIME_MAX_MS.
 * Returns 0 once locked, -ETIMEDOUT otherwise.
 */
static int clock_control_nrf_auxpll_on(const struct device *dev, clock_control_subsys_t sys)
{
	const struct clock_control_nrf_auxpll_config *config = dev->config;
	unsigned int elapsed_ms = 0U;

	ARG_UNUSED(sys);

	nrf_auxpll_task_trigger(config->auxpll, NRF_AUXPLL_TASK_START);

	while (true) {
		if (nrf_auxpll_mode_locked_check(config->auxpll)) {
			return 0;
		}
		k_msleep(AUXPLL_LOCK_WAIT_STEP_MS);
		elapsed_ms += AUXPLL_LOCK_WAIT_STEP_MS;
		if (elapsed_ms >= AUXPLL_LOCK_TIME_MAX_MS) {
			return -ETIMEDOUT;
		}
	}
}
/*
 * Stop the AUXPLL and busy-wait until the hardware reports it is no
 * longer running. Always succeeds.
 */
static int clock_control_nrf_auxpll_off(const struct device *dev, clock_control_subsys_t sys)
{
	const struct clock_control_nrf_auxpll_config *cfg = dev->config;

	ARG_UNUSED(sys);

	nrf_auxpll_task_trigger(cfg->auxpll, NRF_AUXPLL_TASK_STOP);

	while (nrf_auxpll_running_check(cfg->auxpll)) {
		/* spin until the stop task takes effect */
	}

	return 0;
}
/*
 * Report the AUXPLL output frequency in Hz:
 *
 *   rate = (ratio * f_ref + f_ref * frequency / (MaximumDiv + 1)) / out_div
 *
 * i.e. the integer ratio plus the fractional contribution programmed via
 * the FREQUENCY register, divided by the output divider.
 * NOTE(review): 'out_div' is an nrf_auxpll_ctrl_outsel_t enum used directly
 * as a numeric divisor -- assumes the enum encodes the actual division
 * factor; confirm against the HAL definition.
 */
static int clock_control_nrf_auxpll_get_rate(const struct device *dev, clock_control_subsys_t sys,
					     uint32_t *rate)
{
	const struct clock_control_nrf_auxpll_config *config = dev->config;
	uint8_t ratio;
	ARG_UNUSED(sys);
	ratio = nrf_auxpll_static_ratio_get(config->auxpll);
	/* 64-bit intermediate avoids overflow of ref_clk_hz * frequency */
	*rate = (ratio * config->ref_clk_hz +
		 (config->ref_clk_hz * (uint64_t)config->frequency) /
			 (AUXPLL_AUXPLLCTRL_FREQUENCY_FREQUENCY_MaximumDiv + 1U)) /
		config->out_div;
	return 0;
}
/*
 * Report the clock status: ON only when the PLL is locked, OFF in every
 * other state.
 */
static enum clock_control_status clock_control_nrf_auxpll_get_status(const struct device *dev,
								     clock_control_subsys_t sys)
{
	const struct clock_control_nrf_auxpll_config *cfg = dev->config;

	ARG_UNUSED(sys);

	return nrf_auxpll_mode_locked_check(cfg->auxpll) ? CLOCK_CONTROL_STATUS_ON
							 : CLOCK_CONTROL_STATUS_OFF;
}
/* clock_control API: gate via on/off, query lock status and output rate. */
static const struct clock_control_driver_api clock_control_nrf_auxpll_api = {
	.on = clock_control_nrf_auxpll_on,
	.off = clock_control_nrf_auxpll_off,
	.get_rate = clock_control_nrf_auxpll_get_rate,
	.get_status = clock_control_nrf_auxpll_get_status,
};
/*
 * One-time configuration of the AUXPLL from devicetree/FICR data:
 * frequency (fractional) setting, CTUNE trim read from FICR, static
 * config and output divider, then locked mode is selected. The PLL is
 * only started later by clock_control_nrf_auxpll_on().
 * NOTE(review): trim/config/outsel writes are bracketed by
 * nrf_auxpll_lock()/nrf_auxpll_unlock() -- assumed to be the HAL's
 * register write-access sequence; confirm against the HAL docs.
 */
static int clock_control_nrf_auxpll_init(const struct device *dev)
{
	const struct clock_control_nrf_auxpll_config *config = dev->config;
	nrf_auxpll_ctrl_frequency_set(config->auxpll, config->frequency);
	nrf_auxpll_lock(config->auxpll);
	nrf_auxpll_trim_ctune_set(config->auxpll, sys_read8(config->ficr_ctune));
	nrf_auxpll_config_set(config->auxpll, &config->cfg);
	nrf_auxpll_ctrl_outsel_set(config->auxpll, config->out_div);
	nrf_auxpll_unlock(config->auxpll);
	nrf_auxpll_ctrl_mode_set(config->auxpll, NRF_AUXPLL_CTRL_MODE_LOCKED);
	return 0;
}
/*
 * Define one AUXPLL clock-control device per enabled devicetree instance:
 * register block from 'reg', reference rate from the parent 'clocks' node,
 * CTUNE trim location from the 'nordic,ficrs' phandle+offset, and the
 * remaining PLL options from the 'nordic,*' properties.
 */
#define CLOCK_CONTROL_NRF_AUXPLL_DEFINE(n)                                                         \
	static const struct clock_control_nrf_auxpll_config config##n = {                          \
		.auxpll = (NRF_AUXPLL_Type *)DT_INST_REG_ADDR(n),                                  \
		.ref_clk_hz = DT_PROP(DT_INST_CLOCKS_CTLR(n), clock_frequency),                    \
		.ficr_ctune = DT_REG_ADDR(DT_INST_PHANDLE(n, nordic_ficrs)) +                      \
			      DT_INST_PHA(n, nordic_ficrs, offset),                                \
		.cfg =                                                                             \
			{                                                                          \
				.outdrive = DT_INST_PROP(n, nordic_out_drive),                     \
				.current_tune = DT_INST_PROP(n, nordic_current_tune),              \
				.sdm_off = DT_INST_PROP(n, nordic_sdm_disable),                    \
				.dither_off = DT_INST_PROP(n, nordic_dither_disable),              \
				.range = DT_INST_ENUM_IDX(n, nordic_range),                        \
			},                                                                         \
		.frequency = DT_INST_PROP(n, nordic_frequency),                                    \
		.out_div = DT_INST_PROP(n, nordic_out_div),                                        \
	};                                                                                         \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(n, clock_control_nrf_auxpll_init, NULL, NULL, &config##n,            \
			      PRE_KERNEL_1, CONFIG_CLOCK_CONTROL_INIT_PRIORITY,                    \
			      &clock_control_nrf_auxpll_api);
DT_INST_FOREACH_STATUS_OKAY(CLOCK_CONTROL_NRF_AUXPLL_DEFINE)
``` | /content/code_sandbox/drivers/clock_control/clock_control_nrf_auxpll.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,187 |
```c
/*
*
*/
#include <assert.h>
#include <soc.h>
#include <zephyr/sys/onoff.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/smartbond_clock_control.h>
#include <zephyr/logging/log.h>
#include <zephyr/pm/device.h>
#include <da1469x_clock.h>
#include <da1469x_qspic.h>
#if defined(CONFIG_BT_DA1469X)
#include <shm.h>
#endif
#include <zephyr/drivers/regulator.h>
LOG_MODULE_REGISTER(clock_control, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
#define DT_DRV_COMPAT smartbond_clock
/*
 * Runtime state of the low-power clock sources.
 * "started" means the oscillator has been enabled; "ready" means it was
 * calibrated (RCX/RC32K) or has settled (XTAL32K) and can be trusted.
 * Frequencies start from devicetree nominal values and are refreshed by
 * calibration.
 */
struct lpc_clock_state {
	uint8_t rcx_started : 1;
	uint8_t rcx_ready : 1;
	uint8_t rc32k_started : 1;
	uint8_t rc32k_ready : 1;
	uint8_t xtal32k_started : 1;
	uint8_t xtal32k_ready : 1;
	uint32_t rcx_freq;	/* last measured RCX frequency in Hz */
	uint32_t rc32k_freq;	/* last measured RC32K frequency in Hz */
} lpc_clock_state = {
	.rcx_freq = DT_PROP(DT_NODELABEL(rcx), clock_frequency),
	.rc32k_freq = DT_PROP(DT_NODELABEL(rc32k), clock_frequency),
};
/* Seconds between consecutive RC oscillator calibration runs. */
#define CALIBRATION_INTERVAL CONFIG_SMARTBOND_LP_OSC_CALIBRATION_INTERVAL
#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
/* Kernel timer frequency; refreshed after each calibration cycle. */
extern int z_clock_hw_cycles_per_sec;
#endif
static void calibration_work_cb(struct k_work *work);
static void xtal32k_settle_work_cb(struct k_work *work);
static enum smartbond_clock smartbond_source_clock(enum smartbond_clock clk);
/* Periodic calibration of the RC oscillators (RCX / RC32K). */
static K_WORK_DELAYABLE_DEFINE(calibration_work, calibration_work_cb);
/* One-shot work that marks XTAL32K usable after its DT settle time. */
static K_WORK_DELAYABLE_DEFINE(xtal32k_settle_work, xtal32k_settle_work_cb);
/* PLL can be turned on by requesting it explicitly or when USB is attached */
/* PLL requested in DT or manually by application */
#define PLL_REQUEST_PLL 1
/* PLL requested indirectly by USB driver */
#define PLL_REQUEST_USB 2
/* Keeps information about blocks that requested PLL */
static uint8_t pll_requests;
/*
 * Periodic work item: calibrate whichever RC oscillators are running,
 * refresh their measured frequencies, propagate the result to the CMAC
 * sleep clock and kernel timer frequency when the oscillator is the
 * active LP clock, then re-arm itself.
 */
static void calibration_work_cb(struct k_work *work)
{
	if (lpc_clock_state.rcx_started) {
		da1469x_clock_lp_rcx_calibrate();
		lpc_clock_state.rcx_ready = true;
		lpc_clock_state.rcx_freq = da1469x_clock_lp_rcx_freq_get();
		LOG_DBG("RCX calibration done, RCX freq: %d",
			(int)lpc_clock_state.rcx_freq);
#if defined(CONFIG_BT_DA1469X)
		/* Update CMAC sleep clock with calculated frequency if RCX is set as lp_clk */
		if ((CRG_TOP->CLK_CTRL_REG & CRG_TOP_CLK_CTRL_REG_LP_CLK_SEL_Msk) ==
		    (1 << CRG_TOP_CLK_CTRL_REG_LP_CLK_SEL_Pos)) {
			cmac_request_lp_clock_freq_set(lpc_clock_state.rcx_freq);
		}
#endif
	}
	if (lpc_clock_state.rc32k_started) {
		da1469x_clock_lp_rc32k_calibrate();
		lpc_clock_state.rc32k_ready = true;
		lpc_clock_state.rc32k_freq = da1469x_clock_lp_rc32k_freq_get();
		LOG_DBG("RC32K calibration done, RC32K freq: %d",
			(int)lpc_clock_state.rc32k_freq);
	}
	/* Re-arm for the next calibration cycle. */
	k_work_schedule(&calibration_work,
			K_MSEC(1000 * CALIBRATION_INTERVAL));
#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
	switch (smartbond_source_clock(SMARTBOND_CLK_LP_CLK)) {
	case SMARTBOND_CLK_RCX:
		z_clock_hw_cycles_per_sec = lpc_clock_state.rcx_freq;
		break;
	case SMARTBOND_CLK_RC32K:
		z_clock_hw_cycles_per_sec = lpc_clock_state.rc32k_freq;
		break;
	default:
		break;
	}
#endif
}
/*
 * Delayed work: after the devicetree settle time has elapsed, mark
 * XTAL32K as usable and, when it is the selected LP clock, inform the
 * CMAC core of the fixed 32768 Hz sleep clock.
 */
static void xtal32k_settle_work_cb(struct k_work *work)
{
	if (!lpc_clock_state.xtal32k_started || lpc_clock_state.xtal32k_ready) {
		return;
	}

	LOG_DBG("XTAL32K settled.");
	lpc_clock_state.xtal32k_ready = true;
#if defined(CONFIG_BT_DA1469X)
	/* Update CMAC sleep clock if XTAL32K is set as lp_clk */
	if ((CRG_TOP->CLK_CTRL_REG & CRG_TOP_CLK_CTRL_REG_LP_CLK_SEL_Msk) ==
	    (2 << CRG_TOP_CLK_CTRL_REG_LP_CLK_SEL_Pos)) {
		cmac_request_lp_clock_freq_set(32768);
	}
#endif
}
/*
 * Enable the RC32K oscillator (if not already on) and schedule the
 * calibration work once, unless a run is already pending.
 */
static void smartbond_start_rc32k(void)
{
	if ((CRG_TOP->CLK_RC32K_REG & CRG_TOP_CLK_RC32K_REG_RC32K_ENABLE_Msk) == 0) {
		CRG_TOP->CLK_RC32K_REG |= CRG_TOP_CLK_RC32K_REG_RC32K_ENABLE_Msk;
	}

	lpc_clock_state.rc32k_started = true;

	if (!lpc_clock_state.rc32k_ready &&
	    !k_work_is_pending(&calibration_work.work)) {
		k_work_schedule(&calibration_work, K_MSEC(1000 * CALIBRATION_INTERVAL));
	}
}
/*
 * Enable the RCX oscillator (if not already started) and schedule the
 * calibration work once, unless a run is already pending.
 */
static void smartbond_start_rcx(void)
{
	if (!lpc_clock_state.rcx_started) {
		lpc_clock_state.rcx_ready = false;
		da1469x_clock_lp_rcx_enable();
		lpc_clock_state.rcx_started = true;
	}

	if (!lpc_clock_state.rcx_ready &&
	    !k_work_is_pending(&calibration_work.work)) {
		k_work_schedule(&calibration_work, K_MSEC(1000 * CALIBRATION_INTERVAL));
	}
}
/*
 * Enable the 32 kHz crystal and arm the settle timer; the oscillator is
 * only marked ready when xtal32k_settle_work_cb() fires.
 */
static void smartbond_start_xtal32k(void)
{
	if (lpc_clock_state.xtal32k_started) {
		return;
	}

	lpc_clock_state.xtal32k_ready = false;
	da1469x_clock_lp_xtal32k_enable();
	lpc_clock_state.xtal32k_started = true;
	k_work_schedule(&xtal32k_settle_work,
			K_MSEC(DT_PROP(DT_NODELABEL(xtal32k), settle_time)));
}
#ifdef CONFIG_REGULATOR
/*
 * Should be used to control PLL when the regulator driver is available.
 * If the latter is available, then the VDD level should be changed when
 * switching to/from PLL. Otherwise, the VDD level is considered to
 * be fixed @1.2V which should support both XTAL32M and PLL system clocks.
 */
/*
 * Enable (status=true) or disable (status=false) the PLL while adjusting
 * the VDD rail accordingly: 1.2V is required for PLL operation, 0.9V is
 * restored after the PLL is stopped. Returns 0 on success, -ENODEV when
 * the regulator is unavailable, -EIO when VDD cannot be raised.
 */
static int smartbond_clock_set_pll_status(bool status)
{
	const struct device *dev = DEVICE_DT_GET(DT_NODELABEL(vdd));
	int ret;
	if (!device_is_ready(dev)) {
		LOG_ERR("Regulator device is not ready");
		return -ENODEV;
	}
	if (status) {
		/* Enabling PLL requires that VDD be raised to 1.2V */
		if (regulator_set_voltage(dev, 1200000, 1200000) == 0) {
			da1469x_clock_sys_pll_enable();
			/* QSPIC read pipe delay should be updated when switching to PLL */
		} else {
			LOG_ERR("Failed to set VDD_LEVEL to 1.2V");
			return -EIO;
		}
	} else {
		/* Disable PLL and switch back to XTAL32M */
		da1469x_clock_sys_pll_disable();
		/* VDD level can now be switched back to 0.9V */
		ret = regulator_set_voltage(dev, 900000, 900000);
		if (ret < 0) {
			/* Non-fatal: PLL is already off, only power saving is lost. */
			LOG_WRN("Failed to set VDD_LEVEL to 0.9V");
		} else {
			/*
			 * System clock should be switched to XTAL32M and VDD should be set to 0.9.
			 * The QSPIC read pipe delay should be updated.
			 */
			da1469x_qspi_set_read_pipe_delay(QSPIC_ID, 2);
		}
	}
	return 0;
}
#endif
/**
 * @brief Enable a Smartbond clock source.
 *
 * For PLL/USB requests the requester bit is recorded in @p pll_requests so
 * the PLL is only released in smartbond_clock_control_off() once every
 * requester is gone.
 *
 * @param dev clock controller device (unused)
 * @param sub_system clock to enable (enum smartbond_clock)
 *
 * @retval 0 on success
 * @retval -ENOTSUP if the clock cannot be enabled by this driver
 * @retval other negative value propagated from PLL/VDD configuration
 */
static inline int smartbond_clock_control_on(const struct device *dev,
					     clock_control_subsys_t sub_system)
{
	enum smartbond_clock clk = (enum smartbond_clock)(sub_system);
	int ret = 0;

	ARG_UNUSED(dev);

	switch (clk) {
	case SMARTBOND_CLK_RC32K:
		smartbond_start_rc32k();
		break;
	case SMARTBOND_CLK_RCX:
		smartbond_start_rcx();
		break;
	case SMARTBOND_CLK_XTAL32K:
		smartbond_start_xtal32k();
		break;
	case SMARTBOND_CLK_RC32M:
		CRG_TOP->CLK_RC32M_REG |= CRG_TOP_CLK_RC32M_REG_RC32M_ENABLE_Msk;
		break;
	case SMARTBOND_CLK_XTAL32M:
		da1469x_clock_sys_xtal32m_init();
		da1469x_clock_sys_xtal32m_enable();
		break;
	case SMARTBOND_CLK_USB:
	case SMARTBOND_CLK_PLL96M:
		/*
		 * BUGFIX: accumulate the requester bit instead of overwriting
		 * the whole mask ('=' would drop a previous requester, letting
		 * smartbond_clock_control_off() stop the PLL while e.g. USB
		 * still needs it).
		 */
		pll_requests |= 1 << (clk - SMARTBOND_CLK_PLL96M);
		if ((CRG_TOP->CLK_CTRL_REG & CRG_TOP_CLK_CTRL_REG_RUNNING_AT_PLL96M_Msk) == 0) {
			if ((CRG_TOP->CLK_CTRL_REG &
			     CRG_TOP_CLK_CTRL_REG_RUNNING_AT_XTAL32M_Msk) == 0) {
				/* The PLL needs a settled XTAL32M reference. */
				da1469x_clock_sys_xtal32m_init();
				da1469x_clock_sys_xtal32m_enable();
				da1469x_clock_sys_xtal32m_wait_to_settle();
			}
#if CONFIG_REGULATOR
			/* Raises VDD to 1.2V before enabling the PLL. */
			ret = smartbond_clock_set_pll_status(true);
#else
			da1469x_clock_sys_pll_enable();
#endif
			if (pll_requests & PLL_REQUEST_USB) {
				/* Clock USB from the PLL instead of HCLK. */
				CRG_TOP->CLK_CTRL_REG &= ~CRG_TOP_CLK_CTRL_REG_USB_CLK_SRC_Msk;
			}
		}
		break;
	default:
		return -ENOTSUP;
	}

	return ret;
}
/**
 * @brief Disable a Smartbond clock source.
 *
 * Sources that are still in use are protected: RC32K can never be stopped
 * (used by the POWERUP/WAKEUP hardware FSMs), an LP oscillator is only
 * stopped while it is not the selected LP clock, and the PLL is only
 * stopped once every requester released it and no DIV1-clocked peripheral
 * depends on it.
 *
 * @param dev clock controller device (unused)
 * @param sub_system clock to disable (enum smartbond_clock)
 *
 * @retval 0 on success
 * @retval -EPERM when the clock may not be stopped right now
 * @retval -ENOTSUP if the clock is not handled
 */
static inline int smartbond_clock_control_off(const struct device *dev,
					      clock_control_subsys_t sub_system)
{
	enum smartbond_clock clk = (enum smartbond_clock)(sub_system);
	int ret = 0;

	ARG_UNUSED(dev);

	switch (clk) {
	case SMARTBOND_CLK_RC32K:
		/* RC32K is used by POWERUP and WAKEUP HW FSM */
		BUILD_ASSERT(DT_NODE_HAS_STATUS(DT_NODELABEL(rc32k), okay),
			     "RC32K is not allowed to be turned off");
		ret = -EPERM;
		break;
	case SMARTBOND_CLK_RCX:
		/* Stop RCX only while LP_CLK_SEL does not point at it (sel != 1). */
		if (((CRG_TOP->CLK_CTRL_REG & CRG_TOP_CLK_CTRL_REG_LP_CLK_SEL_Msk) >>
		     CRG_TOP_CLK_CTRL_REG_LP_CLK_SEL_Pos) != 1) {
			CRG_TOP->CLK_RCX_REG &= ~CRG_TOP_CLK_RCX_REG_RCX_ENABLE_Msk;
			lpc_clock_state.rcx_ready = false;
			lpc_clock_state.rcx_started = false;
		}
		break;
	case SMARTBOND_CLK_XTAL32K:
		/*
		 * BUGFIX: XTAL32K is selected as LP clock for LP_CLK_SEL values
		 * 2 and 3, so it may only be stopped when the selector is BELOW
		 * 2. The previous '> 1' test disabled the crystal exactly while
		 * it was the active LP clock (compare the RCX case above).
		 */
		if (((CRG_TOP->CLK_CTRL_REG & CRG_TOP_CLK_CTRL_REG_LP_CLK_SEL_Msk) >>
		     CRG_TOP_CLK_CTRL_REG_LP_CLK_SEL_Pos) < 2) {
			CRG_TOP->CLK_XTAL32K_REG &= ~CRG_TOP_CLK_XTAL32K_REG_XTAL32K_ENABLE_Msk;
			lpc_clock_state.xtal32k_ready = false;
			lpc_clock_state.xtal32k_started = false;
		}
		break;
	case SMARTBOND_CLK_RC32M:
		/* Disable rc32m only if not used as system clock */
		if ((CRG_TOP->CLK_CTRL_REG & CRG_TOP_CLK_CTRL_REG_RUNNING_AT_RC32M_Msk) == 0) {
			da1469x_clock_sys_rc32m_disable();
		}
		break;
	case SMARTBOND_CLK_XTAL32M:
		/*
		 * XTAL32M is not allowed to be turned off by the application.
		 * NOTE(review): the current behavior (re-init + enable in the
		 * off path) looks like a copy of the 'on' path -- confirm
		 * whether returning -EPERM was intended here.
		 */
		da1469x_clock_sys_xtal32m_init();
		da1469x_clock_sys_xtal32m_enable();
		break;
	case SMARTBOND_CLK_USB:
		/* Switch USB clock to HCLK to allow for resume */
		CRG_TOP->CLK_CTRL_REG |= CRG_TOP_CLK_CTRL_REG_USB_CLK_SRC_Msk;
		__fallthrough;
	case SMARTBOND_CLK_PLL96M:
		pll_requests &= ~(1 << (clk - SMARTBOND_CLK_PLL96M));
		if (pll_requests == 0) {
			/*
			 * PLL must not be disabled as long as a peripheral e.g. LCDC is enabled
			 * and clocked by PLL.
			 */
			if (!da1469x_clock_check_device_div1_clock()) {
#if CONFIG_REGULATOR
				ret = smartbond_clock_set_pll_status(false);
#else
				da1469x_clock_sys_pll_disable();
#endif
			} else {
				ret = -EPERM;
			}
		}
		break;
	default:
		return -ENOTSUP;
	}

	return ret;
}
/*
 * Resolve the virtual SYS_CLK / LP_CLK identifiers to the concrete
 * oscillator currently selected in CLK_CTRL_REG. Concrete clocks are
 * returned unchanged.
 */
static enum smartbond_clock smartbond_source_clock(enum smartbond_clock clk)
{
	if (clk == SMARTBOND_CLK_SYS_CLK) {
		switch (CRG_TOP->CLK_CTRL_REG & CRG_TOP_CLK_CTRL_REG_SYS_CLK_SEL_Msk) {
		case 0:
			clk = SMARTBOND_CLK_XTAL32M;
			break;
		case 1:
			clk = SMARTBOND_CLK_RC32M;
			break;
		case 2:
			clk = SMARTBOND_CLK_LP_CLK;
			break;
		default:
			clk = SMARTBOND_CLK_PLL96M;
			break;
		}
	}
	/* System clock can be low power clock, so next check is not in else */
	if (clk == SMARTBOND_CLK_LP_CLK) {
		switch ((CRG_TOP->CLK_CTRL_REG & CRG_TOP_CLK_CTRL_REG_LP_CLK_SEL_Msk) >>
			CRG_TOP_CLK_CTRL_REG_LP_CLK_SEL_Pos) {
		case 0:
			clk = SMARTBOND_CLK_RC32K;
			break;
		case 1:
			clk = SMARTBOND_CLK_RCX;
			break;
		default:
			/* Selector values 2 and 3 both mean XTAL32K. */
			clk = SMARTBOND_CLK_XTAL32K;
			break;
		}
	}
	return clk;
}
/*
 * Resolve 'clk' to its concrete source and report that source's rate in
 * Hz: measured values for the calibrated RC oscillators, devicetree
 * 'clock-frequency' for the crystals/PLL, and a fixed 48 MHz for USB.
 * Returns -ENOTSUP for unknown clocks.
 */
static int smartbond_clock_get_rate(enum smartbond_clock clk, uint32_t *rate)
{
	clk = smartbond_source_clock(clk);
	switch (clk) {
	case SMARTBOND_CLK_RC32K:
		*rate = lpc_clock_state.rc32k_freq;
		break;
	case SMARTBOND_CLK_RCX:
		*rate = lpc_clock_state.rcx_freq;
		break;
	case SMARTBOND_CLK_XTAL32K:
		*rate = DT_PROP(DT_NODELABEL(xtal32k), clock_frequency);
		break;
	case SMARTBOND_CLK_RC32M:
		*rate = DT_PROP(DT_NODELABEL(rc32m), clock_frequency);
		break;
	case SMARTBOND_CLK_XTAL32M:
		*rate = DT_PROP(DT_NODELABEL(xtal32m), clock_frequency);
		break;
	case SMARTBOND_CLK_PLL96M:
		*rate = DT_PROP(DT_NODELABEL(pll), clock_frequency);
		break;
	case SMARTBOND_CLK_USB:
		*rate = 48000000;
		break;
	default:
		return -ENOTSUP;
	}
	return 0;
}
/* clock_control API wrapper around smartbond_clock_get_rate(). */
static int smartbond_clock_control_get_rate(const struct device *dev,
					    clock_control_subsys_t sub_system,
					    uint32_t *rate)
{
	enum smartbond_clock clk = (enum smartbond_clock)(sub_system);

	ARG_UNUSED(dev);

	return smartbond_clock_get_rate(clk, rate);
}
/*
 * Map a devicetree dependency ordinal of an oscillator node to its
 * enum smartbond_clock identifier; SMARTBOND_CLK_NONE when unknown.
 */
static enum smartbond_clock smartbond_dt_ord_to_clock(uint32_t dt_ord)
{
	if (dt_ord == DT_DEP_ORD(DT_NODELABEL(rc32k))) {
		return SMARTBOND_CLK_RC32K;
	}
	if (dt_ord == DT_DEP_ORD(DT_NODELABEL(rcx))) {
		return SMARTBOND_CLK_RCX;
	}
	if (dt_ord == DT_DEP_ORD(DT_NODELABEL(xtal32k))) {
		return SMARTBOND_CLK_XTAL32K;
	}
	if (dt_ord == DT_DEP_ORD(DT_NODELABEL(rc32m))) {
		return SMARTBOND_CLK_RC32M;
	}
	if (dt_ord == DT_DEP_ORD(DT_NODELABEL(xtal32m))) {
		return SMARTBOND_CLK_XTAL32M;
	}
	if (dt_ord == DT_DEP_ORD(DT_NODELABEL(pll))) {
		return SMARTBOND_CLK_PLL96M;
	}
	return SMARTBOND_CLK_NONE;
}
/*
 * Enable a clock identified by its devicetree dependency ordinal.
 * Fix: cast to clock_control_subsys_t -- the type the on() handler takes;
 * clock_control_subsys_rate_t was a copy/paste slip (both are opaque
 * pointers, but the right typedef documents intent).
 */
static void smartbond_clock_control_on_by_ord(const struct device *dev,
					      uint32_t clock_id)
{
	enum smartbond_clock clk = smartbond_dt_ord_to_clock(clock_id);

	smartbond_clock_control_on(dev, (clock_control_subsys_t)clk);
}
/*
 * Disable a clock identified by its devicetree dependency ordinal.
 * Fix: cast to clock_control_subsys_t -- the type the off() handler takes
 * (was clock_control_subsys_rate_t).
 */
static void smartbond_clock_control_off_by_ord(const struct device *dev,
					       uint32_t clock_id)
{
	enum smartbond_clock clk = smartbond_dt_ord_to_clock(clock_id);

	smartbond_clock_control_off(dev, (clock_control_subsys_t)clk);
}
/*
 * Select 'lp_clk' as the low-power clock by programming
 * CLK_CTRL_REG.LP_CLK_SEL (0=RC32K, 1=RCX, 2=XTAL32K). When the kernel
 * timer frequency is derived at runtime it is refreshed to match.
 * Returns 0 on success, -EINVAL when 'lp_clk' is not an LP clock source.
 */
int z_smartbond_select_lp_clk(enum smartbond_clock lp_clk)
{
	int rc = 0;
	uint32_t clk_sel = 0;
	uint32_t clk_sel_msk = CRG_TOP_CLK_CTRL_REG_LP_CLK_SEL_Msk;
	switch (lp_clk) {
	case SMARTBOND_CLK_RC32K:
		clk_sel = 0;
		break;
	case SMARTBOND_CLK_RCX:
		clk_sel = 1 << CRG_TOP_CLK_CTRL_REG_LP_CLK_SEL_Pos;
		break;
	case SMARTBOND_CLK_XTAL32K:
		clk_sel = 2 << CRG_TOP_CLK_CTRL_REG_LP_CLK_SEL_Pos;
		break;
	default:
		rc = -EINVAL;
	}
	if (rc == 0) {
#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
		switch (lp_clk) {
		case SMARTBOND_CLK_RCX:
			z_clock_hw_cycles_per_sec = lpc_clock_state.rcx_freq;
			break;
		case SMARTBOND_CLK_RC32K:
			z_clock_hw_cycles_per_sec = lpc_clock_state.rc32k_freq;
			break;
		default:
			/* XTAL32K: nominal crystal frequency */
			z_clock_hw_cycles_per_sec = 32768;
			break;
		}
#endif
		CRG_TOP->CLK_CTRL_REG = (CRG_TOP->CLK_CTRL_REG & ~clk_sel_msk) | clk_sel;
	}
	return rc;
}
/*
 * Re-tune the QSPI controllers for the given system clock frequency:
 * read-pipe delay is 7 above 32 MHz (PLL) and 2 otherwise, chip-select
 * idle delays come from devicetree, and Tcem is programmed when the
 * external memory controller drives a PSRAM device.
 */
static void smartbond_clock_control_update_memory_settings(uint32_t sys_clock_freq)
{
	if (sys_clock_freq > 32000000) {
		da1469x_qspi_set_read_pipe_delay(QSPIC_ID, 7);
#if DT_NODE_HAS_STATUS(DT_NODELABEL(memc), okay)
		da1469x_qspi_set_read_pipe_delay(QSPIC2_ID, 7);
#endif
	} else {
		da1469x_qspi_set_read_pipe_delay(QSPIC_ID, 2);
#if DT_NODE_HAS_STATUS(DT_NODELABEL(memc), okay)
		da1469x_qspi_set_read_pipe_delay(QSPIC2_ID, 2);
#endif
	}
	da1469x_qspi_set_cs_delay(QSPIC_ID, SystemCoreClock,
				  DT_PROP(DT_NODELABEL(flash_controller), read_cs_idle_delay),
				  DT_PROP(DT_NODELABEL(flash_controller), erase_cs_idle_delay));
#if DT_NODE_HAS_STATUS(DT_NODELABEL(memc), okay)
	da1469x_qspi_set_cs_delay(QSPIC2_ID, SystemCoreClock,
				  DT_PROP(DT_NODELABEL(memc), read_cs_idle_min_ns),
				  DT_PROP_OR(DT_NODELABEL(memc), erase_cs_idle_min_ns, 0));
#if DT_PROP(DT_NODELABEL(memc), is_ram)
	da1469x_qspi_set_tcem(SystemCoreClock, DT_PROP(DT_NODELABEL(memc), tcem_max_us));
#endif
#endif
}
/*
 * Switch the system clock to 'sys_clk' (RC32M, XTAL32M or PLL96M),
 * adjusting QSPI timing before speeding up and after slowing down, and
 * raising VDD via the regulator path when the PLL must be enabled first.
 * Returns 0 on success, -EINVAL for unsupported sources, -EIO when the
 * PLL could not be powered.
 */
int z_smartbond_select_sys_clk(enum smartbond_clock sys_clk)
{
	uint32_t sys_clock_freq;
	uint32_t clk_sel;
	uint32_t clk_sel_msk = CRG_TOP_CLK_CTRL_REG_SYS_CLK_SEL_Msk;
	int res = 0;
	res = smartbond_clock_get_rate(sys_clk, &sys_clock_freq);
	if (res != 0) {
		return -EINVAL;
	}
	/* When PLL is selected as system clock qspi read pipe delay must be set to 7 */
	if (sys_clock_freq > 32000000) {
		smartbond_clock_control_update_memory_settings(sys_clock_freq);
	}
	if (sys_clk == SMARTBOND_CLK_RC32M) {
		clk_sel = 1 << CRG_TOP_CLK_CTRL_REG_SYS_CLK_SEL_Pos;
		CRG_TOP->CLK_CTRL_REG = (CRG_TOP->CLK_CTRL_REG & ~clk_sel_msk) | clk_sel;
		SystemCoreClock = sys_clock_freq;
	} else if (sys_clk == SMARTBOND_CLK_PLL96M) {
		/* Check that PLL is already enabled, otherwise enable it. */
		if (!da1469x_clock_sys_pll_is_enabled()) {
#if CONFIG_REGULATOR
			res = smartbond_clock_set_pll_status(true);
			if (res != 0) {
				return -EIO;
			}
#else
			da1469x_clock_sys_pll_enable();
#endif
		}
		da1469x_clock_sys_pll_switch();
	} else if (sys_clk == SMARTBOND_CLK_XTAL32M) {
		/*
		 * XTAL32M should be enabled eitherway as it's not allowed
		 * to be turned off by application.
		 */
		da1469x_clock_sys_xtal32m_switch_safe();
	} else {
		return -EINVAL;
	}
	/* When switching back from PLL to 32MHz read pipe delay may be set to 2 */
	if (SystemCoreClock <= 32000000) {
		smartbond_clock_control_update_memory_settings(SystemCoreClock);
	}
	return res;
}
/**
 * @brief Initialize clocks for the Smartbond
 *
 * This routine is called to enable and configure the clocks and PLL
 * of the soc on the board: all oscillators with devicetree status
 * "okay" are started, the LP and system clocks are switched to the
 * sources selected by the lp_clk/sys_clk nodes, and oscillators left
 * "disabled" in devicetree are turned off again.
 *
 * @param dev clocks device struct
 *
 * @return 0
 */
int smartbond_clocks_init(const struct device *dev)
{
	uint32_t clk_id;
	enum smartbond_clock lp_clk;
	enum smartbond_clock sys_clk;
	ARG_UNUSED(dev);
#if DT_NODE_HAS_STATUS(DT_NODELABEL(memc), okay)
	/* Make sure QSPIC2 is enabled */
	da1469x_clock_amba_enable(CRG_TOP_CLK_AMBA_REG_QSPI2_ENABLE_Msk);
#endif
#define ENABLE_OSC(clock) smartbond_clock_control_on_by_ord(dev, DT_DEP_ORD(clock))
#define DISABLE_OSC(clock) if (DT_NODE_HAS_STATUS(clock, disabled)) { \
		smartbond_clock_control_off_by_ord(dev, DT_DEP_ORD(clock)); \
	}
	/* Enable all oscillators with status "okay" */
	DT_FOREACH_CHILD_STATUS_OKAY_SEP(DT_PATH(crg, osc), ENABLE_OSC, (;));
	/* Make sure that selected sysclock is enabled */
	BUILD_ASSERT(DT_NODE_HAS_STATUS(DT_PROP(DT_NODELABEL(sys_clk), clock_src), okay),
		     "Clock selected as system clock no enabled in DT");
	BUILD_ASSERT(DT_NODE_HAS_STATUS(DT_PROP(DT_NODELABEL(lp_clk), clock_src), okay),
		     "Clock selected as LP clock no enabled in DT");
	BUILD_ASSERT(DT_NODE_HAS_STATUS(DT_NODELABEL(pll), disabled) ||
		     DT_NODE_HAS_STATUS(DT_NODELABEL(xtal32m), okay),
		     "PLL enabled in DT but XTAL32M is disabled");
	clk_id = DT_DEP_ORD(DT_PROP(DT_NODELABEL(lp_clk), clock_src));
	lp_clk = smartbond_dt_ord_to_clock(clk_id);
	z_smartbond_select_lp_clk(lp_clk);
	clk_id = DT_DEP_ORD(DT_PROP(DT_NODELABEL(sys_clk), clock_src));
	sys_clk = smartbond_dt_ord_to_clock(clk_id);
	/* Start the concrete source behind sys_clk before switching to it. */
	smartbond_clock_control_on(dev,
				   (clock_control_subsys_rate_t)smartbond_source_clock(sys_clk));
	z_smartbond_select_sys_clk(sys_clk);
	/* Disable unwanted oscillators */
	DT_FOREACH_CHILD_SEP(DT_PATH(crg, osc), DISABLE_OSC, (;));
	return 0;
}
/* clock_control API table; set_rate is not supported by this driver. */
static const struct clock_control_driver_api smartbond_clock_control_api = {
	.on = smartbond_clock_control_on,
	.off = smartbond_clock_control_off,
	.get_rate = smartbond_clock_control_get_rate,
};
#if CONFIG_PM_DEVICE
/*
 * PM hook: on resume, re-enable the QSPIC2 AMBA clock (when the external
 * memory controller is present) and re-apply QSPI timing, since the clock
 * tree may have been restored by the wake-up path. Suspend needs no work.
 */
static int smartbond_clocks_pm_action(const struct device *dev, enum pm_device_action action)
{
	switch (action) {
	case PM_DEVICE_ACTION_SUSPEND:
		break;
	case PM_DEVICE_ACTION_RESUME:
#if DT_NODE_HAS_STATUS(DT_NODELABEL(memc), okay)
		/* Make sure QSPIC2 is enabled */
		da1469x_clock_amba_enable(CRG_TOP_CLK_AMBA_REG_QSPI2_ENABLE_Msk);
#endif
		/*
		 * Make sure the flash controller has correct settings as clock restoration
		 * might have been performed upon waking up.
		 */
		smartbond_clock_control_update_memory_settings(SystemCoreClock);
		break;
	default:
		return -ENOTSUP;
	}
	return 0;
}
#endif
/* Register the clock controller on the 'osc' DT node, with PM support. */
PM_DEVICE_DT_DEFINE(DT_NODELABEL(osc), smartbond_clocks_pm_action);
DEVICE_DT_DEFINE(DT_NODELABEL(osc),
		 smartbond_clocks_init,
		 PM_DEVICE_DT_GET(DT_NODELABEL(osc)),
		 NULL, NULL,
		 PRE_KERNEL_1,
		 CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		 &smartbond_clock_control_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_smartbond.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,509 |
```c
/*
*
*/
#include <stdint.h>
#include <stdlib.h>

#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/renesas_cpg_mssr.h>
#include <zephyr/dt-bindings/clock/renesas_cpg_mssr.h>
#include <zephyr/irq.h>
#include <zephyr/kernel.h>

#include "clock_control_renesas_cpg_mssr.h"
#define LOG_LEVEL CONFIG_CLOCK_CONTROL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control_rcar);
/* Pulse the module software reset: assert via SRCRn, release via SRSTCLRn. */
static void rcar_cpg_reset(uint32_t base_address, uint32_t reg, uint32_t bit)
{
	rcar_cpg_write(base_address, srcr[reg], BIT(bit));
	rcar_cpg_write(base_address, SRSTCLR(reg), BIT(bit));
}
/*
 * Write a CPG register. The complement of the value is written to CPGWPR
 * first (the CPG write-protection sequence), then the value itself.
 */
void rcar_cpg_write(uint32_t base_address, uint32_t reg, uint32_t val)
{
	sys_write32(~val, base_address + CPGWPR);
	sys_write32(val, base_address + reg);
	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
	k_sleep(K_USEC(35));
}
/*
 * Gate or ungate a module clock through the MSTPCRn registers.
 * 'module' encodes register index and bit as index*100 + bit. The MSTP
 * bits are active-low: cleared means the module is clocked. A module
 * being stopped is additionally put through a reset pulse.
 * Always returns 0.
 */
int rcar_cpg_mstp_clock_endisable(uint32_t base_address, uint32_t module, bool enable)
{
	const uint32_t reg_idx = module / 100;
	const uint32_t bit_pos = module % 100;
	uint32_t val;

	__ASSERT((bit_pos < 32) && reg_idx < ARRAY_SIZE(mstpcr),
		 "Invalid module number for cpg clock: %d", module);

	val = sys_read32(base_address + mstpcr[reg_idx]);
	if (enable) {
		val &= ~BIT(bit_pos);
	} else {
		val |= BIT(bit_pos);
	}
	sys_write32(val, base_address + mstpcr[reg_idx]);

	if (!enable) {
		rcar_cpg_reset(base_address, reg_idx, bit_pos);
	}

	return 0;
}
/*
 * bsearch() comparator: the key is a module id smuggled through the
 * pointer value, the element is a cpg_clk_info_table entry; the table is
 * sorted by module id in ascending order.
 */
static int cmp_cpg_clk_info_table_items(const void *key, const void *element)
{
	const struct cpg_clk_info_table *entry = element;
	uint32_t module = (uintptr_t)key;

	if (module > entry->module) {
		return 1;
	}
	if (module < entry->module) {
		return -1;
	}
	return 0;
}
/*
 * Binary-search the per-domain clock table for the entry with module 'id'.
 * Returns NULL (after logging) when the module is not described.
 */
struct cpg_clk_info_table *
rcar_cpg_find_clk_info_by_module_id(const struct device *dev, uint32_t domain, uint32_t id)
{
	struct rcar_cpg_mssr_data *data = dev->data;
	struct cpg_clk_info_table *found;

	found = bsearch((void *)(uintptr_t)id, data->clk_info_table[domain],
			data->clk_info_table_size[domain], sizeof(*found),
			cmp_cpg_clk_info_table_items);
	if (found == NULL) {
		LOG_ERR("%s: can't find clk info (domain %u module %u)", dev->name, domain, id);
	}

	return found;
}
/*
 * Read the divider currently programmed for a clock.
 * Module (CPG_MOD) clocks and clocks without a register offset are plain
 * gates: divider 1. Otherwise the divider is decoded from the clock's
 * register by the SoC-specific get_div_helper. Returns RCAR_CPG_NONE
 * when no helper is installed or the helper reports 0.
 */
static uint32_t rcar_cpg_get_divider(const struct device *dev, struct cpg_clk_info_table *clk_info)
{
	mem_addr_t reg_addr;
	mm_reg_t reg_val;
	uint32_t divider = RCAR_CPG_NONE;
	struct rcar_cpg_mssr_data *data = dev->data;
	if (clk_info->domain == CPG_MOD) {
		return 1;
	}
	reg_addr = clk_info->offset;
	if (reg_addr == RCAR_CPG_NONE) {
		/* if we don't have valid offset, in is equal to out */
		return 1;
	}
	reg_addr += DEVICE_MMIO_GET(dev);
	reg_val = sys_read32(reg_addr);
	if (data->get_div_helper) {
		divider = data->get_div_helper(reg_val, clk_info->module);
	}
	if (!divider) {
		return RCAR_CPG_NONE;
	}
	return divider;
}
/*
 * Refresh the cached output frequency of 'clk_info' from its input
 * frequency and the hardware divider. Returns 0 on success, -EINVAL
 * when the divider cannot be determined.
 */
static int rcar_cpg_update_out_freq(const struct device *dev, struct cpg_clk_info_table *clk_info)
{
	const uint32_t div = rcar_cpg_get_divider(dev, clk_info);

	if (div == RCAR_CPG_NONE) {
		return -EINVAL;
	}

	clk_info->out_freq = clk_info->in_freq / div;
	return 0;
}
/*
 * Return the input frequency of 'clk_info', resolving it recursively
 * through the parent chain when it is not yet known, and refresh the
 * cached output frequency along the way. Returns a negative errno when
 * the chain cannot be resolved (NULL node, missing parent frequency, or
 * unreadable divider).
 */
static int64_t rcar_cpg_get_in_update_out_freq(const struct device *dev,
					       struct cpg_clk_info_table *clk_info)
{
	int64_t freq = -ENOTSUP;
	struct cpg_clk_info_table *parent_clk;
	if (!clk_info) {
		return freq;
	}
	if (clk_info->in_freq != RCAR_CPG_NONE) {
		/* Input known: make sure the output cache is populated too. */
		if (clk_info->out_freq == RCAR_CPG_NONE) {
			if (rcar_cpg_update_out_freq(dev, clk_info) < 0) {
				return freq;
			}
		}
		return clk_info->in_freq;
	}
	/* Input unknown: derive it from the parent's output, recursively. */
	parent_clk = clk_info->parent;
	freq = rcar_cpg_get_in_update_out_freq(dev, parent_clk);
	if (freq < 0) {
		return freq;
	}
	clk_info->in_freq = parent_clk->out_freq;
	freq = rcar_cpg_update_out_freq(dev, clk_info);
	if (freq < 0) {
		return freq;
	}
	return clk_info->in_freq;
}
/*
 * Return the clock's output frequency, resolving the input chain first
 * when the cached value is not yet known. Negative errno on failure.
 */
static int64_t rcar_cpg_get_out_freq(const struct device *dev, struct cpg_clk_info_table *clk_info)
{
	int64_t resolved;

	/* Fast path: the output frequency is already cached. */
	if (clk_info->out_freq != RCAR_CPG_NONE) {
		return clk_info->out_freq;
	}

	/* Resolving the input also refreshes the output cache. */
	resolved = rcar_cpg_get_in_update_out_freq(dev, clk_info);
	if (resolved < 0) {
		return resolved;
	}

	return clk_info->out_freq;
}
/*
 * Propagate a parent's (possibly just changed) output frequency to all
 * of its children, recomputing each child's in/out frequencies
 * recursively down the tree. Panics if a child's divider cannot be read
 * (see the inline comment for the possible causes).
 */
static void rcar_cpg_change_children_in_out_freq(const struct device *dev,
						 struct cpg_clk_info_table *parent)
{
	struct cpg_clk_info_table *children_list = parent->children_list;
	while (children_list) {
		children_list->in_freq = parent->out_freq;
		if (rcar_cpg_update_out_freq(dev, children_list) < 0) {
			/*
			 * Why it can happen:
			 * - divider is zero (with current implementation of board specific
			 *   divider helper function it is impossible);
			 * - we don't have board specific implementation of get divider helper
			 *   function;
			 * - we don't have this module in a table (for some of call chains of
			 *   this function it is impossible);
			 * - impossible value is set in clock register divider bits.
			 */
			LOG_ERR("%s: error during getting divider from clock register, domain %u "
				"module %u! Please, revise logic related to obtaining divider or "
				"check presentence of clock inside appropriate clk_info_table",
				dev->name, children_list->domain, children_list->module);
			k_panic();
			return;
		}
		/* child can have childrens */
		rcar_cpg_change_children_in_out_freq(dev, children_list);
		children_list = children_list->next_sibling;
	}
}
/*
 * clock_control get_rate handler: look the clock up in the per-domain
 * table and return its (lazily resolved) output frequency in Hz.
 * Returns -EINVAL on NULL arguments, unknown clocks, resolution failure,
 * or a frequency that does not fit the 32-bit output parameter.
 */
int rcar_cpg_get_rate(const struct device *dev, clock_control_subsys_t sys, uint32_t *rate)
{
	int64_t ret;
	struct rcar_cpg_mssr_data *data;
	struct rcar_cpg_clk *clk = (struct rcar_cpg_clk *)sys;
	k_spinlock_key_t key;
	struct cpg_clk_info_table *clk_info;

	if (!dev || !sys || !rate) {
		LOG_ERR("%s: received null ptr input arg(s) dev %p sys %p rate %p",
			__func__, dev, sys, rate);
		return -EINVAL;
	}

	clk_info = rcar_cpg_find_clk_info_by_module_id(dev, clk->domain, clk->module);
	if (clk_info == NULL) {
		return -EINVAL;
	}

	data = dev->data;
	key = k_spin_lock(&data->lock);
	ret = rcar_cpg_get_out_freq(dev, clk_info);
	k_spin_unlock(&data->lock, key);

	if (ret < 0) {
		LOG_ERR("%s: clk (domain %u module %u) error (%lld) during getting out frequency",
			dev->name, clk->domain, clk->module, ret);
		return -EINVAL;
	} else if (ret > UINT32_MAX) {
		/*
		 * BUGFIX: *rate is uint32_t, so bound with UINT32_MAX rather
		 * than UINT_MAX -- 'unsigned int' is not guaranteed 32-bit.
		 */
		LOG_ERR("%s: clk (domain %u module %u) frequency bigger then max uint value",
			dev->name, clk->domain, clk->module);
		return -EINVAL;
	}

	*rate = (uint32_t)ret;
	return 0;
}
/*
 * clock_control set_rate handler: program a core clock's divider so that
 * out = in / divider equals the requested rate exactly.
 * CPG_MOD clocks are redirected to their parent core clock. The
 * SoC-specific set_rate_helper validates the divider and supplies the
 * register field encoding/mask; the result is written through the
 * protected CPG write sequence, read back for verification, and then
 * propagated to all child clocks.
 * Returns 0 on success, -EINVAL for bad arguments / non-integral
 * dividers / verification mismatch, -ENOTSUP when unimplemented.
 */
int rcar_cpg_set_rate(const struct device *dev, clock_control_subsys_t sys,
		      clock_control_subsys_rate_t rate)
{
	int ret = -ENOTSUP;
	k_spinlock_key_t key;
	struct cpg_clk_info_table *clk_info;
	struct rcar_cpg_clk *clk = (struct rcar_cpg_clk *)sys;
	struct rcar_cpg_mssr_data *data;
	int64_t in_freq;
	uint32_t divider;
	uint32_t div_mask;
	uint32_t module;
	uintptr_t u_rate = (uintptr_t)rate;
	/* NOTE: '!rate' also rejects a requested rate of 0 (cannot divide by it). */
	if (!dev || !sys || !rate) {
		LOG_ERR("%s: received null ptr input arg(s) dev %p sys %p rate %p",
			__func__, dev, sys, rate);
		return -EINVAL;
	}
	clk_info = rcar_cpg_find_clk_info_by_module_id(dev, clk->domain, clk->module);
	if (clk_info == NULL) {
		return -EINVAL;
	}
	if (clk_info->domain == CPG_MOD) {
		/* Module clocks are gates; the rate lives in the parent core clock. */
		if (!clk_info->parent) {
			LOG_ERR("%s: parent isn't present for module clock, module id %u",
				dev->name, clk_info->module);
			k_panic();
		}
		clk_info = clk_info->parent;
	}
	module = clk_info->module;
	data = dev->data;
	key = k_spin_lock(&data->lock);
	in_freq = rcar_cpg_get_in_update_out_freq(dev, clk_info);
	if (in_freq < 0) {
		ret = in_freq;
		goto unlock;
	}
	/* The requested rate must divide the input frequency exactly. */
	divider = in_freq / u_rate;
	if (divider * u_rate != in_freq) {
		ret = -EINVAL;
		goto unlock;
	}
	if (!data->set_rate_helper) {
		ret = -ENOTSUP;
		goto unlock;
	}
	/* Helper converts the divider to its register encoding and mask. */
	ret = data->set_rate_helper(module, &divider, &div_mask);
	if (!ret) {
		int64_t out_rate;
		uint32_t reg = sys_read32(clk_info->offset + DEVICE_MMIO_GET(dev));
		reg &= ~div_mask;
		rcar_cpg_write(DEVICE_MMIO_GET(dev), clk_info->offset, reg | divider);
		/* Invalidate the cache and verify the achieved rate. */
		clk_info->out_freq = RCAR_CPG_NONE;
		out_rate = rcar_cpg_get_out_freq(dev, clk_info);
		if (out_rate < 0 || out_rate != u_rate) {
			ret = -EINVAL;
			LOG_ERR("%s: clock (domain %u module %u) register cfg freq (%lld) "
				"isn't equal to requested %lu",
				dev->name, clk->domain, clk->module, out_rate, u_rate);
			goto unlock;
		}
		rcar_cpg_change_children_in_out_freq(dev, clk_info);
	}
unlock:
	k_spin_unlock(&data->lock, key);
	return ret;
}
/*
 * One-time init step: link every table entry with a valid parent_id to
 * its parent (always looked up in the CPG_CORE domain) and insert it at
 * the head of the parent's children list. Also validates that each
 * domain table is sorted by module id (required by the bsearch lookup)
 * and panics on inconsistent table data.
 */
void rcar_cpg_build_clock_relationship(const struct device *dev)
{
	uint32_t domain;
	k_spinlock_key_t key;
	struct rcar_cpg_mssr_data *data = dev->data;
	if (!data) {
		return;
	}
	key = k_spin_lock(&data->lock);
	for (domain = 0; domain < CPG_NUM_DOMAINS; domain++) {
		uint32_t idx;
		uint32_t prev_mod_id = 0;
		struct cpg_clk_info_table *item = data->clk_info_table[domain];
		for (idx = 0; idx < data->clk_info_table_size[domain]; idx++, item++) {
			struct cpg_clk_info_table *parent;
			/* check if an array is sorted by module id or not */
			if (prev_mod_id >= item->module) {
				LOG_ERR("%s: clocks have to be sorted inside clock table in "
					"ascending order by module id field, domain %u "
					"module id %u",
					dev->name, item->domain, item->module);
				k_panic();
			}
			prev_mod_id = item->module;
			if (item->parent_id == RCAR_CPG_NONE) {
				continue;
			}
			parent = rcar_cpg_find_clk_info_by_module_id(dev, CPG_CORE,
								     item->parent_id);
			if (!parent) {
				LOG_ERR("%s: can't find parent for clock with valid parent id, "
					"domain %u module id %u",
					dev->name, item->domain, item->module);
				k_panic();
			}
			if (item->parent != NULL) {
				LOG_ERR("%s: trying to set another parent for a clock, domain %u "
					"module id %u, parent for the clock has been already set",
					dev->name, item->domain, item->module);
				k_panic();
			}
			item->parent = parent;
			/* insert in the head of the children list of the parent */
			item->next_sibling = parent->children_list;
			parent->children_list = item;
		}
	}
	k_spin_unlock(&data->lock, key);
}
void rcar_cpg_update_all_in_out_freq(const struct device *dev)
{
uint32_t domain;
k_spinlock_key_t key;
struct rcar_cpg_mssr_data *data = dev->data;
if (!data) {
return;
}
key = k_spin_lock(&data->lock);
for (domain = 0; domain < CPG_NUM_DOMAINS; domain++) {
uint32_t idx;
struct cpg_clk_info_table *item = data->clk_info_table[domain];
for (idx = 0; idx < data->clk_info_table_size[domain]; idx++, item++) {
if (rcar_cpg_get_in_update_out_freq(dev, item) < 0) {
LOG_ERR("%s: can't update in/out freq for clock during init, "
"domain %u module %u! Please, review correctness of data "
"inside clk_info_table",
dev->name, item->domain, item->module);
k_panic();
}
}
}
k_spin_unlock(&data->lock, key);
}
``` | /content/code_sandbox/drivers/clock_control/clock_control_renesas_cpg_mssr.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,217 |
```c
/*
*
*
*/
#include <zephyr/drivers/clock_control.h>
#define DT_DRV_COMPAT fixed_clock
/* Static per-instance configuration taken from devicetree. */
struct fixed_rate_clock_config {
	uint32_t rate;	/* fixed output frequency in Hz ('clock-frequency') */
};
/* A fixed clock cannot be gated: 'on' is a successful no-op. */
static int fixed_rate_clk_on(const struct device *dev,
			     clock_control_subsys_t sys)
{
	ARG_UNUSED(sys);
	ARG_UNUSED(dev);

	return 0;
}
/* A fixed clock cannot be gated: 'off' is a successful no-op. */
static int fixed_rate_clk_off(const struct device *dev,
			      clock_control_subsys_t sys)
{
	ARG_UNUSED(sys);
	ARG_UNUSED(dev);

	return 0;
}
/*
 * A fixed clock is always running, so the status is unconditionally ON.
 * Fix: mark both parameters with ARG_UNUSED, consistent with the sibling
 * handlers and avoiding -Wunused-parameter noise.
 */
static enum clock_control_status fixed_rate_clk_get_status(const struct device *dev,
							   clock_control_subsys_t sys)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(sys);

	return CLOCK_CONTROL_STATUS_ON;
}
/* Report the build-time fixed frequency from the instance configuration. */
static int fixed_rate_clk_get_rate(const struct device *dev,
				   clock_control_subsys_t sys,
				   uint32_t *rate)
{
	const struct fixed_rate_clock_config *cfg = dev->config;

	ARG_UNUSED(sys);

	*rate = cfg->rate;

	return 0;
}
/* clock_control API: on/off are no-ops, status is always ON, rate is fixed. */
static const struct clock_control_driver_api fixed_rate_clk_api = {
	.on = fixed_rate_clk_on,
	.off = fixed_rate_clk_off,
	.get_status = fixed_rate_clk_get_status,
	.get_rate = fixed_rate_clk_get_rate
};
/* No hardware to set up: init always succeeds. */
static int fixed_rate_clk_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 0;
}
/*
 * Instantiate one driver per enabled fixed-clock devicetree node: the config
 * captures the node's clock-frequency, and the device is created early
 * (PRE_KERNEL_1) so dependent drivers can query it during their own init.
 */
#define FIXED_CLK_INIT(idx) \
	static const struct fixed_rate_clock_config fixed_rate_clock_config_##idx = {  \
		.rate = DT_INST_PROP(idx, clock_frequency),			       \
	};									       \
	DEVICE_DT_INST_DEFINE(idx,						       \
			      fixed_rate_clk_init,				       \
			      NULL, NULL,					       \
			      &fixed_rate_clock_config_##idx,			       \
			      PRE_KERNEL_1,					       \
			      CONFIG_CLOCK_CONTROL_INIT_PRIORITY,		       \
			      &fixed_rate_clk_api				       \
	);
DT_INST_FOREACH_STATUS_OKAY(FIXED_CLK_INIT)
``` | /content/code_sandbox/drivers/clock_control/clock_control_fixed_rate.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 392 |
```unknown
# Intel cAVS clock control driver
config CLOCK_CONTROL_ADSP
bool "Intel CAVS clock control"
default y
depends on DT_HAS_INTEL_ADSP_SHIM_CLKCTL_ENABLED
select ADSP_CLOCK
help
	  Driver for the CAVS clocks. Allows the type of clock (and
	  thus the frequency) to be chosen.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.cavs | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 74 |
```unknown
# NuMaker clock controller driver configuration options
config CLOCK_CONTROL_NUMAKER_SCC
bool "NuMaker system clock controller driver"
default y
depends on DT_HAS_NUVOTON_NUMAKER_SCC_ENABLED
help
Enable support for NuMaker system clock controller driver
``` | /content/code_sandbox/drivers/clock_control/Kconfig.numaker | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 58 |
```unknown
config CLOCK_CONTROL_RCAR_CPG_MSSR
bool "RCar CPG MSSR driver"
default y
depends on SOC_FAMILY_RENESAS_RCAR
help
Enable support for Renesas RCar CPG MSSR driver.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.rcar | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 54 |
```unknown
config CLOCK_CONTROL_AGILEX5
bool "Agilex5 SoCFPGA clock control driver"
default y
depends on DT_HAS_INTEL_AGILEX5_CLOCK_ENABLED
help
This option enables the clock driver for Intel Agilex5 SoCFPGA SOC.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.agilex5 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 59 |
```c
/*
*
* Based on clock_control_mcux_sim.c, which is:
*
*/
#define DT_DRV_COMPAT nxp_kinetis_mcg
#include <zephyr/drivers/clock_control.h>
#include <zephyr/dt-bindings/clock/kinetis_mcg.h>
#include <soc.h>
#include <fsl_clock.h>
#define LOG_LEVEL CONFIG_CLOCK_CONTROL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control_mcg);
/*
 * MCG clocks are always available; "on" is a successful no-op.
 * Fix: mark the unused parameters with ARG_UNUSED to silence
 * -Wunused-parameter, consistent with other Zephyr clock drivers.
 */
static int mcux_mcg_on(const struct device *dev,
		       clock_control_subsys_t sub_system)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(sub_system);

	return 0;
}
/*
 * MCG clocks cannot be gated from here; "off" is accepted but has no effect.
 * Fix: mark the unused parameters with ARG_UNUSED to silence
 * -Wunused-parameter, consistent with other Zephyr clock drivers.
 */
static int mcux_mcg_off(const struct device *dev,
			clock_control_subsys_t sub_system)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(sub_system);

	return 0;
}
/*
 * Return the frequency of the requested MCG clock in Hz.
 *
 * KINETIS_MCG_OUT_CLK is read directly via CLOCK_GetOutClkFreq(); the fixed
 * frequency clock (when the SoC has an FF clock divider) goes through the
 * generic CLOCK_GetFreq() lookup. Unknown identifiers yield -EINVAL.
 *
 * Fixes: removed the unreachable `break` after `return -EINVAL` (dead code)
 * and marked the unused device parameter with ARG_UNUSED.
 */
static int mcux_mcg_get_rate(const struct device *dev,
			     clock_control_subsys_t sub_system,
			     uint32_t *rate)
{
	clock_name_t clock_name;

	ARG_UNUSED(dev);

	switch ((uint32_t) sub_system) {
#if defined(FSL_FEATURE_MCG_FFCLK_DIV) && (FSL_FEATURE_MCG_FFCLK_DIV)
	case KINETIS_MCG_FIXED_FREQ_CLK:
		clock_name = kCLOCK_McgFixedFreqClk;
		break;
#endif
	case KINETIS_MCG_OUT_CLK:
		*rate = CLOCK_GetOutClkFreq();
		return 0;
	default:
		LOG_ERR("Unsupported clock name");
		return -EINVAL;
	}

	*rate = CLOCK_GetFreq(clock_name);
	return 0;
}
/* clock_control API vtable for the MCG; no get_status/configure support. */
static const struct clock_control_driver_api mcux_mcg_driver_api = {
	.on = mcux_mcg_on,
	.off = mcux_mcg_off,
	.get_rate = mcux_mcg_get_rate,
};
/* Single MCG instance: no init hook, no driver data/config, API only. */
DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, NULL, PRE_KERNEL_1,
		      CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		      &mcux_mcg_driver_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_mcux_mcg.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 394 |
```c
/*
*
* r8a7795 Clock Pulse Generator / Module Standby and Software Reset
*
*/
#define DT_DRV_COMPAT renesas_r8a7795_cpg_mssr
#include <errno.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/drivers/clock_control/renesas_cpg_mssr.h>
#include <zephyr/dt-bindings/clock/renesas_cpg_mssr.h>
#include <zephyr/dt-bindings/clock/r8a7795_cpg_mssr.h>
#include <zephyr/irq.h>
#include "clock_control_renesas_cpg_mssr.h"
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(clock_control_rcar);
#define R8A7795_CLK_SD_STOP_BIT 8
#define R8A7795_CLK_SD_DIV_MASK 0x3
#define R8A7795_CLK_SD_DIV_SHIFT 0
#define R8A7795_CLK_SDH_STOP_BIT 9
#define R8A7795_CLK_SDH_DIV_MASK 0x7
#define R8A7795_CLK_SDH_DIV_SHIFT 2
#define R8A7795_CLK_CANFD_STOP_BIT 8
#define R8A7795_CLK_CANFD_DIV_MASK 0x3f
/* Per-instance ROM config: only the devicetree MMIO region descriptor. */
struct r8a7795_cpg_mssr_config {
	DEVICE_MMIO_ROM; /* Must be first */
};
/* Per-instance RAM data: just the common R-Car CPG/MSSR state for now. */
struct r8a7795_cpg_mssr_data {
	struct rcar_cpg_mssr_data cmn; /* Must be first */
};
/* NOTE: the array MUST be sorted by module field */
/*
 * Core clock descriptors. Each RCAR_CORE_CLK_INFO_ITEM appears to take
 * (module id, divider register offset, parent module, initial frequency)
 * -- NOTE(review): confirm argument order against renesas_cpg_mssr.h.
 * RCAR_CPG_NONE marks "no divider register" / "no parent".
 */
static struct cpg_clk_info_table core_props[] = {
	RCAR_CORE_CLK_INFO_ITEM(R8A7795_CLK_S3D4, RCAR_CPG_NONE, RCAR_CPG_NONE,
				RCAR_CPG_KHZ(66600)),
	RCAR_CORE_CLK_INFO_ITEM(R8A7795_CLK_SD0H, 0x0074, RCAR_CPG_NONE, RCAR_CPG_MHZ(800)),
	RCAR_CORE_CLK_INFO_ITEM(R8A7795_CLK_SD0, 0x0074, R8A7795_CLK_SD0H, RCAR_CPG_MHZ(800)),
	RCAR_CORE_CLK_INFO_ITEM(R8A7795_CLK_SD1H, 0x0078, RCAR_CPG_NONE, RCAR_CPG_MHZ(800)),
	RCAR_CORE_CLK_INFO_ITEM(R8A7795_CLK_SD1, 0x0078, R8A7795_CLK_SD1H, RCAR_CPG_MHZ(800)),
	RCAR_CORE_CLK_INFO_ITEM(R8A7795_CLK_SD2H, 0x0268, RCAR_CPG_NONE, RCAR_CPG_MHZ(800)),
	RCAR_CORE_CLK_INFO_ITEM(R8A7795_CLK_SD2, 0x0268, R8A7795_CLK_SD2H, RCAR_CPG_MHZ(800)),
	RCAR_CORE_CLK_INFO_ITEM(R8A7795_CLK_SD3H, 0x026C, RCAR_CPG_NONE, RCAR_CPG_MHZ(800)),
	RCAR_CORE_CLK_INFO_ITEM(R8A7795_CLK_SD3, 0x026C, R8A7795_CLK_SD3H, RCAR_CPG_MHZ(800)),
	RCAR_CORE_CLK_INFO_ITEM(R8A7795_CLK_CANFD, 0x0244, RCAR_CPG_NONE, RCAR_CPG_MHZ(800)),
	RCAR_CORE_CLK_INFO_ITEM(R8A7795_CLK_S0D12, RCAR_CPG_NONE, RCAR_CPG_NONE,
				RCAR_CPG_KHZ(66600)),
};
/* NOTE: the array MUST be sorted by module field */
/*
 * Module clock descriptors: MSTP module number and the core clock that
 * feeds it (SDHI0..3 interfaces plus their shared S3D4-fed module 310).
 */
static struct cpg_clk_info_table mod_props[] = {
	RCAR_MOD_CLK_INFO_ITEM(310, R8A7795_CLK_S3D4),
	RCAR_MOD_CLK_INFO_ITEM(311, R8A7795_CLK_SD3),
	RCAR_MOD_CLK_INFO_ITEM(312, R8A7795_CLK_SD2),
	RCAR_MOD_CLK_INFO_ITEM(313, R8A7795_CLK_SD1),
	RCAR_MOD_CLK_INFO_ITEM(314, R8A7795_CLK_SD0),
};
/*
 * Gate or ungate a core clock by toggling its stop bit. The stop bit is
 * set to halt the clock and cleared to run it, so the bit value is the
 * inverse of @a enable. Clocks without a stop bit return -ENOTSUP.
 */
static int r8a7795_cpg_enable_disable_core(const struct device *dev,
					   struct cpg_clk_info_table *clk_info, uint32_t enable)
{
	uint32_t stop_bit;
	uint32_t reg;

	switch (clk_info->module) {
	case R8A7795_CLK_SD0:
	case R8A7795_CLK_SD1:
	case R8A7795_CLK_SD2:
	case R8A7795_CLK_SD3:
		stop_bit = R8A7795_CLK_SD_STOP_BIT;
		break;
	case R8A7795_CLK_SD0H:
	case R8A7795_CLK_SD1H:
	case R8A7795_CLK_SD2H:
	case R8A7795_CLK_SD3H:
		stop_bit = R8A7795_CLK_SDH_STOP_BIT;
		break;
	case R8A7795_CLK_CANFD:
		stop_bit = R8A7795_CLK_CANFD_STOP_BIT;
		break;
	default:
		return -ENOTSUP;
	}

	reg = sys_read32(DEVICE_MMIO_GET(dev) + clk_info->offset);
	reg &= ~(1U << stop_bit);
	if (!enable) {
		/* stop bit set means the clock is halted */
		reg |= 1U << stop_bit;
	}
	rcar_cpg_write(DEVICE_MMIO_GET(dev), clk_info->offset, reg);

	return 0;
}
/*
 * Enable or disable a core clock; on enable, an optional requested rate
 * (clk->rate > 0) is applied first.
 *
 * Fix: the result of r8a7795_cpg_enable_disable_core() used to be silently
 * discarded and the function always returned 0, so callers never learned
 * about gate failures (e.g. -ENOTSUP for clocks without a stop bit).
 * The gate result is now propagated.
 *
 * @return 0 on success, negative errno on lookup/rate/gate failure.
 */
static int r8a7795_cpg_core_clock_endisable(const struct device *dev, struct rcar_cpg_clk *clk,
					    bool enable)
{
	struct cpg_clk_info_table *clk_info;
	struct r8a7795_cpg_mssr_data *data = dev->data;
	k_spinlock_key_t key;
	int ret;

	clk_info = rcar_cpg_find_clk_info_by_module_id(dev, clk->domain, clk->module);
	if (!clk_info) {
		return -EINVAL;
	}

	if (enable && clk->rate > 0) {
		/* program the requested rate before ungating the clock */
		uintptr_t rate = clk->rate;

		ret = rcar_cpg_set_rate(dev, (clock_control_subsys_t)clk,
					(clock_control_subsys_rate_t)rate);
		if (ret < 0) {
			return ret;
		}
	}

	key = k_spin_lock(&data->cmn.lock);
	ret = r8a7795_cpg_enable_disable_core(dev, clk_info, enable);
	k_spin_unlock(&data->cmn.lock, key);

	return ret;
}
/*
 * Common start/stop entry point: dispatch to the MSTP (module) path or the
 * core clock path depending on the clock's domain.
 */
static int r8a7795_cpg_mssr_start_stop(const struct device *dev, clock_control_subsys_t sys,
				       bool enable)
{
	struct rcar_cpg_clk *clk = (struct rcar_cpg_clk *)sys;

	if (!dev || !sys) {
		return -EINVAL;
	}

	if (clk->domain == CPG_CORE) {
		return r8a7795_cpg_core_clock_endisable(dev, clk, enable);
	}

	if (clk->domain == CPG_MOD) {
		struct r8a7795_cpg_mssr_data *data = dev->data;
		k_spinlock_key_t key = k_spin_lock(&data->cmn.lock);
		int ret;

		ret = rcar_cpg_mstp_clock_endisable(DEVICE_MMIO_GET(dev), clk->module, enable);
		k_spin_unlock(&data->cmn.lock, key);
		return ret;
	}

	return -EINVAL;
}
/*
 * Decode the divider currently programmed for @a module from raw register
 * value @a reg_val. Returns RCAR_CPG_NONE for unknown modules or prohibited
 * register settings.
 */
static uint32_t r8a7795_get_div_helper(uint32_t reg_val, uint32_t module)
{
	uint32_t field;

	switch (module) {
	case R8A7795_CLK_SD0H:
	case R8A7795_CLK_SD1H:
	case R8A7795_CLK_SD2H:
	case R8A7795_CLK_SD3H:
		field = (reg_val >> R8A7795_CLK_SDH_DIV_SHIFT) & R8A7795_CLK_SDH_DIV_MASK;
		/* field values of 5 and above are prohibited settings */
		return (field < 5) ? (1U << field) : RCAR_CPG_NONE;
	case R8A7795_CLK_SD0:
	case R8A7795_CLK_SD1:
	case R8A7795_CLK_SD2:
	case R8A7795_CLK_SD3:
		/* field 0/1 maps to divider 2/4 */
		return 1U << ((reg_val & R8A7795_CLK_SD_DIV_MASK) + 1);
	case R8A7795_CLK_CANFD:
		/* register stores (divider - 1) */
		return (reg_val & R8A7795_CLK_CANFD_DIV_MASK) + 1;
	case R8A7795_CLK_S3D4:
	case R8A7795_CLK_S0D12:
		/* fixed clocks: no configurable divider */
		return 1;
	default:
		return RCAR_CPG_NONE;
	}
}
/*
 * Translate a requested divider for @a module into the register field value
 * and field mask to program.
 *
 * @param module   core clock identifier (R8A7795_CLK_*)
 * @param divider  in: requested divider; out: encoded register field value
 * @param div_mask out: mask of the divider field within the register
 *
 * @return 0 on success, -EINVAL for a divider the hardware cannot encode,
 *         -ENOTSUP for a module without a configurable divider.
 *
 * Fix: removed the unreachable `break` statements after `return` (dead code).
 */
static int r8a7795_set_rate_helper(uint32_t module, uint32_t *divider, uint32_t *div_mask)
{
	switch (module) {
	case R8A7795_CLK_SD0:
	case R8A7795_CLK_SD1:
	case R8A7795_CLK_SD2:
	case R8A7795_CLK_SD3:
		/* possible to have only 2 or 4 */
		if (*divider == 2 || *divider == 4) {
			/* convert 2/4 to 0/1 */
			*divider >>= 2;
			*div_mask = R8A7795_CLK_SD_DIV_MASK << R8A7795_CLK_SD_DIV_SHIFT;
			return 0;
		}
		return -EINVAL;
	case R8A7795_CLK_SD0H:
	case R8A7795_CLK_SD1H:
	case R8A7795_CLK_SD2H:
	case R8A7795_CLK_SD3H:
		/* divider should be power of two and max possible value 16 */
		if (!is_power_of_two(*divider) || *divider > 16) {
			return -EINVAL;
		}
		/* 1,2,4,8,16 have to be converted to 0,1,2,3,4 and then shifted */
		*divider = (find_lsb_set(*divider) - 1) << R8A7795_CLK_SDH_DIV_SHIFT;
		*div_mask = R8A7795_CLK_SDH_DIV_MASK << R8A7795_CLK_SDH_DIV_SHIFT;
		return 0;
	case R8A7795_CLK_CANFD:
		/* according to documentation, divider value stored in reg is equal to: val + 1 */
		*divider -= 1;
		if (*divider <= R8A7795_CLK_CANFD_DIV_MASK) {
			*div_mask = R8A7795_CLK_CANFD_DIV_MASK;
			return 0;
		}
		return -EINVAL;
	default:
		return -ENOTSUP;
	}
}
/* clock_control .on handler: enable the given module/core clock. */
static int r8a7795_cpg_mssr_start(const struct device *dev, clock_control_subsys_t sys)
{
	return r8a7795_cpg_mssr_start_stop(dev, sys, true);
}
/* clock_control .off handler: disable the given module/core clock. */
static int r8a7795_cpg_mssr_stop(const struct device *dev, clock_control_subsys_t sys)
{
	return r8a7795_cpg_mssr_start_stop(dev, sys, false);
}
/*
 * Map the CPG MMIO region, then build the parent/child links between the
 * static clock tables and prime every clock's cached in/out frequency.
 */
static int r8a7795_cpg_mssr_init(const struct device *dev)
{
	DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);
	rcar_cpg_build_clock_relationship(dev);
	rcar_cpg_update_all_in_out_freq(dev);
	return 0;
}
/* clock_control API vtable; rate ops come from the common R-Car CPG code. */
static const struct clock_control_driver_api r8a7795_cpg_mssr_api = {
	.on = r8a7795_cpg_mssr_start,
	.off = r8a7795_cpg_mssr_stop,
	.get_rate = rcar_cpg_get_rate,
	.set_rate = rcar_cpg_set_rate,
};
/*
 * Per-instance boilerplate: ROM config holding the MMIO region from DT,
 * RAM data wiring in the core/module clock tables and the SoC-specific
 * divider helpers, and device registration at PRE_KERNEL_1 so the clock
 * controller is ready before its consumers initialize.
 */
#define R8A7795_MSSR_INIT(inst)							  \
	static struct r8a7795_cpg_mssr_config r8a7795_cpg_mssr##inst##_config = { \
		DEVICE_MMIO_ROM_INIT(DT_DRV_INST(inst)),			  \
	};									  \
										  \
	static struct r8a7795_cpg_mssr_data r8a7795_cpg_mssr##inst##_data = {	  \
		.cmn.clk_info_table[CPG_CORE] = core_props,			  \
		.cmn.clk_info_table_size[CPG_CORE] = ARRAY_SIZE(core_props),	  \
		.cmn.clk_info_table[CPG_MOD] = mod_props,			  \
		.cmn.clk_info_table_size[CPG_MOD] = ARRAY_SIZE(mod_props),	  \
		.cmn.get_div_helper = r8a7795_get_div_helper,			  \
		.cmn.set_rate_helper = r8a7795_set_rate_helper			  \
	};									  \
										  \
	DEVICE_DT_INST_DEFINE(inst,						  \
			      r8a7795_cpg_mssr_init,				  \
			      NULL,						  \
			      &r8a7795_cpg_mssr##inst##_data,			  \
			      &r8a7795_cpg_mssr##inst##_config,			  \
			      PRE_KERNEL_1,					  \
			      CONFIG_CLOCK_CONTROL_INIT_PRIORITY,		  \
			      &r8a7795_cpg_mssr_api);
DT_INST_FOREACH_STATUS_OKAY(R8A7795_MSSR_INIT)
``` | /content/code_sandbox/drivers/clock_control/clock_control_r8a7795_cpg_mssr.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,081 |
```c
/*
*
*
*/
#include <soc.h>
#include <stm32_ll_bus.h>
#include <stm32_ll_pwr.h>
#include <stm32_ll_rcc.h>
#include <stm32_ll_utils.h>
#include <stm32_ll_system.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include "clock_stm32_ll_mco.h"
/* Macros to fill up prescaler values */
#define z_ahb_prescaler(v) LL_RCC_SYSCLK_DIV_ ## v
#define ahb_prescaler(v) z_ahb_prescaler(v)
#define z_apb1_prescaler(v) LL_RCC_APB1_DIV_ ## v
#define apb1_prescaler(v) z_apb1_prescaler(v)
#define z_apb2_prescaler(v) LL_RCC_APB2_DIV_ ## v
#define apb2_prescaler(v) z_apb2_prescaler(v)
#define z_apb3_prescaler(v) LL_RCC_APB3_DIV_ ## v
#define apb3_prescaler(v) z_apb3_prescaler(v)
#define PLL1_ID 1
#define PLL2_ID 2
#define PLL3_ID 3
/** @brief Derive a bus frequency by dividing the parent clock by a prescaler. */
static uint32_t get_bus_clock(uint32_t clock, uint32_t prescaler)
{
	uint32_t bus_freq = clock / prescaler;

	return bus_freq;
}
/*
 * Current MSIS frequency in Hz. If range selection is enabled the active
 * range register is used, otherwise the after-standby range applies.
 */
static uint32_t get_msis_frequency(void)
{
	return __LL_RCC_CALC_MSIS_FREQ(LL_RCC_MSI_IsEnabledRangeSelect(),
				       ((LL_RCC_MSI_IsEnabledRangeSelect() == 1U) ?
					LL_RCC_MSIS_GetRange() :
					LL_RCC_MSIS_GetRangeAfterStandby()));
}
__unused
/** @brief returns the pll source frequency of given pll_id
 *
 * The source (HSI/HSE/MSIS) is a compile-time choice per PLL, expressed by
 * the STM32_PLLx_SRC_* config macros; asserts if no source is configured.
 */
static uint32_t get_pllsrc_frequency(size_t pll_id)
{

	if ((IS_ENABLED(STM32_PLL_SRC_HSI) && pll_id == PLL1_ID) ||
	    (IS_ENABLED(STM32_PLL2_SRC_HSI) && pll_id == PLL2_ID) ||
	    (IS_ENABLED(STM32_PLL3_SRC_HSI) && pll_id == PLL3_ID)) {
		return STM32_HSI_FREQ;
	} else if ((IS_ENABLED(STM32_PLL_SRC_HSE) && pll_id == PLL1_ID) ||
		   (IS_ENABLED(STM32_PLL2_SRC_HSE) && pll_id == PLL2_ID) ||
		   (IS_ENABLED(STM32_PLL3_SRC_HSE) && pll_id == PLL3_ID)) {
		return STM32_HSE_FREQ;
	} else if ((IS_ENABLED(STM32_PLL_SRC_MSIS) && pll_id == PLL1_ID) ||
		   (IS_ENABLED(STM32_PLL2_SRC_MSIS) && pll_id == PLL2_ID) ||
		   (IS_ENABLED(STM32_PLL3_SRC_MSIS) && pll_id == PLL3_ID)) {
		return get_msis_frequency();
	}

	__ASSERT(0, "No PLL Source configured");
	return 0;
}
/*
 * SYSCLK frequency at driver entry, derived from the currently selected
 * system clock source (as left by the ROM/bootloader), before this driver
 * applies its own configuration.
 */
static uint32_t get_startup_frequency(void)
{
	switch (LL_RCC_GetSysClkSource()) {
	case LL_RCC_SYS_CLKSOURCE_STATUS_MSIS:
		return get_msis_frequency();
	case LL_RCC_SYS_CLKSOURCE_STATUS_HSI:
		return STM32_HSI_FREQ;
	case LL_RCC_SYS_CLKSOURCE_STATUS_HSE:
		return STM32_HSE_FREQ;
	case LL_RCC_SYS_CLKSOURCE_STATUS_PLL1:
		/* NOTE(review): this returns the PLL *source* frequency, not the
		 * PLL output — presumably good enough for the latency/scaling
		 * decisions it feeds; confirm against callers.
		 */
		return get_pllsrc_frequency(PLL1_ID);
	default:
		__ASSERT(0, "Unexpected startup freq");
		return 0;
	}
}
__unused
/** @brief PLL output frequency: (pllsrc_freq / pllm_div) * plln_mul / pllout_div.
 *
 * Fix: the (src/M) * N intermediate is now computed in 64 bits so a high
 * VCO frequency cannot wrap around uint32_t before the output division
 * is applied; results that fit in 32 bits are unchanged.
 */
static uint32_t get_pllout_frequency(uint32_t pllsrc_freq,
				     int pllm_div,
				     int plln_mul,
				     int pllout_div)
{
	uint64_t vco_freq;

	__ASSERT_NO_MSG(pllm_div && pllout_div);

	vco_freq = (uint64_t)(pllsrc_freq / pllm_div) * plln_mul;

	return (uint32_t)(vco_freq / pllout_div);
}
/*
 * Target SYSCLK frequency implied by the compile-time clock configuration
 * (STM32_SYSCLK_SRC_* selects exactly one branch).
 */
static uint32_t get_sysclk_frequency(void)
{
#if defined(STM32_SYSCLK_SRC_PLL)
	return get_pllout_frequency(get_pllsrc_frequency(PLL1_ID),
				    STM32_PLL_M_DIVISOR,
				    STM32_PLL_N_MULTIPLIER,
				    STM32_PLL_R_DIVISOR);
#elif defined(STM32_SYSCLK_SRC_MSIS)
	return get_msis_frequency();
#elif defined(STM32_SYSCLK_SRC_HSE)
	return STM32_HSE_FREQ;
#elif defined(STM32_SYSCLK_SRC_HSI)
	return STM32_HSI_FREQ;
#else
	__ASSERT(0, "No SYSCLK Source configured");
	return 0;
#endif
}
/** @brief Verifies clock is part of active clock configuration */
static int enabled_clock(uint32_t src_clk)
{
	/* SYSCLK is always valid; every other source must be enabled by the
	 * build-time configuration to be usable as a domain clock source.
	 */
	if ((src_clk == STM32_SRC_SYSCLK) ||
	    ((src_clk == STM32_SRC_HSE) && IS_ENABLED(STM32_HSE_ENABLED)) ||
	    ((src_clk == STM32_SRC_HSI16) && IS_ENABLED(STM32_HSI_ENABLED)) ||
	    ((src_clk == STM32_SRC_HSI48) && IS_ENABLED(STM32_HSI48_ENABLED)) ||
	    ((src_clk == STM32_SRC_LSE) && IS_ENABLED(STM32_LSE_ENABLED)) ||
	    ((src_clk == STM32_SRC_LSI) && IS_ENABLED(STM32_LSI_ENABLED)) ||
	    ((src_clk == STM32_SRC_MSIS) && IS_ENABLED(STM32_MSIS_ENABLED)) ||
	    ((src_clk == STM32_SRC_MSIK) && IS_ENABLED(STM32_MSIK_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL1_P) && IS_ENABLED(STM32_PLL_P_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL1_Q) && IS_ENABLED(STM32_PLL_Q_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL1_R) && IS_ENABLED(STM32_PLL_R_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL2_P) && IS_ENABLED(STM32_PLL2_P_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL2_Q) && IS_ENABLED(STM32_PLL2_Q_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL2_R) && IS_ENABLED(STM32_PLL2_R_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL3_P) && IS_ENABLED(STM32_PLL3_P_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL3_Q) && IS_ENABLED(STM32_PLL3_Q_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL3_R) && IS_ENABLED(STM32_PLL3_R_ENABLED))) {
		return 0;
	}

	return -ENOTSUP;
}
/*
 * Turn on a gated peripheral clock by setting its enable bits in the bus
 * enable register, then perform a dummy read-back so the clock is active
 * before this function returns (reference-manual recommended delay).
 */
static inline int stm32_clock_control_on(const struct device *dev,
					 clock_control_subsys_t sub_system)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);
	volatile int tmpreg;

	ARG_UNUSED(dev);

	if (!IN_RANGE(pclken->bus, STM32_PERIPH_BUS_MIN, STM32_PERIPH_BUS_MAX)) {
		/* Not a gated peripheral bus clock: nothing to switch on */
		return -ENOTSUP;
	}

	sys_set_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + pclken->bus, pclken->enr);

	tmpreg = sys_read32(DT_REG_ADDR(DT_NODELABEL(rcc)) + pclken->bus);
	UNUSED(tmpreg);

	return 0;
}
/* Gate a peripheral clock by clearing its enable bits in the bus register. */
static inline int stm32_clock_control_off(const struct device *dev,
					  clock_control_subsys_t sub_system)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);

	ARG_UNUSED(dev);

	if (!IN_RANGE(pclken->bus, STM32_PERIPH_BUS_MIN, STM32_PERIPH_BUS_MAX)) {
		/* Not a gated peripheral bus clock: nothing to switch off */
		return -ENOTSUP;
	}

	sys_clear_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + pclken->bus, pclken->enr);

	return 0;
}
/*
 * Select the kernel/domain clock source for a peripheral: clear the source
 * selection field, then program the requested value. Fails if the requested
 * source is not part of the active clock configuration.
 */
static inline int stm32_clock_control_configure(const struct device *dev,
						clock_control_subsys_t sub_system,
						void *data)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);
	uintptr_t sel_reg;
	uint32_t shift;
	int ret;

	ARG_UNUSED(dev);
	ARG_UNUSED(data);

	ret = enabled_clock(pclken->bus);
	if (ret < 0) {
		/* Requested source clock is not available/valid */
		return ret;
	}

	sel_reg = DT_REG_ADDR(DT_NODELABEL(rcc)) + STM32_CLOCK_REG_GET(pclken->enr);
	shift = STM32_CLOCK_SHIFT_GET(pclken->enr);

	sys_clear_bits(sel_reg, STM32_CLOCK_MASK_GET(pclken->enr) << shift);
	sys_set_bits(sel_reg, STM32_CLOCK_VAL_GET(pclken->enr) << shift);

	return 0;
}
/*
 * Report the frequency (Hz) of a bus or clock source identified by
 * pclken->bus: bus clocks are derived from SystemCoreClock and the
 * configured prescalers; sources (HSI/HSE/MSI/LSx/PLLx) come from their
 * build-time configuration. Returns -ENOTSUP for unknown identifiers.
 */
static int stm32_clock_control_get_subsys_rate(const struct device *dev,
					       clock_control_subsys_t sys,
					       uint32_t *rate)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sys);
	/*
	 * Get AHB Clock (= SystemCoreClock = SYSCLK/prescaler)
	 * SystemCoreClock is preferred to CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC
	 * since it will be updated after clock configuration and hence
	 * more likely to contain actual clock speed
	 */
	uint32_t ahb_clock = SystemCoreClock;
	uint32_t apb1_clock = get_bus_clock(ahb_clock, STM32_APB1_PRESCALER);
	uint32_t apb2_clock = get_bus_clock(ahb_clock, STM32_APB2_PRESCALER);
	uint32_t apb3_clock = get_bus_clock(ahb_clock, STM32_APB3_PRESCALER);

	ARG_UNUSED(dev);

	switch (pclken->bus) {
	case STM32_CLOCK_BUS_AHB1:
	case STM32_CLOCK_BUS_AHB2:
	case STM32_CLOCK_BUS_AHB2_2:
	case STM32_CLOCK_BUS_AHB3:
		*rate = ahb_clock;
		break;
	case STM32_CLOCK_BUS_APB1:
	case STM32_CLOCK_BUS_APB1_2:
		*rate = apb1_clock;
		break;
	case STM32_CLOCK_BUS_APB2:
		*rate = apb2_clock;
		break;
	case STM32_CLOCK_BUS_APB3:
		*rate = apb3_clock;
		break;
	case STM32_SRC_SYSCLK:
		*rate = get_sysclk_frequency();
		break;
#if defined(STM32_HSI_ENABLED)
	case STM32_SRC_HSI16:
		*rate = STM32_HSI_FREQ;
		break;
#endif /* STM32_HSI_ENABLED */
#if defined(STM32_MSIS_ENABLED)
	case STM32_SRC_MSIS:
		*rate = get_msis_frequency();
		break;
#endif /* STM32_MSIS_ENABLED */
#if defined(STM32_MSIK_ENABLED)
	case STM32_SRC_MSIK:
		*rate = __LL_RCC_CALC_MSIK_FREQ(LL_RCC_MSIRANGESEL_RUN,
				STM32_MSIK_RANGE << RCC_ICSCR1_MSIKRANGE_Pos);
		break;
#endif /* STM32_MSIK_ENABLED */
#if defined(STM32_HSE_ENABLED)
	case STM32_SRC_HSE:
		*rate = STM32_HSE_FREQ;
		break;
#endif /* STM32_HSE_ENABLED */
#if defined(STM32_LSE_ENABLED)
	case STM32_SRC_LSE:
		*rate = STM32_LSE_FREQ;
		break;
#endif /* STM32_LSE_ENABLED */
#if defined(STM32_LSI_ENABLED)
	case STM32_SRC_LSI:
		*rate = STM32_LSI_FREQ;
		break;
#endif /* STM32_LSI_ENABLED */
#if defined(STM32_HSI48_ENABLED)
	case STM32_SRC_HSI48:
		*rate = STM32_HSI48_FREQ;
		break;
#endif /* STM32_HSI48_ENABLED */
#if defined(STM32_PLL_ENABLED)
	case STM32_SRC_PLL1_P:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL1_ID),
					     STM32_PLL_M_DIVISOR,
					     STM32_PLL_N_MULTIPLIER,
					     STM32_PLL_P_DIVISOR);
		break;
	case STM32_SRC_PLL1_Q:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL1_ID),
					     STM32_PLL_M_DIVISOR,
					     STM32_PLL_N_MULTIPLIER,
					     STM32_PLL_Q_DIVISOR);
		break;
	case STM32_SRC_PLL1_R:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL1_ID),
					     STM32_PLL_M_DIVISOR,
					     STM32_PLL_N_MULTIPLIER,
					     STM32_PLL_R_DIVISOR);
		break;
#endif /* STM32_PLL_ENABLED */
#if defined(STM32_PLL2_ENABLED)
	case STM32_SRC_PLL2_P:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL2_ID),
					     STM32_PLL2_M_DIVISOR,
					     STM32_PLL2_N_MULTIPLIER,
					     STM32_PLL2_P_DIVISOR);
		break;
	case STM32_SRC_PLL2_Q:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL2_ID),
					     STM32_PLL2_M_DIVISOR,
					     STM32_PLL2_N_MULTIPLIER,
					     STM32_PLL2_Q_DIVISOR);
		break;
	case STM32_SRC_PLL2_R:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL2_ID),
					     STM32_PLL2_M_DIVISOR,
					     STM32_PLL2_N_MULTIPLIER,
					     STM32_PLL2_R_DIVISOR);
		break;
#endif /* STM32_PLL2_ENABLED */
#if defined(STM32_PLL3_ENABLED)
	case STM32_SRC_PLL3_P:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL3_ID),
					     STM32_PLL3_M_DIVISOR,
					     STM32_PLL3_N_MULTIPLIER,
					     STM32_PLL3_P_DIVISOR);
		break;
	case STM32_SRC_PLL3_Q:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL3_ID),
					     STM32_PLL3_M_DIVISOR,
					     STM32_PLL3_N_MULTIPLIER,
					     STM32_PLL3_Q_DIVISOR);
		break;
	case STM32_SRC_PLL3_R:
		*rate = get_pllout_frequency(get_pllsrc_frequency(PLL3_ID),
					     STM32_PLL3_M_DIVISOR,
					     STM32_PLL3_N_MULTIPLIER,
					     STM32_PLL3_R_DIVISOR);
		break;
#endif /* STM32_PLL3_ENABLED */
	default:
		return -ENOTSUP;
	}

	return 0;
}
/*
 * Status query: gated peripheral clocks are ON when all their enable bits
 * are set in the bus register; fixed/domain sources are ON exactly when
 * they are part of the active clock configuration.
 */
static enum clock_control_status stm32_clock_control_get_status(const struct device *dev,
								clock_control_subsys_t sub_system)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)sub_system;

	ARG_UNUSED(dev);

	if (IN_RANGE(pclken->bus, STM32_PERIPH_BUS_MIN, STM32_PERIPH_BUS_MAX) == true) {
		/* Gated clock: read the enable bits back */
		uint32_t reg = sys_read32(DT_REG_ADDR(DT_NODELABEL(rcc)) + pclken->bus);

		return ((reg & pclken->enr) == pclken->enr) ? CLOCK_CONTROL_STATUS_ON
							    : CLOCK_CONTROL_STATUS_OFF;
	}

	/* Domain clock source */
	return (enabled_clock(pclken->bus) == 0) ? CLOCK_CONTROL_STATUS_ON
						 : CLOCK_CONTROL_STATUS_OFF;
}
/* clock_control API vtable for the U5 RCC driver. */
static const struct clock_control_driver_api stm32_clock_control_api = {
	.on = stm32_clock_control_on,
	.off = stm32_clock_control_off,
	.get_rate = stm32_clock_control_get_subsys_rate,
	.get_status = stm32_clock_control_get_status,
	.configure = stm32_clock_control_configure,
};
__unused
/*
 * Classify the PLL VCO input frequency (source / M) into the LL input-range
 * constant expected by the hardware: [4,8] MHz or (8,16] MHz.
 * Returns -ERANGE if the frequency falls outside 4..16 MHz.
 */
static int get_vco_input_range(uint32_t m_div, uint32_t *range, size_t pll_id)
{
	uint32_t vco_freq = get_pllsrc_frequency(pll_id) / m_div;

	if (vco_freq < MHZ(4) || vco_freq > MHZ(16)) {
		return -ERANGE;
	}

	*range = (vco_freq <= MHZ(8)) ? LL_RCC_PLLINPUTRANGE_4_8
				      : LL_RCC_PLLINPUTRANGE_8_16;

	return 0;
}
/*
 * Pick the lowest regulator voltage scale that supports the requested HCLK
 * frequency (scale 4 below 25 MHz up to scale 1 at/above 110 MHz), then
 * wait until the new scaling is in effect.
 */
static void set_regu_voltage(uint32_t hclk_freq)
{
	uint32_t scale;

	if (hclk_freq >= MHZ(110)) {
		scale = LL_PWR_REGU_VOLTAGE_SCALE1;
	} else if (hclk_freq >= MHZ(55)) {
		scale = LL_PWR_REGU_VOLTAGE_SCALE2;
	} else if (hclk_freq >= MHZ(25)) {
		scale = LL_PWR_REGU_VOLTAGE_SCALE3;
	} else {
		scale = LL_PWR_REGU_VOLTAGE_SCALE4;
	}

	LL_PWR_SetRegulVoltageScaling(scale);

	/* Wait for the voltage scaling to become active */
	while (LL_PWR_IsActiveFlag_VOS() == 0) {
	}
}
#if defined(STM32_PLL_ENABLED)
/*
 * Dynamic voltage scaling:
 * Enable the Booster mode before enabling then PLL for sysclock above 55MHz
 * The goal of this function is to set the epod prescaler, so that epod clock freq
 * is between 4MHz and 16MHz.
 * Up to now only MSI as PLL1 source clock can be > 16MHz, requiring a epod prescaler > 1
 * For HSI16, epod prescaler is default (div1, not divided).
 * Once HSE is > 16MHz, the epod prescaler would also be also required.
 */
static void set_epod_booster(void)
{
	/* Reset Epod Prescaler in case it was set earlier with another DIV value */
	LL_PWR_DisableEPODBooster();
	while (LL_PWR_IsActiveFlag_BOOST() == 1) {
	}
	LL_RCC_SetPll1EPodPrescaler(LL_RCC_PLL1MBOOST_DIV_1);
	if (MHZ(55) <= CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) {
		/*
		 * Set EPOD clock prescaler based on PLL1 input freq
		 * (MSI/PLLM or HSE/PLLM when HSE is > 16MHz
		 * Booster clock frequency should be between 4 and 16MHz
		 * This is done in following steps:
		 * Read MSI Frequency or HSE oscillaor freq
		 * Divide PLL1 input freq (MSI/PLL or HSE/PLLM)
		 * by the targeted freq (8MHz).
		 * Make sure value is not higher than 16
		 * Shift in the register space (/2)
		 */
		int tmp;

		if (IS_ENABLED(STM32_PLL_SRC_MSIS)) {
			tmp = __LL_RCC_CALC_MSIS_FREQ(LL_RCC_MSIRANGESEL_RUN,
				STM32_MSIS_RANGE << RCC_ICSCR1_MSISRANGE_Pos);
		} else if (IS_ENABLED(STM32_PLL_SRC_HSE) && (MHZ(16) < STM32_HSE_FREQ)) {
			tmp = STM32_HSE_FREQ;
		} else {
			/* HSI16 or low-frequency HSE: default div1 prescaler is fine */
			return;
		}
		tmp = MIN(tmp / STM32_PLL_M_DIVISOR / 8000000, 16);
		tmp = tmp / 2;

		/* Configure the epod clock frequency between 4 and 16 MHz */
		LL_RCC_SetPll1EPodPrescaler(tmp << RCC_PLL1CFGR_PLL1MBOOST_Pos);

		/* Enable EPOD booster and wait for booster ready flag set */
		LL_PWR_EnableEPODBooster();
		while (LL_PWR_IsActiveFlag_BOOST() == 0) {
		}
	}
}
#endif /* STM32_PLL_ENABLED */
__unused
/*
 * Fall back to HSI as the system clock: make sure the oscillator is running,
 * select it, wait for the switch, and run the AHB undivided. Used to keep a
 * valid SYSCLK while the PLL is being reconfigured.
 */
static void clock_switch_to_hsi(void)
{
	if (LL_RCC_HSI_IsReady() != 1) {
		LL_RCC_HSI_Enable();
		while (LL_RCC_HSI_IsReady() != 1) {
			/* Wait for HSI ready */
		}
	}

	LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_HSI);
	while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_HSI) {
		/* Wait for the SYSCLK switch to complete */
	}

	LL_RCC_SetAHBPrescaler(LL_RCC_SYSCLK_DIV_1);
}
__unused
/*
 * Configure and start every enabled PLL (PLL1/PLL2/PLL3) from the
 * compile-time STM32_* settings; a PLL that is not enabled gets its source
 * forced to NONE. For PLL1 the system is first switched to HSI (if it was
 * running from PLL1) and the EPOD booster is set up before the new VCO
 * frequency takes effect. Returns 0 or a negative errno.
 */
static int set_up_plls(void)
{
#if defined(STM32_PLL_ENABLED) || defined(STM32_PLL2_ENABLED) || \
	defined(STM32_PLL3_ENABLED)
	int r;
	uint32_t vco_input_range;
#endif

#if defined(STM32_PLL_ENABLED)

	/*
	 * Switch to HSI and disable the PLL before configuration.
	 * (Switching to HSI makes sure we have a SYSCLK source in
	 * case we're currently running from the PLL we're about to
	 * turn off and reconfigure.)
	 */
	if (LL_RCC_GetSysClkSource() == LL_RCC_SYS_CLKSOURCE_STATUS_PLL1) {
		clock_switch_to_hsi();
	}
	LL_RCC_PLL1_Disable();

	/* Configure PLL source : Can be HSE, HSI, MSIS */
	if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		/* Main PLL configuration and activation */
		LL_RCC_PLL1_SetMainSource(LL_RCC_PLL1SOURCE_HSE);
	} else if (IS_ENABLED(STM32_PLL_SRC_MSIS)) {
		/* Main PLL configuration and activation */
		LL_RCC_PLL1_SetMainSource(LL_RCC_PLL1SOURCE_MSIS);
	} else if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		/* Main PLL configuration and activation */
		LL_RCC_PLL1_SetMainSource(LL_RCC_PLL1SOURCE_HSI);
	} else {
		return -ENOTSUP;
	}

	/*
	 * Configure the EPOD booster
	 * before increasing the system clock freq
	 * and after pll clock source is set
	 */
	set_epod_booster();

	r = get_vco_input_range(STM32_PLL_M_DIVISOR, &vco_input_range, PLL1_ID);
	if (r < 0) {
		return r;
	}

	LL_RCC_PLL1_SetDivider(STM32_PLL_M_DIVISOR);

	/* Set VCO Input before enabling the PLL, depends on freq used for PLL1 */
	LL_RCC_PLL1_SetVCOInputRange(vco_input_range);

	LL_RCC_PLL1_SetN(STM32_PLL_N_MULTIPLIER);

	LL_RCC_PLL1FRACN_Disable();
	if (IS_ENABLED(STM32_PLL_FRACN_ENABLED)) {
		LL_RCC_PLL1_SetFRACN(STM32_PLL_FRACN_VALUE);
		LL_RCC_PLL1FRACN_Enable();
	}

	/* Enable only the output dividers that are requested */
	if (IS_ENABLED(STM32_PLL_P_ENABLED)) {
		LL_RCC_PLL1_SetP(STM32_PLL_P_DIVISOR);
		LL_RCC_PLL1_EnableDomain_SAI();
	}

	if (IS_ENABLED(STM32_PLL_Q_ENABLED)) {
		LL_RCC_PLL1_SetQ(STM32_PLL_Q_DIVISOR);
		LL_RCC_PLL1_EnableDomain_48M();
	}

	if (IS_ENABLED(STM32_PLL_R_ENABLED)) {
		__ASSERT_NO_MSG((STM32_PLL_R_DIVISOR == 1) ||
				(STM32_PLL_R_DIVISOR % 2 == 0));
		LL_RCC_PLL1_SetR(STM32_PLL_R_DIVISOR);
		LL_RCC_PLL1_EnableDomain_SYS();
	}

	LL_RCC_PLL1_Enable();
	while (LL_RCC_PLL1_IsReady() != 1U) {
	}
#else
	/* Init PLL source to None */
	LL_RCC_PLL1_SetMainSource(LL_RCC_PLL1SOURCE_NONE);
#endif /* STM32_PLL_ENABLED */

#if defined(STM32_PLL2_ENABLED)
	/* Configure PLL2 source */
	if (IS_ENABLED(STM32_PLL2_SRC_HSE)) {
		LL_RCC_PLL2_SetSource(LL_RCC_PLL2SOURCE_HSE);
	} else if (IS_ENABLED(STM32_PLL2_SRC_MSIS)) {
		LL_RCC_PLL2_SetSource(LL_RCC_PLL2SOURCE_MSIS);
	} else if (IS_ENABLED(STM32_PLL2_SRC_HSI)) {
		LL_RCC_PLL2_SetSource(LL_RCC_PLL2SOURCE_HSI);
	} else {
		return -ENOTSUP;
	}

	r = get_vco_input_range(STM32_PLL2_M_DIVISOR, &vco_input_range, PLL2_ID);
	if (r < 0) {
		return r;
	}

	LL_RCC_PLL2_SetDivider(STM32_PLL2_M_DIVISOR);

	LL_RCC_PLL2_SetVCOInputRange(vco_input_range);

	LL_RCC_PLL2_SetN(STM32_PLL2_N_MULTIPLIER);

	LL_RCC_PLL2FRACN_Disable();
	if (IS_ENABLED(STM32_PLL2_FRACN_ENABLED)) {
		LL_RCC_PLL2_SetFRACN(STM32_PLL2_FRACN_VALUE);
		LL_RCC_PLL2FRACN_Enable();
	}

	if (IS_ENABLED(STM32_PLL2_P_ENABLED)) {
		LL_RCC_PLL2_SetP(STM32_PLL2_P_DIVISOR);
		SET_BIT(RCC->PLL2CFGR, RCC_PLL2CFGR_PLL2PEN);
	}

	if (IS_ENABLED(STM32_PLL2_Q_ENABLED)) {
		LL_RCC_PLL2_SetQ(STM32_PLL2_Q_DIVISOR);
		SET_BIT(RCC->PLL2CFGR, RCC_PLL2CFGR_PLL2QEN);
	}

	if (IS_ENABLED(STM32_PLL2_R_ENABLED)) {
		LL_RCC_PLL2_SetR(STM32_PLL2_R_DIVISOR);
		SET_BIT(RCC->PLL2CFGR, RCC_PLL2CFGR_PLL2REN);
	}

	LL_RCC_PLL2_Enable();
	while (LL_RCC_PLL2_IsReady() != 1U) {
	}
#else
	/* Init PLL2 source to None */
	LL_RCC_PLL2_SetSource(LL_RCC_PLL2SOURCE_NONE);
#endif /* STM32_PLL2_ENABLED */

#if defined(STM32_PLL3_ENABLED)
	/* Configure PLL3 source */
	if (IS_ENABLED(STM32_PLL3_SRC_HSE)) {
		LL_RCC_PLL3_SetSource(LL_RCC_PLL3SOURCE_HSE);
	} else if (IS_ENABLED(STM32_PLL3_SRC_MSIS)) {
		LL_RCC_PLL3_SetSource(LL_RCC_PLL3SOURCE_MSIS);
	} else if (IS_ENABLED(STM32_PLL3_SRC_HSI)) {
		LL_RCC_PLL3_SetSource(LL_RCC_PLL3SOURCE_HSI);
	} else {
		return -ENOTSUP;
	}

	r = get_vco_input_range(STM32_PLL3_M_DIVISOR, &vco_input_range, PLL3_ID);
	if (r < 0) {
		return r;
	}

	LL_RCC_PLL3_SetDivider(STM32_PLL3_M_DIVISOR);

	LL_RCC_PLL3_SetVCOInputRange(vco_input_range);

	LL_RCC_PLL3_SetN(STM32_PLL3_N_MULTIPLIER);

	LL_RCC_PLL3FRACN_Disable();
	if (IS_ENABLED(STM32_PLL3_FRACN_ENABLED)) {
		LL_RCC_PLL3_SetFRACN(STM32_PLL3_FRACN_VALUE);
		LL_RCC_PLL3FRACN_Enable();
	}

	if (IS_ENABLED(STM32_PLL3_P_ENABLED)) {
		LL_RCC_PLL3_SetP(STM32_PLL3_P_DIVISOR);
		SET_BIT(RCC->PLL3CFGR, RCC_PLL3CFGR_PLL3PEN);
	}

	if (IS_ENABLED(STM32_PLL3_Q_ENABLED)) {
		LL_RCC_PLL3_SetQ(STM32_PLL3_Q_DIVISOR);
		SET_BIT(RCC->PLL3CFGR, RCC_PLL3CFGR_PLL3QEN);
	}

	if (IS_ENABLED(STM32_PLL3_R_ENABLED)) {
		LL_RCC_PLL3_SetR(STM32_PLL3_R_DIVISOR);
		SET_BIT(RCC->PLL3CFGR, RCC_PLL3CFGR_PLL3REN);
	}

	LL_RCC_PLL3_Enable();
	while (LL_RCC_PLL3_IsReady() != 1U) {
	}
#else
	/* Init PLL3 source to None */
	LL_RCC_PLL3_SetSource(LL_RCC_PLL3SOURCE_NONE);
#endif /* STM32_PLL3_ENABLED */

	return 0;
}
/*
 * Start every fixed-frequency oscillator enabled in the configuration
 * (HSE, HSI, LSE, MSIS, MSIK, LSI, HSI48), busy-waiting on each one's
 * ready flag. LSE/LSI live in the backup domain, so backup-domain write
 * access is temporarily enabled around their setup.
 */
static void set_up_fixed_clock_sources(void)
{
	if (IS_ENABLED(STM32_HSE_ENABLED)) {
		/* Check if need to enable HSE bypass feature or not */
		if (IS_ENABLED(STM32_HSE_BYPASS)) {
			LL_RCC_HSE_EnableBypass();
		} else {
			LL_RCC_HSE_DisableBypass();
		}

		/* Enable HSE */
		LL_RCC_HSE_Enable();
		while (LL_RCC_HSE_IsReady() != 1) {
		/* Wait for HSE ready */
		}
	}

	if (IS_ENABLED(STM32_HSI_ENABLED)) {
		/* Enable HSI if not enabled */
		if (LL_RCC_HSI_IsReady() != 1) {
			/* Enable HSI */
			LL_RCC_HSI_Enable();
			while (LL_RCC_HSI_IsReady() != 1) {
			/* Wait for HSI ready */
			}
		}
	}

	if (IS_ENABLED(STM32_LSE_ENABLED)) {
		/* Enable the power interface clock */
		LL_AHB3_GRP1_EnableClock(LL_AHB3_GRP1_PERIPH_PWR);

		if (!LL_PWR_IsEnabledBkUpAccess()) {
			/* Enable write access to Backup domain */
			LL_PWR_EnableBkUpAccess();
			while (!LL_PWR_IsEnabledBkUpAccess()) {
				/* Wait for Backup domain access */
			}
		}

		/* Configure driving capability */
		LL_RCC_LSE_SetDriveCapability(STM32_LSE_DRIVING << RCC_BDCR_LSEDRV_Pos);

		if (IS_ENABLED(STM32_LSE_BYPASS)) {
			/* Configure LSE bypass */
			LL_RCC_LSE_EnableBypass();
		}

		/* Enable LSE Oscillator */
		LL_RCC_LSE_Enable();
		/* Wait for LSE ready */
		while (!LL_RCC_LSE_IsReady()) {
		}

		/* Enable LSESYS additionally */
		LL_RCC_LSE_EnablePropagation();
		/* Wait till LSESYS is ready */
		while (!LL_RCC_LSESYS_IsReady()) {
		}

		LL_PWR_DisableBkUpAccess();
	}

	if (IS_ENABLED(STM32_MSIS_ENABLED)) {
		/* Set MSIS Range */
		LL_RCC_MSI_EnableRangeSelection();

		LL_RCC_MSIS_SetRange(STM32_MSIS_RANGE << RCC_ICSCR1_MSISRANGE_Pos);

		if (IS_ENABLED(STM32_MSIS_PLL_MODE)) {
			__ASSERT(STM32_LSE_ENABLED,
				"MSIS Hardware auto calibration needs LSE clock activation");
			/* Enable MSI hardware auto calibration */
			LL_RCC_SetMSIPLLMode(LL_RCC_PLLMODE_MSIS);
			LL_RCC_MSI_EnablePLLMode();
		}

		/* Enable MSIS */
		LL_RCC_MSIS_Enable();

		/* Wait till MSIS is ready */
		while (LL_RCC_MSIS_IsReady() != 1) {
		}
	}

	if (IS_ENABLED(STM32_MSIK_ENABLED)) {
		/* Set MSIK Range */
		LL_RCC_MSI_EnableRangeSelection();

		LL_RCC_MSIK_SetRange(STM32_MSIK_RANGE << RCC_ICSCR1_MSIKRANGE_Pos);

		if (IS_ENABLED(STM32_MSIK_PLL_MODE)) {
			__ASSERT(STM32_LSE_ENABLED,
				"MSIK Hardware auto calibration needs LSE clock activation");
			/* Enable MSI hardware auto calibration */
			LL_RCC_SetMSIPLLMode(LL_RCC_PLLMODE_MSIK);
			LL_RCC_MSI_EnablePLLMode();
		}

		if (IS_ENABLED(STM32_MSIS_ENABLED)) {
			__ASSERT((STM32_MSIK_PLL_MODE == STM32_MSIS_PLL_MODE),
				"Please check MSIS/MSIK config consistency");
		}

		/* Enable MSIK */
		LL_RCC_MSIK_Enable();

		/* Wait till MSIK is ready */
		while (LL_RCC_MSIK_IsReady() != 1) {
		}
	}

	if (IS_ENABLED(STM32_LSI_ENABLED)) {
		if (!LL_AHB3_GRP1_IsEnabledClock(LL_AHB3_GRP1_PERIPH_PWR)) {
			/* Enable the power interface clock */
			LL_AHB3_GRP1_EnableClock(LL_AHB3_GRP1_PERIPH_PWR);
		}

		if (!LL_PWR_IsEnabledBkUpAccess()) {
			/* Enable write access to Backup domain */
			LL_PWR_EnableBkUpAccess();
			while (!LL_PWR_IsEnabledBkUpAccess()) {
				/* Wait for Backup domain access */
			}
		}

		/* Enable LSI oscillator */
		LL_RCC_LSI_Enable();
		while (LL_RCC_LSI_IsReady() != 1) {
		}

		LL_PWR_DisableBkUpAccess();
	}

	if (IS_ENABLED(STM32_HSI48_ENABLED)) {
		LL_RCC_HSI48_Enable();
		while (LL_RCC_HSI48_IsReady() != 1) {
		}
	}
}
/**
 * @brief One-time RCC setup: regulator, flash latency, oscillators, PLLs,
 * bus prescalers and SYSCLK source selection.
 *
 * Flash latency is raised *before* any clock change when the frequency goes
 * up, and lowered only *after* all clock changes otherwise, so the flash is
 * never run with too few wait states.
 *
 * @param dev Unused (RCC is a singleton).
 * @return 0 on success, negative errno from PLL setup, -ENOTSUP if no
 *         supported SYSCLK source is selected in devicetree.
 */
int stm32_clock_control_init(const struct device *dev)
{
	uint32_t old_hclk_freq;
	int r;
	ARG_UNUSED(dev);
	/* Current hclk value */
	old_hclk_freq = __LL_RCC_CALC_HCLK_FREQ(get_startup_frequency(), LL_RCC_GetAHBPrescaler());
	/* Set voltage regulator to comply with targeted system frequency */
	set_regu_voltage(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);
	/* Set flash latency */
	/* If freq increases, set flash latency before any clock setting */
	if (old_hclk_freq < CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) {
		LL_SetFlashLatency(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);
	}
	/* Set up individual enabled clocks */
	set_up_fixed_clock_sources();
	/* Set up PLLs */
	r = set_up_plls();
	if (r < 0) {
		return r;
	}
	/* Set peripheral buses prescalers */
	LL_RCC_SetAHBPrescaler(ahb_prescaler(STM32_AHB_PRESCALER));
	LL_RCC_SetAPB1Prescaler(apb1_prescaler(STM32_APB1_PRESCALER));
	LL_RCC_SetAPB2Prescaler(apb2_prescaler(STM32_APB2_PRESCALER));
	LL_RCC_SetAPB3Prescaler(apb3_prescaler(STM32_APB3_PRESCALER));
	/* Switch SYSCLK to the devicetree-selected source; each branch spins
	 * until the hardware reports the switch completed.
	 */
	if (IS_ENABLED(STM32_SYSCLK_SRC_PLL)) {
		/* Set PLL1 as System Clock Source */
		LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_PLL1);
		while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_PLL1) {
		}
	} else if (IS_ENABLED(STM32_SYSCLK_SRC_HSE)) {
		/* Set HSE as SYSCLCK source */
		LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_HSE);
		while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_HSE) {
		}
	} else if (IS_ENABLED(STM32_SYSCLK_SRC_MSIS)) {
		/* Set MSIS as SYSCLCK source */
		LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_MSIS);
		while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_MSIS) {
		}
	} else if (IS_ENABLED(STM32_SYSCLK_SRC_HSI)) {
		/* Set HSI as SYSCLCK source */
		LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_HSI);
		while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_HSI) {
		}
	} else {
		return -ENOTSUP;
	}
	/* Set FLASH latency */
	/* If freq not increased, set flash latency after all clock setting */
	if (old_hclk_freq >= CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) {
		LL_SetFlashLatency(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);
	}
	/* Update CMSIS variable */
	SystemCoreClock = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
	/* configure MCO1/MCO2 based on Kconfig */
	stm32_clock_control_mco_init();
	return 0;
}
/**
 * @brief RCC device. Registered at PRE_KERNEL_1 with
 * CONFIG_CLOCK_CONTROL_INIT_PRIORITY so that the device init runs just
 * after SoC init.
 */
DEVICE_DT_DEFINE(DT_NODELABEL(rcc),
stm32_clock_control_init,
NULL,
NULL, NULL,
PRE_KERNEL_1,
CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
&stm32_clock_control_api);
``` | /content/code_sandbox/drivers/clock_control/clock_stm32_ll_u5.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 7,928 |
```c
/*
*
*/
#include <string.h>
#define DT_DRV_COMPAT renesas_ra_clock_generation_circuit
#include <zephyr/drivers/clock_control.h>
#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/dt-bindings/clock/renesas-ra-cgc.h>
#if DT_SAME_NODE(DT_INST_PROP(0, clock_source), DT_PATH(clocks, pll))
#define SYSCLK_SRC pll
#elif DT_SAME_NODE(DT_INST_PROP(0, clock_source), DT_PATH(clocks, mosc))
#define SYSCLK_SRC mosc
#elif DT_SAME_NODE(DT_INST_PROP(0, clock_source), DT_PATH(clocks, sosc))
#define SYSCLK_SRC sosc
#elif DT_SAME_NODE(DT_INST_PROP(0, clock_source), DT_PATH(clocks, hoco))
#define SYSCLK_SRC hoco
#elif DT_SAME_NODE(DT_INST_PROP(0, clock_source), DT_PATH(clocks, moco))
#define SYSCLK_SRC moco
#elif DT_SAME_NODE(DT_INST_PROP(0, clock_source), DT_PATH(clocks, loco))
#define SYSCLK_SRC loco
#else
#error Unknown clock source
#endif
#define FREQ_iclk (clock_freqs[_CONCAT(SCRSCK_, SYSCLK_SRC)] / DT_INST_PROP(0, iclk_div))
#define FREQ_pclka (clock_freqs[_CONCAT(SCRSCK_, SYSCLK_SRC)] / DT_INST_PROP(0, pclka_div))
#define FREQ_pclkb (clock_freqs[_CONCAT(SCRSCK_, SYSCLK_SRC)] / DT_INST_PROP(0, pclkb_div))
#define FREQ_pclkc (clock_freqs[_CONCAT(SCRSCK_, SYSCLK_SRC)] / DT_INST_PROP(0, pclkc_div))
#define FREQ_pclkd (clock_freqs[_CONCAT(SCRSCK_, SYSCLK_SRC)] / DT_INST_PROP(0, pclkd_div))
#define FREQ_fclk (clock_freqs[_CONCAT(SCRSCK_, SYSCLK_SRC)] / DT_INST_PROP(0, fclk_div))
#define CLKSRC_FREQ(clk) DT_PROP(DT_PATH(clocks, clk), clock_frequency)
#define IS_CLKSRC_ENABLED(clk) DT_NODE_HAS_STATUS(DT_PATH(clocks, clk), okay)
#define SCKSCR_INIT_VALUE _CONCAT(CLKSRC_, SYSCLK_SRC)
#define SCKDIV_ENABLED(clk) DT_INST_NODE_HAS_PROP(0, clk##_div)
#define SCKDIV_VAL(clk) _CONCAT(SCKDIV_, DT_INST_PROP(0, clk##_div))
#define SCKDIV_POS(clk) _CONCAT(SCKDIV_POS_, clk)
#define SCKDIVCR_BITS(clk) \
COND_CODE_1(SCKDIV_ENABLED(clk), ((SCKDIV_VAL(clk) & 0xFU) << SCKDIV_POS(clk)), (0U))
#define SCKDIVCR_INIT_VALUE \
(SCKDIVCR_BITS(iclk) | SCKDIVCR_BITS(pclka) | SCKDIVCR_BITS(pclkb) | \
SCKDIVCR_BITS(pclkc) | SCKDIVCR_BITS(pclkd) | SCKDIVCR_BITS(bclk) | SCKDIVCR_BITS(fclk))
#define HOCOWTCR_INIT_VALUE (6)
/*
 * Required cycles for the sub-clock to stabilize.
 */
#define SUBCLK_STABILIZE_CYCLES 5
extern int z_clock_hw_cycles_per_sec;
enum {
CLKSRC_hoco = 0,
CLKSRC_moco,
CLKSRC_loco,
CLKSRC_mosc,
CLKSRC_sosc,
CLKSRC_pll,
};
enum {
SCKDIV_1 = 0,
SCKDIV_2,
SCKDIV_4,
SCKDIV_8,
SCKDIV_16,
SCKDIV_32,
SCKDIV_64,
SCKDIV_128,
SCKDIV_3,
SCKDIV_6,
SCKDIV_12
};
enum {
SCKDIV_POS_pclkd = 0x0U,
SCKDIV_POS_pclkc = 0x4U,
SCKDIV_POS_pclkb = 0x8U,
SCKDIV_POS_pclka = 0xcU,
SCKDIV_POS_bclk = 0x10U,
SCKDIV_POS_pclke = 0x14U,
SCKDIV_POS_iclk = 0x18U,
SCKDIV_POS_fclk = 0x1cU
};
enum {
OSCSF_HOCOSF_POS = 0,
OSCSF_MOSCSF_POS = 3,
OSCSF_PLLSF_POS = 5,
};
enum {
OPCCR_OPCMTSF_POS = 4,
};
static const uint32_t PRCR_KEY = 0xA500U;
static const uint32_t PRCR_CLOCKS = 0x1U;
static const uint32_t PRCR_LOW_POWER = 0x2U;
enum {
#if DT_INST_REG_SIZE_BY_NAME(0, mstp) == 16
MSTPCRA_OFFSET = -0x4,
#else
MSTPCRA_OFFSET = 0x0,
#endif
MSTPCRB_OFFSET = (MSTPCRA_OFFSET + 0x4),
MSTPCRC_OFFSET = (MSTPCRB_OFFSET + 0x4),
MSTPCRD_OFFSET = (MSTPCRC_OFFSET + 0x4),
MSTPCRE_OFFSET = (MSTPCRD_OFFSET + 0x4),
};
enum {
SCKDIVCR_OFFSET = 0x020,
SCKSCR_OFFSET = 0x026,
MEMWAIT_OFFSET = 0x031,
MOSCCR_OFFSET = 0x032,
HOCOCR_OFFSET = 0x036,
OSCSF_OFFSET = 0x03C,
CKOCR_OFFSET = 0x03E,
OPCCR_OFFSET = 0x0A0,
HOCOWTCR_OFFSET = 0x0A5,
PRCR_OFFSET = 0x3FE,
SOSCCR_OFFSET = 0x480,
};
enum {
SCRSCK_hoco,
SCRSCK_moco,
SCRSCK_loco,
SCRSCK_mosc,
SCRSCK_sosc,
SCRSCK_pll,
};
static const int clock_freqs[] = {
COND_CODE_1(IS_CLKSRC_ENABLED(hoco), (CLKSRC_FREQ(hoco)), (0)),
COND_CODE_1(IS_CLKSRC_ENABLED(moco), (CLKSRC_FREQ(moco)), (0)),
COND_CODE_1(IS_CLKSRC_ENABLED(loco), (CLKSRC_FREQ(loco)), (0)),
COND_CODE_1(IS_CLKSRC_ENABLED(mosc), (CLKSRC_FREQ(mosc)), (0)),
COND_CODE_1(IS_CLKSRC_ENABLED(sosc), (CLKSRC_FREQ(sosc)), (0)),
COND_CODE_1(IS_CLKSRC_ENABLED(pll),
(DT_PROP(DT_PHANDLE_BY_IDX(DT_PATH(clocks, pll), clocks, 0), clock_frequency) *
DT_PROP(DT_PATH(clocks, pll), clock_mult) /
DT_PROP(DT_PATH(clocks, pll), clock_div)),
(0)),
};
/* Read a 32-bit Module Stop Control register at @offset within the "mstp" reg block. */
static uint32_t MSTP_read(size_t offset)
{
	return sys_read32(DT_INST_REG_ADDR_BY_NAME(0, mstp) + offset);
}
/* Write a 32-bit Module Stop Control register at @offset within the "mstp" reg block. */
static void MSTP_write(size_t offset, uint32_t value)
{
	sys_write32(value, DT_INST_REG_ADDR_BY_NAME(0, mstp) + offset);
}
/* Read an 8-bit SYSTEM register at @offset within the "system" reg block. */
static uint8_t SYSTEM_read8(size_t offset)
{
	return sys_read8(DT_INST_REG_ADDR_BY_NAME(0, system) + offset);
}
/* Write an 8-bit SYSTEM register at @offset within the "system" reg block. */
static void SYSTEM_write8(size_t offset, uint8_t value)
{
	sys_write8(value, DT_INST_REG_ADDR_BY_NAME(0, system) + offset);
}
/* Write a 16-bit SYSTEM register at @offset within the "system" reg block. */
static void SYSTEM_write16(size_t offset, uint16_t value)
{
	sys_write16(value, DT_INST_REG_ADDR_BY_NAME(0, system) + offset);
}
/* Write a 32-bit SYSTEM register at @offset within the "system" reg block. */
static void SYSTEM_write32(size_t offset, uint32_t value)
{
	sys_write32(value, DT_INST_REG_ADDR_BY_NAME(0, system) + offset);
}
/**
 * @brief Start a module clock by clearing its module-stop bit.
 *
 * @param subsys Encoded clock id: RA_CLOCK_GROUP() selects the MSTPCRx
 *               register, RA_CLOCK_BIT() the bit within it.
 * @return 0 always.
 */
static int clock_control_ra_on(const struct device *dev, clock_control_subsys_t subsys)
{
	uint32_t clkid = (uint32_t)subsys;
	int lock = irq_lock();
	/* BUG FIX: the read-modify-write must read the same MSTPCRx register
	 * it writes back. Previously MSTPCRB was always read, so enabling a
	 * clock in any other group clobbered that group's stop bits.
	 */
	MSTP_write(MSTPCRA_OFFSET + RA_CLOCK_GROUP(clkid),
		   MSTP_read(MSTPCRA_OFFSET + RA_CLOCK_GROUP(clkid)) & ~RA_CLOCK_BIT(clkid));
	irq_unlock(lock);
	return 0;
}
/**
 * @brief Stop a module clock by setting its module-stop bit.
 *
 * @param subsys Encoded clock id: RA_CLOCK_GROUP() selects the MSTPCRx
 *               register, RA_CLOCK_BIT() the bit within it.
 * @return 0 always.
 */
static int clock_control_ra_off(const struct device *dev, clock_control_subsys_t subsys)
{
	uint32_t clkid = (uint32_t)subsys;
	int lock = irq_lock();
	/* BUG FIX: read back the register being modified, not MSTPCRB
	 * unconditionally (see clock_control_ra_on).
	 */
	MSTP_write(MSTPCRA_OFFSET + RA_CLOCK_GROUP(clkid),
		   MSTP_read(MSTPCRA_OFFSET + RA_CLOCK_GROUP(clkid)) | RA_CLOCK_BIT(clkid));
	irq_unlock(lock);
	return 0;
}
/**
 * @brief Report the input clock rate of a peripheral.
 *
 * Masking off the low byte drops the per-channel part of the id, so every
 * SCI channel matches the single RA_CLOCK_SCI(0) case. Only SCI is
 * supported; it is fed from PCLKA (presumably — confirm against the SoC
 * clock tree when adding more peripherals).
 *
 * @return 0 on success, -EINVAL for peripherals other than SCI.
 */
static int clock_control_ra_get_rate(const struct device *dev, clock_control_subsys_t subsys,
				     uint32_t *rate)
{
	uint32_t clkid = (uint32_t)subsys;
	switch (clkid & 0xFFFFFF00) {
	case RA_CLOCK_SCI(0):
		*rate = FREQ_pclka;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static const struct clock_control_driver_api ra_clock_control_driver_api = {
.on = clock_control_ra_on,
.off = clock_control_ra_off,
.get_rate = clock_control_ra_get_rate,
};
/**
 * @brief Burn roughly @p cycles iterations of a 3-instruction delay loop.
 *
 * FIX: the original asm hard-coded r0 (relying on the AAPCS argument
 * register) with no operands or clobbers, so the compiler neither knew
 * `cycles` was consumed nor that the condition flags were trashed, and a
 * named label would clash if the function were ever emitted twice. Use a
 * proper in/out operand, a "cc" clobber and a local numeric label; the
 * instruction sequence (sub/cmp/bne) is unchanged so timing is preserved.
 */
static void crude_busy_loop_impl(uint32_t cycles)
{
	__asm__ volatile(".align 8\n"
			 "1:\n"
			 "	sub %0, %0, #1\n"
			 "	cmp %0, #0\n"
			 "	bne.n 1b\n"
			 : "+r"(cycles)
			 :
			 : "cc");
}
/* Busy-wait approximately @wait_us microseconds, assuming the delay loop in
 * crude_busy_loop_impl() costs 4 CPU cycles per iteration (an estimate —
 * actual per-iteration cost depends on the core and flash wait states).
 */
static inline void crude_busy_loop(uint32_t wait_us)
{
	static const uint64_t cycles_per_loop = 4;
	crude_busy_loop_impl(sys_clock_hw_cycles_per_sec() * wait_us / USEC_PER_SEC /
			     cycles_per_loop);
}
/**
 * @brief Configure the CGC: enable the devicetree-selected oscillators,
 * program the bus dividers and switch the system clock source.
 *
 * Register writes are protected by PRCR; it is unlocked at the start and
 * re-locked (write of PRCR_KEY alone) at the end.
 */
static int clock_control_ra_init(const struct device *dev)
{
	/* Start from the clock source currently selected by hardware/ROM. */
	uint8_t sysclk = SYSTEM_read8(SCKSCR_OFFSET);
	z_clock_hw_cycles_per_sec = clock_freqs[sysclk];
	/* Unlock clock and low-power protection registers. */
	SYSTEM_write16(PRCR_OFFSET, PRCR_KEY | PRCR_CLOCKS | PRCR_LOW_POWER);
	/* Extend the HOCO wait counter when HOCO runs at 64 MHz. */
	if (clock_freqs[SCRSCK_hoco] == 64000000) {
		SYSTEM_write8(HOCOWTCR_OFFSET, HOCOWTCR_INIT_VALUE);
	}
	/* Oscillator control bits are stop bits: 0 = run, 1 = stopped. */
	SYSTEM_write8(SOSCCR_OFFSET, !IS_CLKSRC_ENABLED(sosc));
	SYSTEM_write8(MOSCCR_OFFSET, !IS_CLKSRC_ENABLED(mosc));
	SYSTEM_write8(HOCOCR_OFFSET, !IS_CLKSRC_ENABLED(hoco));
	if (IS_CLKSRC_ENABLED(sosc)) {
		/* No ready flag for the sub-clock: busy-wait a fixed number of
		 * sub-clock periods (argument is a cycle ratio, not plain
		 * microseconds — NOTE(review): verify the intended unit).
		 */
		crude_busy_loop(z_clock_hw_cycles_per_sec / clock_freqs[CLKSRC_sosc] *
				SUBCLK_STABILIZE_CYCLES);
	}
	if (IS_CLKSRC_ENABLED(mosc)) {
		/* Wait for the main oscillator stabilization flag. */
		while ((SYSTEM_read8(OSCSF_OFFSET) & BIT(OSCSF_MOSCSF_POS)) !=
		       BIT(OSCSF_MOSCSF_POS)) {
			;
		}
	}
	if (IS_CLKSRC_ENABLED(hoco)) {
		/* Wait for the HOCO stabilization flag. */
		while ((SYSTEM_read8(OSCSF_OFFSET) & BIT(OSCSF_HOCOSF_POS)) !=
		       BIT(OSCSF_HOCOSF_POS)) {
			;
		}
	}
	/* Select high-speed operating mode and wait for the transition. */
	SYSTEM_write8(OPCCR_OFFSET, 0);
	while ((SYSTEM_read8(OPCCR_OFFSET) & BIT(OPCCR_OPCMTSF_POS)) != 0) {
		;
	}
	/* Add a flash wait state before raising the system clock. */
	SYSTEM_write8(MEMWAIT_OFFSET, 1);
	/* Program bus dividers, then switch the system clock source. */
	SYSTEM_write32(SCKDIVCR_OFFSET, SCKDIVCR_INIT_VALUE);
	SYSTEM_write8(SCKSCR_OFFSET, SCKSCR_INIT_VALUE);
	/* re-read system clock setting and apply to hw_cycles */
	sysclk = SYSTEM_read8(SCKSCR_OFFSET);
	z_clock_hw_cycles_per_sec = clock_freqs[sysclk];
	/* Re-lock protected registers. */
	SYSTEM_write16(PRCR_OFFSET, PRCR_KEY);
	return 0;
}
DEVICE_DT_INST_DEFINE(0, clock_control_ra_init, NULL, NULL, NULL, PRE_KERNEL_1,
CONFIG_CLOCK_CONTROL_INIT_PRIORITY, &ra_clock_control_driver_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_renesas_ra.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,659 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <socfpga_system_manager.h>
#include "clock_control_agilex5_ll.h"
LOG_MODULE_REGISTER(clock_control_agilex5_ll, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
/* Clock manager individual group base addresses. */
struct clock_agilex5_ll_params {
mm_reg_t base_addr;
mm_reg_t mainpll_addr;
mm_reg_t peripll_addr;
mm_reg_t ctl_addr;
};
/* Clock manager low layer(ll) params object. */
static struct clock_agilex5_ll_params clock_agilex5_ll;
/* Initialize the clock ll with the given base address */
void clock_agilex5_ll_init(mm_reg_t base_addr)
{
/* Clock manager module base address. */
clock_agilex5_ll.base_addr = base_addr;
/* Clock manager main PLL base address. */
clock_agilex5_ll.mainpll_addr = clock_agilex5_ll.base_addr + CLKMGR_MAINPLL_OFFSET;
/* Clock manager peripheral PLL base address. */
clock_agilex5_ll.peripll_addr = clock_agilex5_ll.base_addr + CLKMGR_PERPLL_OFFSET;
/* Clock manager control module base address. */
clock_agilex5_ll.ctl_addr = clock_agilex5_ll.base_addr + CLKMGR_INTEL_OFFSET;
}
/* Extract reference clock from platform clock source */
static uint32_t get_ref_clk(uint32_t pllglob)
{
uint32_t arefclkdiv, ref_clk;
uint32_t scr_reg;
/*
* Based on the clock source, read the values from System Manager boot
* scratch registers. These values are filled by boot loader based on
* hand-off data.
*/
switch (CLKMGR_PSRC(pllglob)) {
case CLKMGR_PLLGLOB_PSRC_EOSC1:
scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1);
ref_clk = sys_read32(scr_reg);
break;
case CLKMGR_PLLGLOB_PSRC_INTOSC:
ref_clk = CLKMGR_INTOSC_HZ;
break;
case CLKMGR_PLLGLOB_PSRC_F2S:
scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2);
ref_clk = sys_read32(scr_reg);
break;
default:
ref_clk = 0;
LOG_ERR("Invalid VCO input clock source");
break;
}
/* Reference clock divider, to get the effective reference clock. */
arefclkdiv = CLKMGR_PLLGLOB_AREFCLKDIV(pllglob);
ref_clk /= arefclkdiv;
return ref_clk;
}
/* Calculate clock frequency based on parameter */
static uint32_t get_clk_freq(uint32_t psrc_reg, uint32_t main_pllc, uint32_t per_pllc)
{
uint32_t clk_psrc, mdiv, ref_clk;
uint32_t pllm_reg, pllc_reg, pllc_div, pllglob_reg;
clk_psrc = sys_read32(clock_agilex5_ll.mainpll_addr + psrc_reg);
switch (CLKMGR_PSRC(clk_psrc)) {
case CLKMGR_PSRC_MAIN:
pllm_reg = clock_agilex5_ll.mainpll_addr + CLKMGR_MAINPLL_PLLM;
pllc_reg = clock_agilex5_ll.mainpll_addr + main_pllc;
pllglob_reg = clock_agilex5_ll.mainpll_addr + CLKMGR_MAINPLL_PLLGLOB;
break;
case CLKMGR_PSRC_PER:
pllm_reg = clock_agilex5_ll.peripll_addr + CLKMGR_PERPLL_PLLM;
pllc_reg = clock_agilex5_ll.peripll_addr + per_pllc;
pllglob_reg = clock_agilex5_ll.peripll_addr + CLKMGR_PERPLL_PLLGLOB;
break;
default:
return 0;
}
ref_clk = get_ref_clk(sys_read32(pllglob_reg));
mdiv = CLKMGR_PLLM_MDIV(sys_read32(pllm_reg));
ref_clk *= mdiv;
/* Clock slice divider ration in binary code. */
pllc_div = CLKMGR_PLLC_DIV(sys_read32(pllc_reg));
return ref_clk / pllc_div;
}
/* Return L3 interconnect clock */
uint32_t get_l3_clk(void)
{
uint32_t l3_clk;
l3_clk = get_clk_freq(CLKMGR_MAINPLL_NOCCLK, CLKMGR_MAINPLL_PLLC1, CLKMGR_PERPLL_PLLC1);
return l3_clk;
}
/* Calculate clock frequency to be used for mpu */
uint32_t get_mpu_clk(void)
{
uint32_t mpu_clk;
mpu_clk = get_clk_freq(CLKMGR_MAINPLL_MPUCLK, CLKMGR_MAINPLL_PLLC0, CLKMGR_PERPLL_PLLC0);
return mpu_clk;
}
/* Calculate clock frequency to be used for watchdog timer */
uint32_t get_wdt_clk(void)
{
uint32_t l4_sys_clk;
l4_sys_clk = (get_l3_clk() >> 2);
return l4_sys_clk;
}
/* Calculate clock frequency to be used for UART driver */
uint32_t get_uart_clk(void)
{
uint32_t mainpll_nocdiv, l4_sp_clk;
mainpll_nocdiv = sys_read32(clock_agilex5_ll.mainpll_addr + CLKMGR_MAINPLL_NOCDIV);
mainpll_nocdiv = CLKMGR_MAINPLL_L4SPDIV(mainpll_nocdiv);
l4_sp_clk = (get_l3_clk() >> mainpll_nocdiv);
return l4_sp_clk;
}
/* Calculate clock frequency to be used for SDMMC driver */
uint32_t get_mmc_clk(void)
{
uint32_t sdmmc_ctr, mmc_clk;
mmc_clk = get_clk_freq(CLKMGR_INTEL_SDMMCCTR, CLKMGR_MAINPLL_PLLC3, CLKMGR_PERPLL_PLLC3);
sdmmc_ctr = sys_read32(clock_agilex5_ll.ctl_addr + CLKMGR_INTEL_SDMMCCTR);
sdmmc_ctr = CLKMGR_INTEL_SDMMC_CNT(sdmmc_ctr);
mmc_clk = ((mmc_clk / sdmmc_ctr) >> 2);
return mmc_clk;
}
/* Calculate clock frequency to be used for Timer driver */
uint32_t get_timer_clk(void)
{
uint32_t l4_sys_clk;
l4_sys_clk = (get_l3_clk() >> 2);
return l4_sys_clk;
}
``` | /content/code_sandbox/drivers/clock_control/clock_control_agilex5_ll.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,397 |
```unknown
# MCUXpresso SDK CCM
config CLOCK_CONTROL_MCUX_CCM_REV2
bool "MCUX CCM Rev 2 driver"
default y
depends on DT_HAS_NXP_IMX_CCM_REV2_ENABLED
help
Enable support for mcux ccm rev 2 driver.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.mcux_ccm_rev2 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 63 |
```c
/*
*
* r8a779f0 Clock Pulse Generator / Module Standby and Software Reset
*
*/
#define DT_DRV_COMPAT renesas_r8a779f0_cpg_mssr
#include <errno.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/renesas_cpg_mssr.h>
#include <zephyr/dt-bindings/clock/renesas_cpg_mssr.h>
#include <zephyr/dt-bindings/clock/r8a779f0_cpg_mssr.h>
#include <zephyr/irq.h>
#include "clock_control_renesas_cpg_mssr.h"
#define LOG_LEVEL CONFIG_CLOCK_CONTROL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(clock_control_rcar);
#define R8A779F0_CLK_SD0_STOP_BIT 8
#define R8A779F0_CLK_SD0_DIV_MASK 0x3
#define R8A779F0_CLK_SD0_DIV_SHIFT 0
#define R8A779F0_CLK_SD0H_STOP_BIT 9
#define R8A779F0_CLK_SD0H_DIV_MASK 0x7
#define R8A779F0_CLK_SD0H_DIV_SHIFT 2
#define R8A779F0_CLK_SDSRC_DIV_MASK 0x3
#define R8A779F0_CLK_SDSRC_DIV_SHIFT 29
struct r8a779f0_cpg_mssr_cfg {
DEVICE_MMIO_ROM; /* Must be first */
};
struct r8a779f0_cpg_mssr_data {
struct rcar_cpg_mssr_data cmn; /* Must be first */
};
enum clk_ids {
/* Core Clock Outputs exported to DT */
LAST_DT_CORE_CLK = R8A779F0_CLK_OSC,
/* Internal Core Clocks */
CLK_PLL5,
CLK_SDSRC,
};
/* NOTE: the array MUST be sorted by module field */
static struct cpg_clk_info_table core_props[] = {
RCAR_CORE_CLK_INFO_ITEM(R8A779F0_CLK_S0D12_PER, RCAR_CPG_NONE, RCAR_CPG_NONE,
RCAR_CPG_KHZ(66660)),
RCAR_CORE_CLK_INFO_ITEM(R8A779F0_CLK_CL16M, RCAR_CPG_NONE, RCAR_CPG_NONE,
RCAR_CPG_KHZ(16660)),
RCAR_CORE_CLK_INFO_ITEM(R8A779F0_CLK_SD0H, 0x0870, CLK_SDSRC, RCAR_CPG_NONE),
RCAR_CORE_CLK_INFO_ITEM(R8A779F0_CLK_SD0, 0x0870, R8A779F0_CLK_SD0H, RCAR_CPG_NONE),
RCAR_CORE_CLK_INFO_ITEM(R8A779F0_CLK_SASYNCPERD1, RCAR_CPG_NONE, RCAR_CPG_NONE,
266666666),
RCAR_CORE_CLK_INFO_ITEM(CLK_PLL5, RCAR_CPG_NONE, RCAR_CPG_NONE, RCAR_CPG_MHZ(3200)),
RCAR_CORE_CLK_INFO_ITEM(CLK_SDSRC, 0x08A4, CLK_PLL5, RCAR_CPG_NONE),
};
/* NOTE: the array MUST be sorted by module field */
static struct cpg_clk_info_table mod_props[] = {
RCAR_MOD_CLK_INFO_ITEM(514, R8A779F0_CLK_SASYNCPERD1),
RCAR_MOD_CLK_INFO_ITEM(702, R8A779F0_CLK_S0D12_PER),
RCAR_MOD_CLK_INFO_ITEM(704, R8A779F0_CLK_S0D12_PER),
RCAR_MOD_CLK_INFO_ITEM(706, R8A779F0_CLK_SD0),
RCAR_MOD_CLK_INFO_ITEM(915, R8A779F0_CLK_CL16M),
};
/* Gate/ungate a core clock by toggling its STOP bit.
 *
 * The STOP bits are active-high (1 = clock stopped), hence writing !enable.
 * Only SD0 and SD0H are supported; other core clocks return -ENOTSUP.
 * Caller must hold the driver spinlock.
 */
static int r8a779f0_cpg_enable_disable_core(const struct device *dev,
					    struct cpg_clk_info_table *clk_info, uint32_t enable)
{
	int ret = 0;
	uint32_t reg;
	switch (clk_info->module) {
	case R8A779F0_CLK_SD0:
		reg = sys_read32(DEVICE_MMIO_GET(dev) + clk_info->offset);
		reg &= ~(1 << R8A779F0_CLK_SD0_STOP_BIT);
		reg |= (!enable << R8A779F0_CLK_SD0_STOP_BIT);
		break;
	case R8A779F0_CLK_SD0H:
		reg = sys_read32(DEVICE_MMIO_GET(dev) + clk_info->offset);
		reg &= ~(1 << R8A779F0_CLK_SD0H_STOP_BIT);
		reg |= (!enable << R8A779F0_CLK_SD0H_STOP_BIT);
		break;
	default:
		ret = -ENOTSUP;
		break;
	}
	/* Write back only when a supported clock was modified. */
	if (!ret) {
		rcar_cpg_write(DEVICE_MMIO_GET(dev), clk_info->offset, reg);
	}
	return ret;
}
/**
 * @brief Enable or disable a core clock, optionally programming its rate.
 *
 * When enabling with a requested non-zero rate, the divider is set first so
 * the clock starts at the desired frequency.
 *
 * @return 0 on success, -EINVAL for an unknown module, -ENOTSUP for core
 *         clocks without a STOP bit, or a negative error from rate setting.
 */
static int r8a779f0_cpg_core_clock_endisable(const struct device *dev, struct rcar_cpg_clk *clk,
					     bool enable)
{
	struct cpg_clk_info_table *clk_info;
	struct r8a779f0_cpg_mssr_data *data = dev->data;
	k_spinlock_key_t key;
	int ret;
	clk_info = rcar_cpg_find_clk_info_by_module_id(dev, clk->domain, clk->module);
	if (!clk_info) {
		return -EINVAL;
	}
	if (enable && clk->rate > 0) {
		uintptr_t rate = clk->rate;
		ret = rcar_cpg_set_rate(dev, (clock_control_subsys_t)clk,
					(clock_control_subsys_rate_t)rate);
		if (ret < 0) {
			return ret;
		}
	}
	key = k_spin_lock(&data->cmn.lock);
	/* BUG FIX: propagate the enable/disable result instead of dropping it,
	 * so unsupported core clocks report -ENOTSUP to the caller rather
	 * than a spurious 0.
	 */
	ret = r8a779f0_cpg_enable_disable_core(dev, clk_info, enable);
	k_spin_unlock(&data->cmn.lock, key);
	return ret;
}
/**
 * @brief Common start/stop entry point for both clock domains.
 *
 * CPG_MOD clocks go through the generic MSTP gate under the driver
 * spinlock; CPG_CORE clocks use the SoC-specific core path.
 *
 * @return 0 on success, -EINVAL on bad arguments/domain, or a negative
 *         error from the domain handler.
 */
int r8a779f0_cpg_mssr_start_stop(const struct device *dev, clock_control_subsys_t sys, bool enable)
{
	struct rcar_cpg_clk *clk = (struct rcar_cpg_clk *)sys;
	int ret;
	if (!dev || !sys) {
		return -EINVAL;
	}
	if (clk->domain == CPG_MOD) {
		struct r8a779f0_cpg_mssr_data *data = dev->data;
		k_spinlock_key_t key;
		key = k_spin_lock(&data->cmn.lock);
		ret = rcar_cpg_mstp_clock_endisable(DEVICE_MMIO_GET(dev), clk->module, enable);
		k_spin_unlock(&data->cmn.lock, key);
	} else if (clk->domain == CPG_CORE) {
		ret = r8a779f0_cpg_core_clock_endisable(dev, clk, enable);
	} else {
		ret = -EINVAL;
	}
	return ret;
}
/* Decode a clock's effective divider from its register value.
 *
 * @reg_val Raw register contents for the clock's control register.
 * @module  Clock id selecting the field layout.
 * @return  Divider value, or RCAR_CPG_NONE for prohibited settings or
 *          unknown modules.
 */
static uint32_t r8a779f0_get_div_helper(uint32_t reg_val, uint32_t module)
{
	switch (module) {
	case R8A779F0_CLK_S0D12_PER:
	case R8A779F0_CLK_CL16M:
		/* Fixed-frequency clocks: no divider field. */
		return 1;
	case CLK_SDSRC:
		reg_val >>= R8A779F0_CLK_SDSRC_DIV_SHIFT;
		reg_val &= R8A779F0_CLK_SDSRC_DIV_MASK;
		/* setting of 3 is prohibited */
		if (reg_val < 3) {
			/* real divider is in range 4 - 6 */
			return reg_val + 4;
		}
		LOG_WRN("SDSRC clock has an incorrect divider value: %u", reg_val);
		return RCAR_CPG_NONE;
	case R8A779F0_CLK_SD0H:
		reg_val >>= R8A779F0_CLK_SD0H_DIV_SHIFT;
		reg_val &= R8A779F0_CLK_SD0H_DIV_MASK;
		/* setting of value bigger than 4 is prohibited */
		if (reg_val < 5) {
			/* field encodes a power of two: 0..4 -> 1,2,4,8,16 */
			return (1 << reg_val);
		}
		LOG_WRN("SD0H clock has an incorrect divider value: %u", reg_val);
		return RCAR_CPG_NONE;
	case R8A779F0_CLK_SD0:
		/* convert only two possible values 0,1 to 2,4 */
		return (1 << ((reg_val & R8A779F0_CLK_SD0_DIV_MASK) + 1));
	default:
		return RCAR_CPG_NONE;
	}
}
/* Encode a requested divider into its register field for @module.
 *
 * On success, *divider is replaced by the shifted field value and
 * *div_mask by the shifted field mask, ready for a read-modify-write.
 *
 * @return 0 on success, -EINVAL for a prohibited divider value,
 *         -ENOTSUP for modules with no settable divider.
 */
static int r8a779f0_set_rate_helper(uint32_t module, uint32_t *divider, uint32_t *div_mask)
{
	switch (module) {
	case CLK_SDSRC:
		/* divider has to be in range 4-6 */
		if (*divider > 3 && *divider < 7) {
			/* we can write to register value in range 0-2 */
			*divider -= 4;
			*divider <<= R8A779F0_CLK_SDSRC_DIV_SHIFT;
			*div_mask = R8A779F0_CLK_SDSRC_DIV_MASK << R8A779F0_CLK_SDSRC_DIV_SHIFT;
			return 0;
		}
		return -EINVAL;
	case R8A779F0_CLK_SD0:
		/* possible to have only 2 or 4 */
		if (*divider == 2 || *divider == 4) {
			/* convert 2/4 to 0/1 */
			*divider >>= 2;
			*div_mask = R8A779F0_CLK_SD0_DIV_MASK << R8A779F0_CLK_SD0_DIV_SHIFT;
			return 0;
		}
		return -EINVAL;
	case R8A779F0_CLK_SD0H:
		/* divider should be power of two number and last possible value 16 */
		if (!is_power_of_two(*divider) || *divider > 16) {
			return -EINVAL;
		}
		/* 1,2,4,8,16 have to be converted to 0,1,2,3,4 and then shifted */
		*divider = (find_lsb_set(*divider) - 1) << R8A779F0_CLK_SD0H_DIV_SHIFT;
		*div_mask = R8A779F0_CLK_SD0H_DIV_MASK << R8A779F0_CLK_SD0H_DIV_SHIFT;
		return 0;
	default:
		return -ENOTSUP;
	}
}
/* clock_control "on" hook: enable the given clock. */
static int r8a779f0_cpg_mssr_start(const struct device *dev, clock_control_subsys_t sys)
{
	return r8a779f0_cpg_mssr_start_stop(dev, sys, true);
}
/* clock_control "off" hook: disable the given clock. */
static int r8a779f0_cpg_mssr_stop(const struct device *dev, clock_control_subsys_t sys)
{
	return r8a779f0_cpg_mssr_start_stop(dev, sys, false);
}
/* Driver init: map MMIO and precompute the clock tree relationships and
 * cached input/output frequencies.
 */
static int r8a779f0_cpg_mssr_init(const struct device *dev)
{
	DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);
	rcar_cpg_build_clock_relationship(dev);
	rcar_cpg_update_all_in_out_freq(dev);
	return 0;
}
static const struct clock_control_driver_api r8a779f0_cpg_mssr_api = {
.on = r8a779f0_cpg_mssr_start,
.off = r8a779f0_cpg_mssr_stop,
.get_rate = rcar_cpg_get_rate,
.set_rate = rcar_cpg_set_rate,
};
#define R8A779F0_MSSR_INIT(inst) \
static struct r8a779f0_cpg_mssr_cfg cpg_mssr##inst##_cfg = { \
DEVICE_MMIO_ROM_INIT(DT_DRV_INST(inst)), \
}; \
\
static struct r8a779f0_cpg_mssr_data cpg_mssr##inst##_data = { \
.cmn.clk_info_table[CPG_CORE] = core_props, \
.cmn.clk_info_table_size[CPG_CORE] = ARRAY_SIZE(core_props), \
.cmn.clk_info_table[CPG_MOD] = mod_props, \
.cmn.clk_info_table_size[CPG_MOD] = ARRAY_SIZE(mod_props), \
.cmn.get_div_helper = r8a779f0_get_div_helper, \
.cmn.set_rate_helper = r8a779f0_set_rate_helper \
}; \
\
DEVICE_DT_INST_DEFINE(inst, \
&r8a779f0_cpg_mssr_init, \
NULL, \
&cpg_mssr##inst##_data, \
&cpg_mssr##inst##_cfg, \
PRE_KERNEL_1, \
CONFIG_CLOCK_CONTROL_INIT_PRIORITY, \
&r8a779f0_cpg_mssr_api);
DT_INST_FOREACH_STATUS_OKAY(R8A779F0_MSSR_INIT)
``` | /content/code_sandbox/drivers/clock_control/clock_control_r8a779f0_cpg_mssr.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,892 |
```c
/*
*
*/
#define DT_DRV_COMPAT arm_beetle_syscon
/**
* @file
* @brief Driver for Clock Control of Beetle MCUs.
*
* This file contains the Clock Control driver implementation for the
* Beetle MCUs.
*/
#include <soc.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/irq.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/clock_control/arm_clock_control.h>
#define MAINCLK_BASE_FREQ 24000000
struct beetle_clock_control_cfg_t {
/* Clock Control ID */
uint32_t clock_control_id;
/* Clock control freq */
uint32_t freq;
};
/**
 * @brief Set one clock-config bit for the requested power state.
 *
 * @param base  First of three consecutive config word pairs; word 0/2/4 are
 *              used for ACTIVE/SLEEP/DEEPSLEEP respectively (presumably the
 *              per-state SET or CLR banks — confirm against the Beetle
 *              SYSCON register map).
 * @param bit   Bit index of the peripheral clock.
 * @param state Target SoC power state; unknown states are ignored.
 *
 * FIX: use BIT(bit) (unsigned shift) instead of the signed `1 << bit`,
 * which is undefined behavior for bit 31.
 */
static inline void beetle_set_clock(volatile uint32_t *base,
				    uint8_t bit, enum arm_soc_state_t state)
{
	uint32_t key;
	/* Serialize the read-modify-write against interrupts. */
	key = irq_lock();
	switch (state) {
	case SOC_ACTIVE:
		base[0] |= BIT(bit);
		break;
	case SOC_SLEEP:
		base[2] |= BIT(bit);
		break;
	case SOC_DEEPSLEEP:
		base[4] |= BIT(bit);
		break;
	default:
		break;
	}
	irq_unlock(key);
}
/* Enable AHB clock @bit for the given power state (SET register bank). */
static inline void beetle_ahb_set_clock_on(uint8_t bit,
					   enum arm_soc_state_t state)
{
	beetle_set_clock((volatile uint32_t *)&(__BEETLE_SYSCON->ahbclkcfg0set),
			 bit, state);
}
/* Disable AHB clock @bit for the given power state (CLR register bank). */
static inline void beetle_ahb_set_clock_off(uint8_t bit,
					    enum arm_soc_state_t state)
{
	beetle_set_clock((volatile uint32_t *)&(__BEETLE_SYSCON->ahbclkcfg0clr),
			 bit, state);
}
/* Enable APB clock @bit for the given power state (SET register bank). */
static inline void beetle_apb_set_clock_on(uint8_t bit,
					   enum arm_soc_state_t state)
{
	beetle_set_clock((volatile uint32_t *)&(__BEETLE_SYSCON->apbclkcfg0set),
			 bit, state);
}
/* Disable APB clock @bit for the given power state (CLR register bank). */
static inline void beetle_apb_set_clock_off(uint8_t bit,
					    enum arm_soc_state_t state)
{
	beetle_set_clock((volatile uint32_t *)&(__BEETLE_SYSCON->apbclkcfg0clr),
			 bit, state);
}
/* clock_control "on" hook.
 *
 * The clock bit is derived from the peripheral's base address: peripherals
 * are spaced 4 KB apart (>> 12), so the region index doubles as the bit
 * index in the clock-config register.
 */
static inline int beetle_clock_control_on(const struct device *dev,
					  clock_control_subsys_t sub_system)
{
	struct arm_clock_control_t *beetle_cc =
		(struct arm_clock_control_t *)(sub_system);
	uint8_t bit = 0U;
	switch (beetle_cc->bus) {
	case CMSDK_AHB:
		bit = (beetle_cc->device - _BEETLE_AHB_BASE) >> 12;
		beetle_ahb_set_clock_on(bit, beetle_cc->state);
		break;
	case CMSDK_APB:
		bit = (beetle_cc->device - _BEETLE_APB_BASE) >> 12;
		beetle_apb_set_clock_on(bit, beetle_cc->state);
		break;
	default:
		break;
	}
	return 0;
}
/* clock_control "off" hook; bit derivation as in beetle_clock_control_on().
 * Unknown buses are silently ignored and 0 is returned.
 */
static inline int beetle_clock_control_off(const struct device *dev,
					   clock_control_subsys_t sub_system)
{
	struct arm_clock_control_t *beetle_cc =
		(struct arm_clock_control_t *)(sub_system);
	uint8_t bit = 0U;
	switch (beetle_cc->bus) {
	case CMSDK_AHB:
		bit = (beetle_cc->device - _BEETLE_AHB_BASE) >> 12;
		beetle_ahb_set_clock_off(bit, beetle_cc->state);
		break;
	case CMSDK_APB:
		bit = (beetle_cc->device - _BEETLE_APB_BASE) >> 12;
		beetle_apb_set_clock_off(bit, beetle_cc->state);
		break;
	default:
		break;
	}
	return 0;
}
/* Report the main clock rate: the configured frequency rounded to a
 * supported PLL step when the PLL is enabled, else the fixed 24 MHz base.
 *
 * NOTE(review): with CONFIG_CLOCK_CONTROL_BEETLE_ENABLE_PLL set this calls
 * beetle_round_freq(), which is defined later in this file — confirm a
 * forward declaration exists in that configuration, otherwise this is an
 * implicit declaration.
 */
static int beetle_clock_control_get_subsys_rate(const struct device *clock,
						clock_control_subsys_t sub_system,
						uint32_t *rate)
{
#ifdef CONFIG_CLOCK_CONTROL_BEETLE_ENABLE_PLL
	const struct beetle_clock_control_cfg_t * const cfg =
		clock->config;
	uint32_t nc_mainclk = beetle_round_freq(cfg->freq);
	*rate = nc_mainclk;
#else
	ARG_UNUSED(clock);
	ARG_UNUSED(sub_system);
	*rate = MAINCLK_BASE_FREQ;
#endif /* CONFIG_CLOCK_CONTROL_BEETLE_ENABLE_PLL */
	return 0;
}
static const struct clock_control_driver_api beetle_clock_control_api = {
.on = beetle_clock_control_on,
.off = beetle_clock_control_off,
.get_rate = beetle_clock_control_get_subsys_rate,
};
#ifdef CONFIG_CLOCK_CONTROL_BEETLE_ENABLE_PLL
/* Round @mainclk up to the nearest supported PLL frequency; anything above
 * the 36 MHz step is clamped to 48 MHz.
 */
static uint32_t beetle_round_freq(uint32_t mainclk)
{
	if (mainclk <= BEETLE_PLL_FREQUENCY_12MHZ) {
		return BEETLE_PLL_FREQUENCY_12MHZ;
	}
	if (mainclk <= BEETLE_PLL_FREQUENCY_24MHZ) {
		return BEETLE_PLL_FREQUENCY_24MHZ;
	}
	if (mainclk <= BEETLE_PLL_FREQUENCY_36MHZ) {
		return BEETLE_PLL_FREQUENCY_36MHZ;
	}
	return BEETLE_PLL_FREQUENCY_48MHZ;
}
/* Map @mainclk to the prescaler of the nearest supported PLL frequency,
 * using the same rounding as beetle_round_freq().
 */
static uint32_t beetle_get_prescaler(uint32_t mainclk)
{
	if (mainclk <= BEETLE_PLL_FREQUENCY_12MHZ) {
		return BEETLE_PLL_PRESCALER_12MHZ;
	}
	if (mainclk <= BEETLE_PLL_FREQUENCY_24MHZ) {
		return BEETLE_PLL_PRESCALER_24MHZ;
	}
	if (mainclk <= BEETLE_PLL_FREQUENCY_36MHZ) {
		return BEETLE_PLL_PRESCALER_36MHZ;
	}
	return BEETLE_PLL_PRESCALER_48MHZ;
}
/* Configure and enable the PLL for @mainclk, then switch the main clock to
 * it. Spins (with no timeout) until the PLL reports lock.
 *
 * @return 0 always.
 */
static int beetle_pll_enable(uint32_t mainclk)
{
	uint32_t pre_mainclk = beetle_get_prescaler(mainclk);
	/* Set PLLCTRL Register */
	__BEETLE_SYSCON->pllctrl = BEETLE_PLL_CONFIGURATION;
	/* Switch the Main clock to PLL and set prescaler */
	__BEETLE_SYSCON->mainclk = pre_mainclk;
	while (!__BEETLE_SYSCON->pllstatus) {
		/* Wait for PLL to lock */
	}
	return 0;
}
#endif /* CONFIG_CLOCK_CONTROL_BEETLE_ENABLE_PLL */
/* Driver init: bring up the PLL only when the devicetree CPU frequency
 * differs from the 24 MHz crystal base (and the PLL is enabled in Kconfig).
 */
static int beetle_clock_control_init(const struct device *dev)
{
#ifdef CONFIG_CLOCK_CONTROL_BEETLE_ENABLE_PLL
	const struct beetle_clock_control_cfg_t * const cfg =
		dev->config;
	/*
	 * Enable PLL if Beetle is configured to run at a different
	 * frequency than 24Mhz.
	 */
	if (cfg->freq != MAINCLK_BASE_FREQ) {
		beetle_pll_enable(cfg->freq);
	}
#endif /* CONFIG_CLOCK_CONTROL_BEETLE_ENABLE_PLL */
	return 0;
}
static const struct beetle_clock_control_cfg_t beetle_cc_cfg = {
.clock_control_id = 0,
.freq = DT_PROP(DT_PATH(cpus, cpu_0), clock_frequency),
};
/**
* @brief Clock Control device init
*
*/
DEVICE_DT_INST_DEFINE(0, beetle_clock_control_init, NULL,
NULL, &beetle_cc_cfg, PRE_KERNEL_1,
CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
&beetle_clock_control_api);
``` | /content/code_sandbox/drivers/clock_control/beetle_clock_control.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,675 |
```c
/*
*
*/
#include <string.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/dt-bindings/clock/ra_clock.h>
#include <zephyr/drivers/clock_control/renesas_ra_cgc.h>
/* Ungate a peripheral clock by clearing its module-stop bit. */
static int clock_control_renesas_ra_on(const struct device *dev, clock_control_subsys_t sys)
{
	struct clock_control_ra_subsys_cfg *subsys_clk;

	if (dev == NULL || sys == NULL) {
		return -EINVAL;
	}
	subsys_clk = (struct clock_control_ra_subsys_cfg *)sys;
	WRITE_BIT(*subsys_clk->mstp, subsys_clk->stop_bit, false);
	return 0;
}
/* Gate a peripheral clock by setting its module-stop bit. */
static int clock_control_renesas_ra_off(const struct device *dev, clock_control_subsys_t sys)
{
	struct clock_control_ra_subsys_cfg *subsys_clk;

	if (dev == NULL || sys == NULL) {
		return -EINVAL;
	}
	subsys_clk = (struct clock_control_ra_subsys_cfg *)sys;
	WRITE_BIT(*subsys_clk->mstp, subsys_clk->stop_bit, true);
	return 0;
}
/**
 * @brief Return the peripheral clock rate: source frequency divided by the
 * configured divider (both taken from this pclk node's config).
 *
 * @return 0 on success, -EINVAL on NULL arguments.
 */
static int clock_control_renesas_ra_get_rate(const struct device *dev, clock_control_subsys_t sys,
					     uint32_t *rate)
{
	const struct clock_control_ra_pclk_cfg *config;
	uint32_t clk_src_rate;
	uint32_t clk_div_val;
	if (!dev || !sys || !rate) {
		return -EINVAL;
	}
	/* BUG FIX: dev->config was previously read before the NULL check
	 * above, dereferencing a potentially NULL dev.
	 */
	config = dev->config;
	clk_src_rate = R_BSP_SourceClockHzGet(config->clk_src);
	clk_div_val = R_FSP_ClockDividerGet(config->clk_div);
	*rate = clk_src_rate / clk_div_val;
	return 0;
}
/**
 * @brief Initializes a peripheral clock device driver
 *
 * No per-node hardware setup is needed here; the clock tree is programmed
 * once by bsp_clock_init() from the pclkblock device's init.
 */
static int clock_control_ra_init_pclk(const struct device *dev)
{
	ARG_UNUSED(dev);
	return 0;
}
/* Init for the pclkblock parent node: delegate the whole clock tree setup
 * to the Renesas FSP HAL.
 */
static int clock_control_ra_init(const struct device *dev)
{
	ARG_UNUSED(dev);
	/* Call to HAL layer to initialize system clock and peripheral clock */
	bsp_clock_init();
	return 0;
}
static const struct clock_control_driver_api clock_control_reneas_ra_api = {
.on = clock_control_renesas_ra_on,
.off = clock_control_renesas_ra_off,
.get_rate = clock_control_renesas_ra_get_rate,
};
#define INIT_PCLK(node_id) \
IF_ENABLED(DT_NODE_HAS_COMPAT(node_id, renesas_ra_cgc_pclk), \
(static const struct clock_control_ra_pclk_cfg node_id##_cfg = \
{.clk_src = DT_PROP_OR(node_id, clk_src, RA_CLOCK_SOURCE_DISABLE), \
.clk_div = DT_PROP_OR(node_id, clk_div, RA_SYS_CLOCK_DIV_1)}; \
DEVICE_DT_DEFINE(node_id, &clock_control_ra_init_pclk, NULL, NULL, \
&node_id##_cfg, PRE_KERNEL_1, \
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS, \
&clock_control_reneas_ra_api)));
DEVICE_DT_DEFINE(DT_NODELABEL(pclkblock), &clock_control_ra_init, NULL, NULL, NULL, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS, NULL);
DT_FOREACH_CHILD_STATUS_OKAY(DT_NODELABEL(pclkblock), INIT_PCLK);
``` | /content/code_sandbox/drivers/clock_control/clock_control_renesas_ra_cgc.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 702 |
```c
/*
*
*
*/
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/clock_agilex_ll.h>
#include <zephyr/dt-bindings/clock/intel_socfpga_clock.h>
/**
 * @brief Report the rate of one Agilex clock domain.
 *
 * @param dev        Clock controller device (unused).
 * @param sub_system One of the INTEL_SOCFPGA_CLOCK_* identifiers.
 * @param rate       Output: clock rate in Hz; untouched on error.
 * @return 0 on success, -ENOTSUP for an unknown clock identifier.
 */
static int clk_get_rate(const struct device *dev,
			clock_control_subsys_t sub_system,
			uint32_t *rate)
{
	const intptr_t clock_id = (intptr_t)sub_system;

	ARG_UNUSED(dev);

	switch (clock_id) {
	case INTEL_SOCFPGA_CLOCK_MPU:
		*rate = get_mpu_clk();
		return 0;
	case INTEL_SOCFPGA_CLOCK_WDT:
		*rate = get_wdt_clk();
		return 0;
	case INTEL_SOCFPGA_CLOCK_UART:
		*rate = get_uart_clk();
		return 0;
	case INTEL_SOCFPGA_CLOCK_MMC:
		*rate = get_mmc_clk();
		return 0;
	default:
		return -ENOTSUP;
	}
}
/* Driver API: only rate queries are implemented — no on/off/configure
 * handlers are provided, so this controller is read-only to callers.
 */
static const struct clock_control_driver_api clk_api = {
	.get_rate = clk_get_rate
};

/* Single instance bound to the "clock" devicetree node; no init function
 * and no runtime data are needed.
 */
DEVICE_DT_DEFINE(DT_NODELABEL(clock), NULL, NULL, NULL, NULL,
		 PRE_KERNEL_1, CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		 &clk_api);
``` | /content/code_sandbox/drivers/clock_control/clock_agilex.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 254 |
```c
/*
*
*/
#include <zephyr/drivers/firmware/scmi/clk.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(arm_scmi_clock);
#define DT_DRV_COMPAT arm_scmi_clock
/* Per-instance data: clock count reported by the SCMI platform firmware,
 * filled in by scmi_clock_init() and used to range-check clock IDs.
 */
struct scmi_clock_data {
	uint32_t clk_num;
};
/**
 * @brief Common gate helper for the on/off API hooks.
 *
 * Builds an SCMI CLOCK_CONFIG_SET request enabling or disabling the given
 * clock and sends it over the protocol transport.
 *
 * @param dev SCMI clock protocol device.
 * @param clk Clock ID encoded as a pointer.
 * @param on  true to enable, false to disable.
 * @return 0 on success, -EINVAL for an out-of-range ID, else transport error.
 */
static int scmi_clock_on_off(const struct device *dev,
			     clock_control_subsys_t clk, bool on)
{
	struct scmi_protocol *proto = dev->data;
	struct scmi_clock_data *data = proto->data;
	uint32_t clk_id = POINTER_TO_UINT(clk);
	/* Designated initializer zeroes all remaining fields, matching the
	 * previous memset-then-assign sequence.
	 */
	struct scmi_clock_config cfg = {
		.attributes = SCMI_CLK_CONFIG_ENABLE_DISABLE(on),
		.clk_id = clk_id,
	};

	if (clk_id >= data->clk_num) {
		return -EINVAL;
	}

	return scmi_clock_config_set(proto, &cfg);
}
/** clock_control "on" hook: enable a clock via scmi_clock_on_off(). */
static int scmi_clock_on(const struct device *dev, clock_control_subsys_t clk)
{
	return scmi_clock_on_off(dev, clk, true);
}

/** clock_control "off" hook: disable a clock via scmi_clock_on_off(). */
static int scmi_clock_off(const struct device *dev, clock_control_subsys_t clk)
{
	return scmi_clock_on_off(dev, clk, false);
}
/**
 * @brief clock_control "get_rate" hook.
 *
 * Range-checks the clock ID against the firmware-reported count, then
 * queries the rate through the SCMI clock protocol.
 *
 * @return 0 on success, -EINVAL for an out-of-range ID, else transport error.
 */
static int scmi_clock_get_rate(const struct device *dev,
			       clock_control_subsys_t clk, uint32_t *rate)
{
	struct scmi_protocol *proto = dev->data;
	struct scmi_clock_data *data = proto->data;
	uint32_t clk_id = POINTER_TO_UINT(clk);

	if (clk_id >= data->clk_num) {
		return -EINVAL;
	}

	return scmi_clock_rate_get(proto, clk_id, rate);
}
static struct clock_control_driver_api scmi_clock_api = {
.on = scmi_clock_on,
.off = scmi_clock_off,
.get_rate = scmi_clock_get_rate,
};
/**
 * @brief Driver init: query the SCMI clock protocol attributes.
 *
 * Caches the number of clocks exposed by the platform firmware so the
 * on/off/get_rate hooks can range-check incoming clock IDs.
 *
 * @return 0 on success, negative errno propagated from the SCMI transport.
 */
static int scmi_clock_init(const struct device *dev)
{
	struct scmi_protocol *proto;
	struct scmi_clock_data *data;
	int ret;
	uint32_t attributes;

	proto = dev->data;
	data = proto->data;

	ret = scmi_clock_protocol_attributes(proto, &attributes);
	if (ret < 0) {
		LOG_ERR("failed to fetch clock attributes: %d", ret);
		return ret;
	}

	/* Extract the clock count from the attributes word. */
	data->clk_num = SCMI_CLK_ATTRIBUTES_CLK_NUM(attributes);

	return 0;
}

/* Single protocol instance; clk_num is populated at init time. */
static struct scmi_clock_data data;

DT_INST_SCMI_PROTOCOL_DEFINE(0, &scmi_clock_init, NULL, &data, NULL,
			     PRE_KERNEL_1, CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
			     &scmi_clock_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_arm_scmi.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 572 |
```c
/*
*
*/
#define DT_DRV_COMPAT ambiq_clkctrl
#include <errno.h>
#include <zephyr/init.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control/clock_control_ambiq.h>
#include <am_mcu_apollo.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control_ambiq, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
/* Per-instance config, filled from devicetree:
 * clock_freq — fixed rate reported by get_rate;
 * pcfg       — pinctrl state applied by the configure hook.
 */
struct ambiq_clock_config {
	uint32_t clock_freq;
	const struct pinctrl_dev_config *pcfg;
};
/**
 * @brief Enable one of the Ambiq external clock sources.
 *
 * @param dev        Clock controller device (unused).
 * @param sub_system One of the CLOCK_CONTROL_AMBIQ_TYPE_* identifiers.
 * @return 0 / HAL status on success or failure of the HAL call,
 *         -EINVAL for an out-of-range identifier,
 *         -ENOTSUP for identifiers with no enable handler.
 */
static int ambiq_clock_on(const struct device *dev, clock_control_subsys_t sub_system)
{
	ARG_UNUSED(dev);
	int ret;
	uint32_t clock_name = (uint32_t)sub_system;
	am_hal_mcuctrl_control_arg_t arg = {
		.b_arg_hfxtal_in_use = true,
		.b_arg_apply_ext_source = false,
		.b_arg_force_update = false,
	};

	if (clock_name >= CLOCK_CONTROL_AMBIQ_TYPE_MAX) {
		return -EINVAL;
	}

	switch (clock_name) {
	case CLOCK_CONTROL_AMBIQ_TYPE_HFXTAL_BLE:
		arg.ui32_arg_hfxtal_user_mask = BIT(AM_HAL_HFXTAL_BLE_CONTROLLER_EN);
		arg.b_arg_enable_HfXtalClockout = true;
		ret = am_hal_mcuctrl_control(AM_HAL_MCUCTRL_CONTROL_EXTCLK32M_KICK_START, &arg);
		break;
	case CLOCK_CONTROL_AMBIQ_TYPE_LFXTAL:
		ret = am_hal_mcuctrl_control(AM_HAL_MCUCTRL_CONTROL_EXTCLK32K_ENABLE, 0);
		/* Bug fix: this 'break' was missing, so a successful LFXTAL
		 * enable fell through to default and returned -ENOTSUP
		 * (the off() counterpart already had the break).
		 */
		break;
	default:
		ret = -ENOTSUP;
		break;
	}

	return ret;
}
/**
 * @brief Disable one of the Ambiq external clock sources.
 *
 * Mirror of ambiq_clock_on(): maps the subsystem identifier to the
 * matching HAL disable control.
 *
 * @return 0 / HAL status from am_hal_mcuctrl_control(), -EINVAL for an
 *         out-of-range identifier, -ENOTSUP for unsupported ones.
 */
static int ambiq_clock_off(const struct device *dev, clock_control_subsys_t sub_system)
{
	ARG_UNUSED(dev);
	int ret;
	uint32_t clock_name = (uint32_t)sub_system;
	am_hal_mcuctrl_control_arg_t arg = {
		.b_arg_hfxtal_in_use = true,
		.b_arg_apply_ext_source = false,
		.b_arg_force_update = false,
	};

	if (clock_name >= CLOCK_CONTROL_AMBIQ_TYPE_MAX) {
		return -EINVAL;
	}

	switch (clock_name) {
	case CLOCK_CONTROL_AMBIQ_TYPE_HFXTAL_BLE:
		arg.ui32_arg_hfxtal_user_mask = BIT(AM_HAL_HFXTAL_BLE_CONTROLLER_EN);
		arg.b_arg_enable_HfXtalClockout = true;
		ret = am_hal_mcuctrl_control(AM_HAL_MCUCTRL_CONTROL_EXTCLK32M_DISABLE, &arg);
		break;
	case CLOCK_CONTROL_AMBIQ_TYPE_LFXTAL:
		ret = am_hal_mcuctrl_control(AM_HAL_MCUCTRL_CONTROL_EXTCLK32K_DISABLE, 0);
		break;
	default:
		ret = -ENOTSUP;
		break;
	}

	return ret;
}
/**
 * @brief Report the fixed clock frequency from devicetree.
 *
 * The rate is the static clock-frequency property; the subsystem
 * argument is ignored.
 */
static inline int ambiq_clock_get_rate(const struct device *dev, clock_control_subsys_t sub_system,
				       uint32_t *rate)
{
	const struct ambiq_clock_config *cfg = dev->config;

	ARG_UNUSED(sub_system);

	*rate = cfg->clock_freq;

	return 0;
}
/**
 * @brief Apply the default pinctrl state for the clock output pins.
 *
 * Both the subsystem handle and the data pointer are ignored; the result
 * of pinctrl_apply_state() is returned directly.
 */
static inline int ambiq_clock_configure(const struct device *dev, clock_control_subsys_t sub_system,
					void *data)
{
	const struct ambiq_clock_config *cfg = dev->config;

	ARG_UNUSED(sub_system);
	ARG_UNUSED(data);

	return pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
}
/** Device init hook: no hardware setup is required at boot. */
static int ambiq_clock_init(const struct device *dev)
{
	ARG_UNUSED(dev);
	/* Nothing to do.*/
	return 0;
}
/* Full clock_control API: gate on/off, fixed-rate query, pinctrl configure. */
static const struct clock_control_driver_api ambiq_clock_driver_api = {
	.on = ambiq_clock_on,
	.off = ambiq_clock_off,
	.get_rate = ambiq_clock_get_rate,
	.configure = ambiq_clock_configure,
};

/* One device per enabled ambiq,clkctrl devicetree instance; config carries
 * the DT clock-frequency and pinctrl state.
 */
#define AMBIQ_CLOCK_INIT(n)                                                                        \
	PINCTRL_DT_INST_DEFINE(n);                                                                 \
	static const struct ambiq_clock_config ambiq_clock_config##n = {                           \
		.clock_freq = DT_INST_PROP(n, clock_frequency),                                    \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n)};                                        \
	DEVICE_DT_INST_DEFINE(n, ambiq_clock_init, NULL, NULL, &ambiq_clock_config##n,             \
			      POST_KERNEL, CONFIG_CLOCK_CONTROL_INIT_PRIORITY,                     \
			      &ambiq_clock_driver_api);

DT_INST_FOREACH_STATUS_OKAY(AMBIQ_CLOCK_INIT)
``` | /content/code_sandbox/drivers/clock_control/clock_control_ambiq.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 990 |
```unknown
# Smartbond clock control driver config
config CLOCK_CONTROL_SMARTBOND
bool "Smartbond Clock Control"
depends on SOC_FAMILY_RENESAS_SMARTBOND
help
Enable driver for Clock Control subsystem found in SmartBond
if CLOCK_CONTROL_SMARTBOND
config SMARTBOND_LP_OSC_CALIBRATION_INTERVAL
int "Low-power oscillators calibration interval"
default 1
range 1 10
help
Time in seconds between calibration of low power clock RC32K and RCX.
endif # CLOCK_CONTROL_SMARTBOND
``` | /content/code_sandbox/drivers/clock_control/Kconfig.smartbond | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 118 |
```c
/*
*
*/
#define DT_DRV_COMPAT nuvoton_npcx_pcc
#include <soc.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/dt-bindings/clock/npcx_clock.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control_npcx, LOG_LEVEL_ERR);
/* Driver config */
struct npcx_pcc_config {
	/* cdcg device base address (Core Domain Clock Generator registers) */
	uintptr_t base_cdcg;
	/* pmc device base address (Power Management Controller registers) */
	uintptr_t base_pmc;
};

/* Driver convenience defines: typed register-block pointers derived from
 * the per-device config above.
 */
#define HAL_CDCG_INST(dev) \
	((struct cdcg_reg *)((const struct npcx_pcc_config *)(dev)->config)->base_cdcg)
#define HAL_PMC_INST(dev) \
	((struct pmc_reg *)((const struct npcx_pcc_config *)(dev)->config)->base_pmc)

/* Initial power-down register values applied at init to gate unused modules. */
static uint8_t pddwn_ctl_val[] = {NPCX_PWDWN_CTL_INIT};
/* Clock controller local functions */
/**
 * @brief Turn on the clock of an NPCX module.
 *
 * Clears the module's bit in the matching PWDWN_CTL register.
 * (Removed the stale ARG_UNUSED(dev): dev IS used to locate the PMC base.)
 *
 * @param dev        Clock controller device.
 * @param sub_system Pointer to a struct npcx_clk_cfg selecting the module.
 * @return 0 on success, -EINVAL if the power-down control index is invalid.
 */
static inline int npcx_clock_control_on(const struct device *dev,
					clock_control_subsys_t sub_system)
{
	struct npcx_clk_cfg *clk_cfg = (struct npcx_clk_cfg *)(sub_system);
	const uint32_t pmc_base = ((const struct npcx_pcc_config *)dev->config)->base_pmc;

	if (clk_cfg->ctrl >= NPCX_PWDWN_CTL_COUNT) {
		return -EINVAL;
	}

	/* Clear related PD (Power-Down) bit of module to turn on clock */
	NPCX_PWDWN_CTL(pmc_base, clk_cfg->ctrl) &= ~(BIT(clk_cfg->bit));
	return 0;
}
/**
 * @brief Turn off the clock of an NPCX module.
 *
 * Sets the module's bit in the matching PWDWN_CTL register.
 * (Removed the stale ARG_UNUSED(dev): dev IS used to locate the PMC base.)
 *
 * @param dev        Clock controller device.
 * @param sub_system Pointer to a struct npcx_clk_cfg selecting the module.
 * @return 0 on success, -EINVAL if the power-down control index is invalid.
 */
static inline int npcx_clock_control_off(const struct device *dev,
					 clock_control_subsys_t sub_system)
{
	struct npcx_clk_cfg *clk_cfg = (struct npcx_clk_cfg *)(sub_system);
	const uint32_t pmc_base = ((const struct npcx_pcc_config *)dev->config)->base_pmc;

	if (clk_cfg->ctrl >= NPCX_PWDWN_CTL_COUNT) {
		return -EINVAL;
	}

	/* Set related PD (Power-Down) bit of module to turn off clock */
	NPCX_PWDWN_CTL(pmc_base, clk_cfg->ctrl) |= BIT(clk_cfg->bit);
	return 0;
}
/**
 * @brief Report the rate of an NPCX clock bus.
 *
 * Rates are compile-time constants derived from the devicetree divider
 * macros (APBxDIV_VAL, AHB6DIV_VAL, FIUDIV_VAL, ...), so no registers
 * are read here.
 *
 * @return 0 on success, -EINVAL (and *rate = 0) for an unknown bus.
 */
static int npcx_clock_control_get_subsys_rate(const struct device *dev,
					      clock_control_subsys_t sub_system,
					      uint32_t *rate)
{
	ARG_UNUSED(dev);
	struct npcx_clk_cfg *clk_cfg = (struct npcx_clk_cfg *)(sub_system);

	switch (clk_cfg->bus) {
	case NPCX_CLOCK_BUS_APB1:
		*rate = NPCX_APB_CLOCK(1);
		break;
	case NPCX_CLOCK_BUS_APB2:
		*rate = NPCX_APB_CLOCK(2);
		break;
	case NPCX_CLOCK_BUS_APB3:
		*rate = NPCX_APB_CLOCK(3);
		break;
#if defined(APB4DIV_VAL)
	case NPCX_CLOCK_BUS_APB4:
		*rate = NPCX_APB_CLOCK(4);
		break;
#endif
	case NPCX_CLOCK_BUS_AHB6:
		*rate = CORE_CLK/(AHB6DIV_VAL + 1);
		break;
	case NPCX_CLOCK_BUS_FIU:
		*rate = CORE_CLK/(FIUDIV_VAL + 1);
		break;
#if defined(FIU1DIV_VAL)
	case NPCX_CLOCK_BUS_FIU1:
		*rate = CORE_CLK/(FIU1DIV_VAL + 1);
		break;
#endif
	case NPCX_CLOCK_BUS_CORE:
		*rate = CORE_CLK;
		break;
	case NPCX_CLOCK_BUS_LFCLK:
		*rate = LFCLK;
		break;
	case NPCX_CLOCK_BUS_FMCLK:
		*rate = FMCLK;
		break;
	case NPCX_CLOCK_BUS_MCLKD:
		*rate = OFMCLK/(MCLKD_SL + 1);
		break;
	default:
		*rate = 0U;
		/* Invalid parameters */
		return -EINVAL;
	}

	return 0;
}
/* Platform specific clock controller functions */
#if defined(CONFIG_PM)
/**
 * @brief Arm system-sleep entry: the EC enters sleep on the next 'wfi'.
 *
 * @param is_deep    Also disable the high-frequency clock (deep sleep).
 * @param is_instant Request instant wake-up (only meaningful with is_deep).
 */
void npcx_clock_control_turn_on_system_sleep(bool is_deep, bool is_instant)
{
	const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);
	struct pmc_reg *const inst_pmc = HAL_PMC_INST(clk_dev);
	/* Configure that ec enters system sleep mode if receiving 'wfi' */
	uint8_t pm_flags = BIT(NPCX_PMCSR_IDLE);

	/* Add 'Disable High-Frequency' flag (ie. 'deep sleep' mode) */
	if (is_deep) {
		pm_flags |= BIT(NPCX_PMCSR_DHF);
		/* Add 'Instant Wake-up' flag if sleep time is within 200 ms */
		if (is_instant)
			pm_flags |= BIT(NPCX_PMCSR_DI_INSTW);
	}
	inst_pmc->PMCSR = pm_flags;
}

/** @brief Disarm system sleep: clear all PMCSR sleep flags. */
void npcx_clock_control_turn_off_system_sleep(void)
{
	const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);
	struct pmc_reg *const inst_pmc = HAL_PMC_INST(clk_dev);

	inst_pmc->PMCSR = 0;
}
#endif /* CONFIG_PM */

/* Clock controller driver registration */
static const struct clock_control_driver_api npcx_clock_control_api = {
	.on = npcx_clock_control_on,
	.off = npcx_clock_control_off,
	.get_rate = npcx_clock_control_get_subsys_rate,
};
/* valid clock frequency check */
/* Compile-time validation of the devicetree-derived clock tree: each
 * assert enforces a frequency range or divider relation. Build failure
 * here means the board's clock configuration is out of spec.
 */
BUILD_ASSERT(OFMCLK <= MAX_OFMCLK, "Exceed maximum OFMCLK setting");
BUILD_ASSERT(CORE_CLK <= MAX_OFMCLK && CORE_CLK >= MHZ(4) &&
	     OFMCLK % CORE_CLK == 0 &&
	     OFMCLK / CORE_CLK <= 10,
	     "Invalid CORE_CLK setting");
BUILD_ASSERT(CORE_CLK / (FIUDIV_VAL + 1) <= (MAX_OFMCLK / 2) &&
	     CORE_CLK / (FIUDIV_VAL + 1) >= MHZ(4),
	     "Invalid FIUCLK setting");
#if defined(FIU1DIV_VAL)
BUILD_ASSERT(CORE_CLK / (FIU1DIV_VAL + 1) <= (MAX_OFMCLK / 2) &&
	     CORE_CLK / (FIU1DIV_VAL + 1) >= MHZ(4),
	     "Invalid FIU1CLK setting");
#endif
BUILD_ASSERT(CORE_CLK / (AHB6DIV_VAL + 1) <= (MAX_OFMCLK / 2) &&
	     CORE_CLK / (AHB6DIV_VAL + 1) >= MHZ(4),
	     "Invalid AHB6_CLK setting");
BUILD_ASSERT(APBSRC_CLK / (APB1DIV_VAL + 1) <= (MAX_OFMCLK / 2) &&
	     APBSRC_CLK / (APB1DIV_VAL + 1) >= MHZ(4) &&
	     (APB1DIV_VAL + 1) % (FPRED_VAL + 1) == 0,
	     "Invalid APB1_CLK setting");
BUILD_ASSERT(APBSRC_CLK / (APB2DIV_VAL + 1) <= (MAX_OFMCLK / 2) &&
	     APBSRC_CLK / (APB2DIV_VAL + 1) >= MHZ(8) &&
	     (APB2DIV_VAL + 1) % (FPRED_VAL + 1) == 0,
	     "Invalid APB2_CLK setting");
BUILD_ASSERT(APBSRC_CLK / (APB3DIV_VAL + 1) <= (MAX_OFMCLK / 2) &&
	     APBSRC_CLK / (APB3DIV_VAL + 1) >= KHZ(12500) &&
	     (APB3DIV_VAL + 1) % (FPRED_VAL + 1) == 0,
	     "Invalid APB3_CLK setting");
#if defined(APB4DIV_VAL)
BUILD_ASSERT(APBSRC_CLK / (APB4DIV_VAL + 1) <= MAX_OFMCLK &&
	     APBSRC_CLK / (APB4DIV_VAL + 1) >= MHZ(8) &&
	     (APB4DIV_VAL + 1) % (FPRED_VAL + 1) == 0,
	     "Invalid APB4_CLK setting");
#endif
#if defined(CONFIG_I3C_NPCX)
/* I3C needs MCLKD in 40-50 MHz and the PDMA (APB4) clock at >= 20 MHz. */
BUILD_ASSERT(OFMCLK / (MCLKD_SL + 1) <= MHZ(50) &&
	     OFMCLK / (MCLKD_SL + 1) >= MHZ(40),
	     "Invalid MCLKD_SL setting");
BUILD_ASSERT(APBSRC_CLK / (APB4DIV_VAL + 1) >= MHZ(20),
	     "Invalid PDMA CLK setting");
#endif
/**
 * @brief One-time clock controller init.
 *
 * Programs the frequency multiplier (only when values change, to avoid a
 * transient clock glitch), sets all prescalers, and powers down modules
 * not needed at boot. Register write order follows the original code and
 * must be preserved.
 */
static int npcx_clock_control_init(const struct device *dev)
{
	struct cdcg_reg *const inst_cdcg = HAL_CDCG_INST(dev);
	const uint32_t pmc_base = ((const struct npcx_pcc_config *)dev->config)->base_pmc;

	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NPCX_EXTERNAL_SRC)) {
		/* Select the external crystal as LFCLK source. */
		inst_cdcg->LFCGCTL2 |= BIT(NPCX_LFCGCTL2_XT_OSC_SL_EN);
	}

	/*
	 * Resetting the OFMCLK (even to the same value) will make the clock
	 * unstable for a little which can affect peripheral communication like
	 * eSPI. Skip this if not needed.
	 */
	if (inst_cdcg->HFCGN != HFCGN_VAL || inst_cdcg->HFCGML != HFCGML_VAL
			|| inst_cdcg->HFCGMH != HFCGMH_VAL) {
		/*
		 * Configure frequency multiplier M/N values according to
		 * the requested OFMCLK (Unit:Hz).
		 */
		inst_cdcg->HFCGN  = HFCGN_VAL;
		inst_cdcg->HFCGML = HFCGML_VAL;
		inst_cdcg->HFCGMH = HFCGMH_VAL;

		/* Load M and N values into the frequency multiplier */
		inst_cdcg->HFCGCTRL |= BIT(NPCX_HFCGCTRL_LOAD);
		/* Wait for stable */
		while (IS_BIT_SET(inst_cdcg->HFCGCTRL, NPCX_HFCGCTRL_CLK_CHNG)) {
			;
		}
	}

	/* Set all clock prescalers of core and peripherals. */
	inst_cdcg->HFCGP = VAL_HFCGP;
	inst_cdcg->HFCBCD = VAL_HFCBCD;
	inst_cdcg->HFCBCD1 = VAL_HFCBCD1;
	inst_cdcg->HFCBCD2 = VAL_HFCBCD2;
#if defined(CONFIG_SOC_SERIES_NPCX4)
	inst_cdcg->HFCBCD3 = VAL_HFCBCD3;
#endif

	/*
	 * Power-down (turn off clock) the modules initially for better
	 * power consumption.
	 */
	for (int i = 0; i < ARRAY_SIZE(pddwn_ctl_val); i++) {
		NPCX_PWDWN_CTL(pmc_base, i) = pddwn_ctl_val[i];
	}

	/* Turn off the clock of the eSPI module only if eSPI isn't required */
	if (!IS_ENABLED(CONFIG_ESPI)) {
		NPCX_PWDWN_CTL(pmc_base, NPCX_PWDWN_CTL6) |= BIT(7);
	}

	return 0;
}

/* Register bases for both CDCG and PMC blocks, taken from devicetree. */
const struct npcx_pcc_config pcc_config = {
	.base_cdcg = DT_INST_REG_ADDR_BY_NAME(0, cdcg),
	.base_pmc  = DT_INST_REG_ADDR_BY_NAME(0, pmc),
};

DEVICE_DT_INST_DEFINE(0,
		    npcx_clock_control_init,
		    NULL,
		    NULL, &pcc_config,
		    PRE_KERNEL_1,
		    CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		    &npcx_clock_control_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_npcx.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,557 |
```unknown
config CLOCK_CONTROL_GD32
bool "GD32 clock control"
default y
depends on DT_HAS_GD_GD32_CCTL_ENABLED
help
Enable driver for Gigadevice Reset Clock Unit (RCU).
``` | /content/code_sandbox/drivers/clock_control/Kconfig.gd32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 49 |
```unknown
config CLOCK_CONTROL_PWM
bool "Generic PWM clock"
default y
depends on DT_HAS_PWM_CLOCK_ENABLED
select PWM
help
Enable generic PWM clock.
config CLOCK_CONTROL_PWM_INIT_PRIORITY
int "Initialization priority of the pwm clock device"
default 51
depends on CLOCK_CONTROL_PWM
help
	  Initialization priority of the PWM clock device. It must initialize
	  after the underlying PWM device, i.e. this value must be greater
	  than the PWM driver's init priority.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.pwm | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 84 |
```unknown
# OpenISA RV32M1 PPC
config CLOCK_CONTROL_RV32M1_PCC
bool "RV32M1 PCC driver"
default y
depends on DT_HAS_OPENISA_RV32M1_PCC_ENABLED
help
Enable support for RV32M1 PCC driver.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.rv32m1 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 64 |
```unknown
# Microchip XEC
config CLOCK_CONTROL_MCHP_XEC
bool "MCHP XEC PCR clock control driver"
default y
depends on DT_HAS_MICROCHIP_XEC_PCR_ENABLED
help
Enable support for Microchip XEC PCR clock driver.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.xec | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 59 |
```c
/*
*
*
*/
#include <soc.h>
#include <stm32_ll_bus.h>
#include <stm32_ll_pwr.h>
#include <stm32_ll_rcc.h>
#include <stm32_ll_utils.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include "clock_stm32_ll_mco.h"
#include "stm32_hsem.h"
/* Macros to fill up prescaler values */
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
#define z_hsi_divider(v) LL_RCC_HSI_DIV_ ## v
#else
#define z_hsi_divider(v) LL_RCC_HSI_DIV ## v
#endif
#define hsi_divider(v) z_hsi_divider(v)
#define z_sysclk_prescaler(v) LL_RCC_SYSCLK_DIV_ ## v
#define sysclk_prescaler(v) z_sysclk_prescaler(v)
#define z_ahb_prescaler(v) LL_RCC_AHB_DIV_ ## v
#define ahb_prescaler(v) z_ahb_prescaler(v)
#define z_apb1_prescaler(v) LL_RCC_APB1_DIV_ ## v
#define apb1_prescaler(v) z_apb1_prescaler(v)
#define z_apb2_prescaler(v) LL_RCC_APB2_DIV_ ## v
#define apb2_prescaler(v) z_apb2_prescaler(v)
#define z_apb3_prescaler(v) LL_RCC_APB3_DIV_ ## v
#define apb3_prescaler(v) z_apb3_prescaler(v)
#define z_apb4_prescaler(v) LL_RCC_APB4_DIV_ ## v
#define apb4_prescaler(v) z_apb4_prescaler(v)
#define z_apb5_prescaler(v) LL_RCC_APB5_DIV_ ## v
#define apb5_prescaler(v) z_apb5_prescaler(v)
/* Macro to check for clock feasibility */
/* It is Cortex M7's responsibility to setup clock tree */
/* This check should only be performed for the M7 core code */
#ifdef CONFIG_CPU_CORTEX_M7
/* Choose PLL SRC */
#if defined(STM32_PLL_SRC_HSI)
#define PLLSRC_FREQ ((STM32_HSI_FREQ)/(STM32_HSI_DIVISOR))
#elif defined(STM32_PLL_SRC_CSI)
#define PLLSRC_FREQ STM32_CSI_FREQ
#elif defined(STM32_PLL_SRC_HSE)
#define PLLSRC_FREQ STM32_HSE_FREQ
#else
#define PLLSRC_FREQ 0
#endif
/* Given source clock and dividers, computed the output frequency of PLLP */
#define PLLP_FREQ(pllsrc_freq, divm, divn, divp) (((pllsrc_freq)*\
(divn))/((divm)*(divp)))
/* PLL P output frequency value */
#define PLLP_VALUE PLLP_FREQ(\
PLLSRC_FREQ,\
STM32_PLL_M_DIVISOR,\
STM32_PLL_N_MULTIPLIER,\
STM32_PLL_P_DIVISOR)
/* SYSCLKSRC before the D1CPRE prescaler */
#if defined(STM32_SYSCLK_SRC_PLL)
#define SYSCLKSRC_FREQ PLLP_VALUE
#elif defined(STM32_SYSCLK_SRC_HSI)
#define SYSCLKSRC_FREQ ((STM32_HSI_FREQ)/(STM32_HSI_DIVISOR))
#elif defined(STM32_SYSCLK_SRC_CSI)
#define SYSCLKSRC_FREQ STM32_CSI_FREQ
#elif defined(STM32_SYSCLK_SRC_HSE)
#define SYSCLKSRC_FREQ STM32_HSE_FREQ
#endif
/* ARM Sys CPU Clock before HPRE prescaler */
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
#define SYSCLK_FREQ ((SYSCLKSRC_FREQ)/(STM32_D1CPRE))
#define AHB_FREQ ((SYSCLK_FREQ)/(STM32_HPRE))
#define APB1_FREQ ((AHB_FREQ)/(STM32_PPRE1))
#define APB2_FREQ ((AHB_FREQ)/(STM32_PPRE2))
#define APB4_FREQ ((AHB_FREQ)/(STM32_PPRE4))
#define APB5_FREQ ((AHB_FREQ)/(STM32_PPRE5))
#else
#define SYSCLK_FREQ ((SYSCLKSRC_FREQ)/(STM32_D1CPRE))
#define AHB_FREQ ((SYSCLK_FREQ)/(STM32_HPRE))
#define APB1_FREQ ((AHB_FREQ)/(STM32_D2PPRE1))
#define APB2_FREQ ((AHB_FREQ)/(STM32_D2PPRE2))
#define APB3_FREQ ((AHB_FREQ)/(STM32_D1PPRE))
#define APB4_FREQ ((AHB_FREQ)/(STM32_D3PPRE))
#endif /* CONFIG_SOC_SERIES_STM32H7RSX */
/* Datasheet maximum frequency definitions */
#if defined(CONFIG_SOC_STM32H743XX) ||\
defined(CONFIG_SOC_STM32H745XX_M7) || defined(CONFIG_SOC_STM32H745XX_M4) ||\
defined(CONFIG_SOC_STM32H747XX_M7) || defined(CONFIG_SOC_STM32H747XX_M4) ||\
defined(CONFIG_SOC_STM32H750XX) ||\
defined(CONFIG_SOC_STM32H753XX) ||\
defined(CONFIG_SOC_STM32H755XX_M7) || defined(CONFIG_SOC_STM32H755XX_M4)
/* All h7 SoC with maximum 480MHz SYSCLK */
#define SYSCLK_FREQ_MAX 480000000UL
#define AHB_FREQ_MAX 240000000UL
#define APBx_FREQ_MAX 120000000UL
#elif defined(CONFIG_SOC_STM32H723XX) ||\
defined(CONFIG_SOC_STM32H725XX) ||\
defined(CONFIG_SOC_STM32H730XX) || defined(CONFIG_SOC_STM32H730XXQ) ||\
defined(CONFIG_SOC_STM32H735XX)
/* All h7 SoC with maximum 550MHz SYSCLK */
#define SYSCLK_FREQ_MAX 550000000UL
#define AHB_FREQ_MAX 275000000UL
#define APBx_FREQ_MAX 137500000UL
#elif defined(CONFIG_SOC_STM32H7A3XX) || defined(CONFIG_SOC_STM32H7A3XXQ) ||\
defined(CONFIG_SOC_STM32H7B0XX) || defined(CONFIG_SOC_STM32H7B0XXQ) ||\
defined(CONFIG_SOC_STM32H7B3XX) || defined(CONFIG_SOC_STM32H7B3XXQ)
#define SYSCLK_FREQ_MAX 280000000UL
#define AHB_FREQ_MAX 280000000UL
#define APBx_FREQ_MAX 140000000UL
#elif defined(CONFIG_SOC_SERIES_STM32H7RSX)
/* All h7RS SoC with maximum 500MHz SYSCLK (refer to Datasheet DS14359 rev 1) */
#define SYSCLK_FREQ_MAX 500000000UL
#define AHB_FREQ_MAX 250000000UL
#define APBx_FREQ_MAX 125000000UL
#else
/* Default: All h7 SoC with maximum 280MHz SYSCLK */
#define SYSCLK_FREQ_MAX 280000000UL
#define AHB_FREQ_MAX 140000000UL
#define APBx_FREQ_MAX 70000000UL
#endif
#if SYSCLK_FREQ > SYSCLK_FREQ_MAX
#error "SYSCLK frequency is too high!"
#endif
#if AHB_FREQ > AHB_FREQ_MAX
#error "AHB frequency is too high!"
#endif
#if APB1_FREQ > APBx_FREQ_MAX
#error "APB1 frequency is too high!"
#endif
#if APB2_FREQ > APBx_FREQ_MAX
#error "APB2 frequency is too high!"
#endif
#if APB3_FREQ > APBx_FREQ_MAX
#error "APB3 frequency is too high!"
#endif
#if APB4_FREQ > APBx_FREQ_MAX
#error "APB4 frequency is too high!"
#endif
#if SYSCLK_FREQ != CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC
#error "SYS clock frequency for M7 core doesn't match CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC"
#endif
/* end of clock feasibility check */
#endif /* CONFIG_CPU_CORTEX_M7 */
#if defined(CONFIG_CPU_CORTEX_M7)
#if STM32_D1CPRE > 1
/*
* D1CPRE prescaler allows to set a HCLK frequency lower than SYSCLK frequency.
* Though, zephyr doesn't make a difference today between these two clocks.
* So, changing this prescaler is not allowed until it is made possible to
* use them independently in zephyr clock subsystem.
*/
#error "D1CPRE prescaler can't be higher than 1"
#endif
#endif /* CONFIG_CPU_CORTEX_M7 */
#if defined(CONFIG_CPU_CORTEX_M7)
/* Offset to access bus clock registers from M7 (or only) core */
#define STM32H7_BUS_CLK_REG DT_REG_ADDR(DT_NODELABEL(rcc))
#elif defined(CONFIG_CPU_CORTEX_M4)
/* Offset to access bus clock registers from M4 core */
#define STM32H7_BUS_CLK_REG DT_REG_ADDR(DT_NODELABEL(rcc)) + 0x60
#endif
/** Derive a bus clock: source frequency divided by its prescaler. */
static uint32_t get_bus_clock(uint32_t clock, uint32_t prescaler)
{
	uint32_t bus_freq = clock / prescaler;

	return bus_freq;
}
__unused
/* Compute a PLL output frequency. Dividing by M first mirrors the
 * hardware's VCO input stage; keep the operation order — changing it
 * alters integer-division rounding.
 */
static uint32_t get_pllout_frequency(uint32_t pllsrc_freq,
				     int pllm_div,
				     int plln_mul,
				     int pllout_div)
{
	__ASSERT_NO_MSG(pllm_div && pllout_div);

	return (pllsrc_freq / pllm_div) * plln_mul / pllout_div;
}

__unused
/* Read the configured PLL source from RCC and return its nominal
 * frequency; 0 when no source is selected.
 */
static uint32_t get_pllsrc_frequency(void)
{
	switch (LL_RCC_PLL_GetSource()) {
	case LL_RCC_PLLSOURCE_HSI:
		return STM32_HSI_FREQ;
	case LL_RCC_PLLSOURCE_CSI:
		return STM32_CSI_FREQ;
	case LL_RCC_PLLSOURCE_HSE:
		return STM32_HSE_FREQ;
	case LL_RCC_PLLSOURCE_NONE:
	default:
		return 0;
	}
}

__unused
/* Compute the current HCLK: read the active SYSCLK source from RCC and
 * apply the HPRE prescaler.
 * NOTE(review): if the hardware reports PLL1 as source while
 * STM32_PLL_ENABLED is not defined, sysclk stays 0 — presumably
 * unreachable with a consistent devicetree; verify.
 */
static uint32_t get_hclk_frequency(void)
{
	uint32_t sysclk = 0;

	/* Get the current system clock source */
	switch (LL_RCC_GetSysClkSource()) {
	case LL_RCC_SYS_CLKSOURCE_STATUS_HSI:
		sysclk = STM32_HSI_FREQ/STM32_HSI_DIVISOR;
		break;
	case LL_RCC_SYS_CLKSOURCE_STATUS_CSI:
		sysclk = STM32_CSI_FREQ;
		break;
	case LL_RCC_SYS_CLKSOURCE_STATUS_HSE:
		sysclk = STM32_HSE_FREQ;
		break;
#if defined(STM32_PLL_ENABLED)
	case LL_RCC_SYS_CLKSOURCE_STATUS_PLL1:
		sysclk = get_pllout_frequency(get_pllsrc_frequency(),
					      STM32_PLL_M_DIVISOR,
					      STM32_PLL_N_MULTIPLIER,
					      STM32_PLL_P_DIVISOR);
		break;
#endif /* STM32_PLL_ENABLED */
	}

	return get_bus_clock(sysclk, STM32_HPRE);
}
#if !defined(CONFIG_CPU_CORTEX_M4)
/* Apply the Kconfig-selected power supply topology and raise the core
 * regulator to the highest voltage scale (SCALE0) before reprogramming
 * the clock tree; busy-waits until the regulator reports ready.
 * Always returns 0.
 */
static int32_t prepare_regulator_voltage_scale(void)
{
	/* Apply system power supply configuration */
#if defined(SMPS) && defined(CONFIG_POWER_SUPPLY_DIRECT_SMPS)
	LL_PWR_ConfigSupply(LL_PWR_DIRECT_SMPS_SUPPLY);
#elif defined(SMPS) && defined(CONFIG_POWER_SUPPLY_SMPS_1V8_SUPPLIES_LDO)
	LL_PWR_ConfigSupply(LL_PWR_SMPS_1V8_SUPPLIES_LDO);
#elif defined(SMPS) && defined(CONFIG_POWER_SUPPLY_SMPS_2V5_SUPPLIES_LDO)
	LL_PWR_ConfigSupply(LL_PWR_SMPS_2V5_SUPPLIES_LDO);
#elif defined(SMPS) && defined(CONFIG_POWER_SUPPLY_SMPS_1V8_SUPPLIES_EXT_AND_LDO)
	LL_PWR_ConfigSupply(LL_PWR_SMPS_1V8_SUPPLIES_EXT_AND_LDO);
#elif defined(SMPS) && defined(CONFIG_POWER_SUPPLY_SMPS_2V5_SUPPLIES_EXT_AND_LDO)
	LL_PWR_ConfigSupply(LL_PWR_SMPS_2V5_SUPPLIES_EXT_AND_LDO);
#elif defined(SMPS) && defined(CONFIG_POWER_SUPPLY_SMPS_1V8_SUPPLIES_EXT)
	LL_PWR_ConfigSupply(LL_PWR_SMPS_1V8_SUPPLIES_EXT);
#elif defined(SMPS) && defined(CONFIG_POWER_SUPPLY_SMPS_2V5_SUPPLIES_EXT)
	LL_PWR_ConfigSupply(LL_PWR_SMPS_2V5_SUPPLIES_EXT);
#elif defined(CONFIG_POWER_SUPPLY_EXTERNAL_SOURCE)
	LL_PWR_ConfigSupply(LL_PWR_EXTERNAL_SOURCE_SUPPLY);
#else
	LL_PWR_ConfigSupply(LL_PWR_LDO_SUPPLY);
#endif

	/* Make sure to put the CPU in highest Voltage scale during clock configuration */
	/* Highest voltage is SCALE0 */
	LL_PWR_SetRegulVoltageScaling(LL_PWR_REGU_VOLTAGE_SCALE0);
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
	while (LL_PWR_IsActiveFlag_VOSRDY() == 0) {
#else
	while (LL_PWR_IsActiveFlag_VOS() == 0) {
#endif
	}
	return 0;
}

/* Post-clock-configuration regulator adjustment.
 * NOTE(review): the sysclk_freq parameter is unused and the scale is left
 * at SCALE0 — the "scale down for lower power" behavior described below
 * is not implemented yet; confirm whether this is intentional.
 */
static int32_t optimize_regulator_voltage_scale(uint32_t sysclk_freq)
{
	/* After sysclock is configured, tweak the voltage scale down */
	/* to reduce power consumption */
	/* Needs some smart work to configure properly */
	/* LL_PWR_REGULATOR_SCALE3 is lowest power consumption */
	/* Must be done in accordance to the Maximum allowed frequency vs VOS*/
	/* See RM0433 page 352 for more details */
#if defined(SMPS) && defined(CONFIG_POWER_SUPPLY_DIRECT_SMPS)
	LL_PWR_ConfigSupply(LL_PWR_DIRECT_SMPS_SUPPLY);
#elif defined(SMPS) && defined(CONFIG_POWER_SUPPLY_SMPS_1V8_SUPPLIES_LDO)
	LL_PWR_ConfigSupply(LL_PWR_SMPS_1V8_SUPPLIES_LDO);
#elif defined(SMPS) && defined(CONFIG_POWER_SUPPLY_SMPS_2V5_SUPPLIES_LDO)
	LL_PWR_ConfigSupply(LL_PWR_SMPS_2V5_SUPPLIES_LDO);
#elif defined(SMPS) && defined(CONFIG_POWER_SUPPLY_SMPS_1V8_SUPPLIES_EXT_AND_LDO)
	LL_PWR_ConfigSupply(LL_PWR_SMPS_1V8_SUPPLIES_EXT_AND_LDO);
#elif defined(SMPS) && defined(CONFIG_POWER_SUPPLY_SMPS_2V5_SUPPLIES_EXT_AND_LDO)
	LL_PWR_ConfigSupply(LL_PWR_SMPS_2V5_SUPPLIES_EXT_AND_LDO);
#elif defined(SMPS) && defined(CONFIG_POWER_SUPPLY_SMPS_1V8_SUPPLIES_EXT)
	LL_PWR_ConfigSupply(LL_PWR_SMPS_1V8_SUPPLIES_EXT);
#elif defined(SMPS) && defined(CONFIG_POWER_SUPPLY_SMPS_2V5_SUPPLIES_EXT)
	LL_PWR_ConfigSupply(LL_PWR_SMPS_2V5_SUPPLIES_EXT);
#elif defined(CONFIG_POWER_SUPPLY_EXTERNAL_SOURCE)
	LL_PWR_ConfigSupply(LL_PWR_EXTERNAL_SOURCE_SUPPLY);
#else
	LL_PWR_ConfigSupply(LL_PWR_LDO_SUPPLY);
#endif
	LL_PWR_SetRegulVoltageScaling(LL_PWR_REGU_VOLTAGE_SCALE0);
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
	while (LL_PWR_IsActiveFlag_VOSRDY() == 0) {
#else
	while (LL_PWR_IsActiveFlag_VOS() == 0) {
#endif
	};
	return 0;
}
__unused
/* Map the PLL VCO input frequency (source / M divider) to the matching
 * LL input-range constant. Returns -ERANGE outside 1-16 MHz.
 */
static int get_vco_input_range(uint32_t m_div, uint32_t *range)
{
	const uint32_t vco_in_freq = PLLSRC_FREQ / m_div;

	/* Valid VCO input is 1 MHz .. 16 MHz inclusive. */
	if (vco_in_freq < MHZ(1) || vco_in_freq > MHZ(16)) {
		return -ERANGE;
	}

	if (vco_in_freq <= MHZ(2)) {
		*range = LL_RCC_PLLINPUTRANGE_1_2;
	} else if (vco_in_freq <= MHZ(4)) {
		*range = LL_RCC_PLLINPUTRANGE_2_4;
	} else if (vco_in_freq <= MHZ(8)) {
		*range = LL_RCC_PLLINPUTRANGE_4_8;
	} else {
		*range = LL_RCC_PLLINPUTRANGE_8_16;
	}

	return 0;
}
__unused
/* Pick the VCO output range: medium for the lowest (1-2 MHz) input
 * range, wide for everything else.
 */
static uint32_t get_vco_output_range(uint32_t vco_input_range)
{
	return (vco_input_range == LL_RCC_PLLINPUTRANGE_1_2) ?
	       LL_RCC_PLLVCORANGE_MEDIUM : LL_RCC_PLLVCORANGE_WIDE;
}
#endif /* ! CONFIG_CPU_CORTEX_M4 */
/** @brief Verifies clock is part of active clock configuration
 *
 * @return 0 if the requested source clock is enabled by the devicetree
 *         configuration, -ENOTSUP otherwise.
 */
static int enabled_clock(uint32_t src_clk)
{
	if ((src_clk == STM32_SRC_SYSCLK) ||
	    ((src_clk == STM32_SRC_CKPER) && IS_ENABLED(STM32_CKPER_ENABLED)) ||
	    ((src_clk == STM32_SRC_HSE) && IS_ENABLED(STM32_HSE_ENABLED)) ||
	    ((src_clk == STM32_SRC_HSI_KER) && IS_ENABLED(STM32_HSI_ENABLED)) ||
	    ((src_clk == STM32_SRC_CSI_KER) && IS_ENABLED(STM32_CSI_ENABLED)) ||
	    ((src_clk == STM32_SRC_HSI48) && IS_ENABLED(STM32_HSI48_ENABLED)) ||
	    ((src_clk == STM32_SRC_LSE) && IS_ENABLED(STM32_LSE_ENABLED)) ||
	    ((src_clk == STM32_SRC_LSI) && IS_ENABLED(STM32_LSI_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL1_P) && IS_ENABLED(STM32_PLL_P_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL1_Q) && IS_ENABLED(STM32_PLL_Q_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL1_R) && IS_ENABLED(STM32_PLL_R_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL2_P) && IS_ENABLED(STM32_PLL2_P_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL2_Q) && IS_ENABLED(STM32_PLL2_Q_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL2_R) && IS_ENABLED(STM32_PLL2_R_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL3_P) && IS_ENABLED(STM32_PLL3_P_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL3_Q) && IS_ENABLED(STM32_PLL3_Q_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL3_R) && IS_ENABLED(STM32_PLL3_R_ENABLED))) {
		return 0;
	}

	return -ENOTSUP;
}

/**
 * @brief Gate on a peripheral bus clock.
 *
 * Takes the inter-core RCC hardware semaphore (dual-core H7 parts share
 * the RCC) around the register update.
 */
static inline int stm32_clock_control_on(const struct device *dev,
					 clock_control_subsys_t sub_system)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);
	volatile int temp;

	ARG_UNUSED(dev);

	if (IN_RANGE(pclken->bus, STM32_PERIPH_BUS_MIN, STM32_PERIPH_BUS_MAX) == 0) {
		/* Attempt to toggle a wrong periph clock bit */
		return -ENOTSUP;
	}

	z_stm32_hsem_lock(CFG_HW_RCC_SEMID, HSEM_LOCK_DEFAULT_RETRY);
	sys_set_bits(STM32H7_BUS_CLK_REG + pclken->bus, pclken->enr);
	/* Delay after enabling the clock, to allow it to become active.
	 * See RM0433 8.5.10 "Clock enabling delays"
	 */
	/* The volatile dummy read below IS the delay — do not remove. */
	temp = sys_read32(STM32H7_BUS_CLK_REG + pclken->bus);
	UNUSED(temp);
	z_stm32_hsem_unlock(CFG_HW_RCC_SEMID);

	return 0;
}

/**
 * @brief Gate off a peripheral bus clock (under the RCC hardware semaphore).
 */
static inline int stm32_clock_control_off(const struct device *dev,
					  clock_control_subsys_t sub_system)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);

	ARG_UNUSED(dev);

	if (IN_RANGE(pclken->bus, STM32_PERIPH_BUS_MIN, STM32_PERIPH_BUS_MAX) == 0) {
		/* Attempt to toggle a wrong periph clock bit */
		return -ENOTSUP;
	}

	z_stm32_hsem_lock(CFG_HW_RCC_SEMID, HSEM_LOCK_DEFAULT_RETRY);
	sys_clear_bits(STM32H7_BUS_CLK_REG + pclken->bus, pclken->enr);
	z_stm32_hsem_unlock(CFG_HW_RCC_SEMID);

	return 0;
}

/**
 * @brief Select an alternate domain clock source for a peripheral.
 *
 * pclken->bus carries the requested source clock and pclken->enr encodes
 * register offset / mask / shift / value; the field is cleared then set
 * under the RCC hardware semaphore.
 */
static inline int stm32_clock_control_configure(const struct device *dev,
						clock_control_subsys_t sub_system,
						void *data)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);
	int err;

	ARG_UNUSED(dev);
	ARG_UNUSED(data);

	err = enabled_clock(pclken->bus);
	if (err < 0) {
		/* Attempt to configure a src clock not available or not valid */
		return err;
	}

	z_stm32_hsem_lock(CFG_HW_RCC_SEMID, HSEM_LOCK_DEFAULT_RETRY);
	sys_clear_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + STM32_CLOCK_REG_GET(pclken->enr),
		       STM32_CLOCK_MASK_GET(pclken->enr) << STM32_CLOCK_SHIFT_GET(pclken->enr));
	sys_set_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + STM32_CLOCK_REG_GET(pclken->enr),
		     STM32_CLOCK_VAL_GET(pclken->enr) << STM32_CLOCK_SHIFT_GET(pclken->enr));
	z_stm32_hsem_unlock(CFG_HW_RCC_SEMID);

	return 0;
}
/*
 * clock_control get_rate() hook.
 *
 * Translates a subsystem identifier (a peripheral bus or a raw clock
 * source such as SYSCLK, a fixed oscillator or a PLL output) into its
 * current frequency in Hz.
 *
 * @param clock      Clock controller device (unused).
 * @param sub_system Pointer to a struct stm32_pclken; ->bus selects the
 *                   bus/source whose rate is requested.
 * @param rate       Output: frequency in Hz.
 *
 * @return 0 on success, -ENOTSUP for an unknown bus/source.
 */
static int stm32_clock_control_get_subsys_rate(const struct device *clock,
					       clock_control_subsys_t sub_system,
					       uint32_t *rate)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);
	/*
	 * Get AHB Clock (= SystemCoreClock = SYSCLK/prescaler)
	 * SystemCoreClock is preferred to CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC
	 * since it will be updated after clock configuration and hence
	 * more likely to contain actual clock speed
	 */
#if defined(CONFIG_CPU_CORTEX_M4)
	/* On the Cortex-M4 core SystemCoreClock already holds the AHB rate */
	uint32_t ahb_clock = SystemCoreClock;
#else
	uint32_t ahb_clock = get_bus_clock(SystemCoreClock, STM32_HPRE);
#endif
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
	/* H7RS series: APB1/2/4/5 derive from the AHB clock */
	uint32_t apb1_clock = get_bus_clock(ahb_clock, STM32_PPRE1);
	uint32_t apb2_clock = get_bus_clock(ahb_clock, STM32_PPRE2);
	uint32_t apb4_clock = get_bus_clock(ahb_clock, STM32_PPRE4);
	uint32_t apb5_clock = get_bus_clock(ahb_clock, STM32_PPRE5);
#else
	/* Other H7 parts name the APB prescalers per power domain (D1/D2/D3) */
	uint32_t apb1_clock = get_bus_clock(ahb_clock, STM32_D2PPRE1);
	uint32_t apb2_clock = get_bus_clock(ahb_clock, STM32_D2PPRE2);
	uint32_t apb3_clock = get_bus_clock(ahb_clock, STM32_D1PPRE);
	uint32_t apb4_clock = get_bus_clock(ahb_clock, STM32_D3PPRE);
#endif
	ARG_UNUSED(clock);
	switch (pclken->bus) {
	/* All AHB buses run at the (single) AHB clock rate */
	case STM32_CLOCK_BUS_AHB1:
	case STM32_CLOCK_BUS_AHB2:
	case STM32_CLOCK_BUS_AHB3:
	case STM32_CLOCK_BUS_AHB4:
		*rate = ahb_clock;
		break;
	case STM32_CLOCK_BUS_APB1:
	case STM32_CLOCK_BUS_APB1_2:
		*rate = apb1_clock;
		break;
	case STM32_CLOCK_BUS_APB2:
		*rate = apb2_clock;
		break;
#if !defined(CONFIG_SOC_SERIES_STM32H7RSX)
	case STM32_CLOCK_BUS_APB3:
		*rate = apb3_clock;
		break;
#endif /* !CONFIG_SOC_SERIES_STM32H7RSX */
	case STM32_CLOCK_BUS_APB4:
		*rate = apb4_clock;
		break;
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
	case STM32_CLOCK_BUS_APB5:
		*rate = apb5_clock;
		break;
	case STM32_CLOCK_BUS_AHB5:
		*rate = ahb_clock;
		break;
#endif /* CONFIG_SOC_SERIES_STM32H7RSX */
	/* Raw clock sources (used as peripheral kernel clocks) */
	case STM32_SRC_SYSCLK:
		*rate = get_hclk_frequency();
		break;
#if defined(STM32_CKPER_ENABLED)
	case STM32_SRC_CKPER:
		*rate = LL_RCC_GetCLKPClockFreq(LL_RCC_CLKP_CLKSOURCE);
		break;
#endif /* STM32_CKPER_ENABLED */
#if defined(STM32_HSE_ENABLED)
	case STM32_SRC_HSE:
		*rate = STM32_HSE_FREQ;
		break;
#endif /* STM32_HSE_ENABLED */
#if defined(STM32_LSE_ENABLED)
	case STM32_SRC_LSE:
		*rate = STM32_LSE_FREQ;
		break;
#endif /* STM32_LSE_ENABLED */
#if defined(STM32_LSI_ENABLED)
	case STM32_SRC_LSI:
		*rate = STM32_LSI_FREQ;
		break;
#endif /* STM32_LSI_ENABLED */
#if defined(STM32_HSI48_ENABLED)
	case STM32_SRC_HSI48:
		*rate = STM32_HSI48_FREQ;
		break;
#endif /* STM32_HSI48_ENABLED */
	/* PLL outputs: rate = pll_input / M * N / {P,Q,R,S,T} */
#if defined(STM32_PLL_ENABLED)
	case STM32_SRC_PLL1_P:
		*rate = get_pllout_frequency(get_pllsrc_frequency(),
					     STM32_PLL_M_DIVISOR,
					     STM32_PLL_N_MULTIPLIER,
					     STM32_PLL_P_DIVISOR);
		break;
	case STM32_SRC_PLL1_Q:
		*rate = get_pllout_frequency(get_pllsrc_frequency(),
					     STM32_PLL_M_DIVISOR,
					     STM32_PLL_N_MULTIPLIER,
					     STM32_PLL_Q_DIVISOR);
		break;
	case STM32_SRC_PLL1_R:
		*rate = get_pllout_frequency(get_pllsrc_frequency(),
					     STM32_PLL_M_DIVISOR,
					     STM32_PLL_N_MULTIPLIER,
					     STM32_PLL_R_DIVISOR);
		break;
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
	case STM32_SRC_PLL1_S:
		*rate = get_pllout_frequency(get_pllsrc_frequency(),
					     STM32_PLL_M_DIVISOR,
					     STM32_PLL_N_MULTIPLIER,
					     STM32_PLL_S_DIVISOR);
		break;
	/* PLL 1 has no T-divider */
#endif /* CONFIG_SOC_SERIES_STM32H7RSX */
#endif /* STM32_PLL_ENABLED */
#if defined(STM32_PLL2_ENABLED)
	case STM32_SRC_PLL2_P:
		*rate = get_pllout_frequency(get_pllsrc_frequency(),
					     STM32_PLL2_M_DIVISOR,
					     STM32_PLL2_N_MULTIPLIER,
					     STM32_PLL2_P_DIVISOR);
		break;
	case STM32_SRC_PLL2_Q:
		*rate = get_pllout_frequency(get_pllsrc_frequency(),
					     STM32_PLL2_M_DIVISOR,
					     STM32_PLL2_N_MULTIPLIER,
					     STM32_PLL2_Q_DIVISOR);
		break;
	case STM32_SRC_PLL2_R:
		*rate = get_pllout_frequency(get_pllsrc_frequency(),
					     STM32_PLL2_M_DIVISOR,
					     STM32_PLL2_N_MULTIPLIER,
					     STM32_PLL2_R_DIVISOR);
		break;
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
	case STM32_SRC_PLL2_S:
		*rate = get_pllout_frequency(get_pllsrc_frequency(),
					     STM32_PLL2_M_DIVISOR,
					     STM32_PLL2_N_MULTIPLIER,
					     STM32_PLL2_S_DIVISOR);
		break;
	case STM32_SRC_PLL2_T:
		*rate = get_pllout_frequency(get_pllsrc_frequency(),
					     STM32_PLL2_M_DIVISOR,
					     STM32_PLL2_N_MULTIPLIER,
					     STM32_PLL2_T_DIVISOR);
		break;
#endif /* CONFIG_SOC_SERIES_STM32H7RSX */
#endif /* STM32_PLL2_ENABLED */
#if defined(STM32_PLL3_ENABLED)
	case STM32_SRC_PLL3_P:
		*rate = get_pllout_frequency(get_pllsrc_frequency(),
					     STM32_PLL3_M_DIVISOR,
					     STM32_PLL3_N_MULTIPLIER,
					     STM32_PLL3_P_DIVISOR);
		break;
	case STM32_SRC_PLL3_Q:
		*rate = get_pllout_frequency(get_pllsrc_frequency(),
					     STM32_PLL3_M_DIVISOR,
					     STM32_PLL3_N_MULTIPLIER,
					     STM32_PLL3_Q_DIVISOR);
		break;
	case STM32_SRC_PLL3_R:
		*rate = get_pllout_frequency(get_pllsrc_frequency(),
					     STM32_PLL3_M_DIVISOR,
					     STM32_PLL3_N_MULTIPLIER,
					     STM32_PLL3_R_DIVISOR);
		break;
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
	case STM32_SRC_PLL3_S:
		*rate = get_pllout_frequency(get_pllsrc_frequency(),
					     STM32_PLL3_M_DIVISOR,
					     STM32_PLL3_N_MULTIPLIER,
					     STM32_PLL3_S_DIVISOR);
		break;
	/* PLL 3 has no T-divider */
#endif /* CONFIG_SOC_SERIES_STM32H7RSX */
#endif /* STM32_PLL3_ENABLED */
	default:
		return -ENOTSUP;
	}
	return 0;
}
/* clock_control driver API hooks exposed by the STM32H7 RCC driver */
static const struct clock_control_driver_api stm32_clock_control_api = {
	.on = stm32_clock_control_on,
	.off = stm32_clock_control_off,
	.get_rate = stm32_clock_control_get_subsys_rate,
	.configure = stm32_clock_control_configure,
};
/*
 * Start every fixed-rate oscillator (HSE, HSI, CSI, LSI, LSE, HSI48)
 * enabled in devicetree, busy-waiting until each one reports ready.
 */
__unused
static void set_up_fixed_clock_sources(void)
{
	if (IS_ENABLED(STM32_HSE_ENABLED)) {
		/* Enable HSE oscillator */
		if (IS_ENABLED(STM32_HSE_BYPASS)) {
			LL_RCC_HSE_EnableBypass();
		} else {
			LL_RCC_HSE_DisableBypass();
		}
		LL_RCC_HSE_Enable();
		while (LL_RCC_HSE_IsReady() != 1) {
		}
		/* Check if we need to enable HSE clock security system or not */
#if STM32_HSE_CSS
		/* CSS failure raises an NMI: route it to the HAL handler first */
		z_arm_nmi_set_handler(HAL_RCC_NMI_IRQHandler);
		LL_RCC_HSE_EnableCSS();
#endif /* STM32_HSE_CSS */
	}
	if (IS_ENABLED(STM32_HSI_ENABLED)) {
		/* Enable HSI oscillator */
		LL_RCC_HSI_Enable();
		while (LL_RCC_HSI_IsReady() != 1) {
		}
		/* HSI divider configuration */
		LL_RCC_HSI_SetDivider(hsi_divider(STM32_HSI_DIVISOR));
	}
	if (IS_ENABLED(STM32_CSI_ENABLED)) {
		/* Enable CSI oscillator */
		LL_RCC_CSI_Enable();
		while (LL_RCC_CSI_IsReady() != 1) {
		}
	}
	if (IS_ENABLED(STM32_LSI_ENABLED)) {
		/* Enable LSI oscillator */
		LL_RCC_LSI_Enable();
		while (LL_RCC_LSI_IsReady() != 1) {
		}
	}
	if (IS_ENABLED(STM32_LSE_ENABLED)) {
		/* LSE lives in the backup domain: unlock it before writing BDCR */
		LL_PWR_EnableBkUpAccess();
		/* Configure driving capability */
		LL_RCC_LSE_SetDriveCapability(STM32_LSE_DRIVING << RCC_BDCR_LSEDRV_Pos);
		if (IS_ENABLED(STM32_LSE_BYPASS)) {
			/* Configure LSE bypass */
			LL_RCC_LSE_EnableBypass();
		}
		/* Enable LSE oscillator */
		LL_RCC_LSE_Enable();
		while (LL_RCC_LSE_IsReady() != 1) {
		}
	}
	if (IS_ENABLED(STM32_HSI48_ENABLED)) {
		/* Enable the 48 MHz RC oscillator (USB/RNG kernel clock) */
		LL_RCC_HSI48_Enable();
		while (LL_RCC_HSI48_IsReady() != 1) {
		}
	}
}
/*
 * Unconditionally switch the system clock source to HSI.
 *
 * Used as a safe intermediate SYSCLK while the PLL that may currently
 * drive the system is disabled and reconfigured.
 */
__unused
static void stm32_clock_switch_to_hsi(void)
{
	/* Enable HSI if not enabled */
	if (LL_RCC_HSI_IsReady() != 1) {
		/* Enable HSI */
		LL_RCC_HSI_Enable();
		while (LL_RCC_HSI_IsReady() != 1) {
			/* Wait for HSI ready */
		}
	}
	/* Set HSI as SYSCLCK source */
	LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_HSI);
	/* Wait until the hardware confirms the switch took effect */
	while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_HSI) {
	}
}
/*
 * Configure and start PLL1/PLL2/PLL3 as enabled in devicetree.
 *
 * If the system currently runs from PLL1, SYSCLK is first moved to HSI
 * so the PLL can be safely stopped and reprogrammed. When no PLL is
 * enabled the PLL source mux is parked on "none".
 *
 * @return 0 on success, negative errno on invalid source/divider setup.
 */
__unused
static int set_up_plls(void)
{
#if defined(STM32_PLL_ENABLED) || defined(STM32_PLL2_ENABLED) || defined(STM32_PLL3_ENABLED)
	int r;
	uint32_t vco_input_range;
	uint32_t vco_output_range;
	/*
	 * Case of chain-loaded applications:
	 * Switch to HSI and disable the PLL before configuration.
	 * (Switching to HSI makes sure we have a SYSCLK source in
	 * case we're currently running from the PLL we're about to
	 * turn off and reconfigure.)
	 *
	 */
	if (LL_RCC_GetSysClkSource() == LL_RCC_SYS_CLKSOURCE_STATUS_PLL1) {
		stm32_clock_switch_to_hsi();
		LL_RCC_SetAHBPrescaler(LL_RCC_SYSCLK_DIV_1);
	}
	LL_RCC_PLL1_Disable();
	/* Configure PLL source */
	/* Can be HSE , HSI 64Mhz/HSIDIV, CSI 4MHz*/
	/* NOTE: the source mux is shared by all three PLLs */
	if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		/* Main PLL configuration and activation */
		LL_RCC_PLL_SetSource(LL_RCC_PLLSOURCE_HSE);
	} else if (IS_ENABLED(STM32_PLL_SRC_CSI)) {
		/* Main PLL configuration and activation */
		LL_RCC_PLL_SetSource(LL_RCC_PLLSOURCE_CSI);
	} else if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		/* Main PLL configuration and activation */
		LL_RCC_PLL_SetSource(LL_RCC_PLLSOURCE_HSI);
	} else {
		return -ENOTSUP;
	}
#if defined(STM32_PLL_ENABLED)
	/* PLL1: M pre-divider, N multiplier, then per-output dividers */
	r = get_vco_input_range(STM32_PLL_M_DIVISOR, &vco_input_range);
	if (r < 0) {
		return r;
	}
	vco_output_range = get_vco_output_range(vco_input_range);
	LL_RCC_PLL1_SetM(STM32_PLL_M_DIVISOR);
	LL_RCC_PLL1_SetVCOInputRange(vco_input_range);
	LL_RCC_PLL1_SetVCOOutputRange(vco_output_range);
	LL_RCC_PLL1_SetN(STM32_PLL_N_MULTIPLIER);
	/* FRACN disable DIVP,DIVQ,DIVR enable*/
	LL_RCC_PLL1FRACN_Disable();
	if (IS_ENABLED(STM32_PLL_P_ENABLED)) {
		LL_RCC_PLL1_SetP(STM32_PLL_P_DIVISOR);
		LL_RCC_PLL1P_Enable();
	}
	if (IS_ENABLED(STM32_PLL_Q_ENABLED)) {
		LL_RCC_PLL1_SetQ(STM32_PLL_Q_DIVISOR);
		LL_RCC_PLL1Q_Enable();
	}
	if (IS_ENABLED(STM32_PLL_R_ENABLED)) {
		LL_RCC_PLL1_SetR(STM32_PLL_R_DIVISOR);
		LL_RCC_PLL1R_Enable();
	}
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
	if (IS_ENABLED(STM32_PLL_S_ENABLED)) {
		LL_RCC_PLL1_SetS(STM32_PLL_S_DIVISOR);
		LL_RCC_PLL1S_Enable();
	}
#endif /* CONFIG_SOC_SERIES_STM32H7RSX */
	LL_RCC_PLL1_Enable();
	while (LL_RCC_PLL1_IsReady() != 1U) {
	}
#endif /* STM32_PLL_ENABLED */
#if defined(STM32_PLL2_ENABLED)
	/* PLL2: same bring-up sequence as PLL1 */
	r = get_vco_input_range(STM32_PLL2_M_DIVISOR, &vco_input_range);
	if (r < 0) {
		return r;
	}
	vco_output_range = get_vco_output_range(vco_input_range);
	LL_RCC_PLL2_SetM(STM32_PLL2_M_DIVISOR);
	LL_RCC_PLL2_SetVCOInputRange(vco_input_range);
	LL_RCC_PLL2_SetVCOOutputRange(vco_output_range);
	LL_RCC_PLL2_SetN(STM32_PLL2_N_MULTIPLIER);
	LL_RCC_PLL2FRACN_Disable();
	if (IS_ENABLED(STM32_PLL2_P_ENABLED)) {
		LL_RCC_PLL2_SetP(STM32_PLL2_P_DIVISOR);
		LL_RCC_PLL2P_Enable();
	}
	if (IS_ENABLED(STM32_PLL2_Q_ENABLED)) {
		LL_RCC_PLL2_SetQ(STM32_PLL2_Q_DIVISOR);
		LL_RCC_PLL2Q_Enable();
	}
	if (IS_ENABLED(STM32_PLL2_R_ENABLED)) {
		LL_RCC_PLL2_SetR(STM32_PLL2_R_DIVISOR);
		LL_RCC_PLL2R_Enable();
	}
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
	if (IS_ENABLED(STM32_PLL2_S_ENABLED)) {
		LL_RCC_PLL2_SetS(STM32_PLL2_S_DIVISOR);
		LL_RCC_PLL2S_Enable();
	}
	if (IS_ENABLED(STM32_PLL2_T_ENABLED)) {
		LL_RCC_PLL2_SetT(STM32_PLL2_T_DIVISOR);
		LL_RCC_PLL2T_Enable();
	}
#endif /* CONFIG_SOC_SERIES_STM32H7RSX */
	LL_RCC_PLL2_Enable();
	while (LL_RCC_PLL2_IsReady() != 1U) {
	}
#endif /* STM32_PLL2_ENABLED */
#if defined(STM32_PLL3_ENABLED)
	/* PLL3: same bring-up sequence as PLL1 */
	r = get_vco_input_range(STM32_PLL3_M_DIVISOR, &vco_input_range);
	if (r < 0) {
		return r;
	}
	vco_output_range = get_vco_output_range(vco_input_range);
	LL_RCC_PLL3_SetM(STM32_PLL3_M_DIVISOR);
	LL_RCC_PLL3_SetVCOInputRange(vco_input_range);
	LL_RCC_PLL3_SetVCOOutputRange(vco_output_range);
	LL_RCC_PLL3_SetN(STM32_PLL3_N_MULTIPLIER);
	LL_RCC_PLL3FRACN_Disable();
	if (IS_ENABLED(STM32_PLL3_P_ENABLED)) {
		LL_RCC_PLL3_SetP(STM32_PLL3_P_DIVISOR);
		LL_RCC_PLL3P_Enable();
	}
	if (IS_ENABLED(STM32_PLL3_Q_ENABLED)) {
		LL_RCC_PLL3_SetQ(STM32_PLL3_Q_DIVISOR);
		LL_RCC_PLL3Q_Enable();
	}
	if (IS_ENABLED(STM32_PLL3_R_ENABLED)) {
		LL_RCC_PLL3_SetR(STM32_PLL3_R_DIVISOR);
		LL_RCC_PLL3R_Enable();
	}
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
	if (IS_ENABLED(STM32_PLL3_S_ENABLED)) {
		LL_RCC_PLL3_SetS(STM32_PLL3_S_DIVISOR);
		LL_RCC_PLL3S_Enable();
	}
#endif /* CONFIG_SOC_SERIES_STM32H7RSX */
	LL_RCC_PLL3_Enable();
	while (LL_RCC_PLL3_IsReady() != 1U) {
	}
#endif /* STM32_PLL3_ENABLED */
#else
	/* Init PLL source to None */
	LL_RCC_PLL_SetSource(LL_RCC_PLLSOURCE_NONE);
#endif /* STM32_PLL_ENABLED || STM32_PLL2_ENABLED || STM32_PLL3_ENABLED */
	return 0;
}
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
/* adapted from the stm32cube SystemCoreClockUpdate*/
/*
 * Recompute the CMSIS SystemCoreClock global from the live RCC registers
 * (H7RS only). Reads the SYSCLK mux, and for the PLL1 case rebuilds the
 * VCO frequency from the M/N/FRACN/P fields, then applies the CPU
 * prescaler from CDCFGR.
 */
void stm32_system_clock_update(void)
{
	uint32_t sysclk, hsivalue, pllsource, pllm, pllp, core_presc;
	/* float math mirrors the ST reference implementation (FRACN term) */
	float_t pllfracn, pllvco;
	/* Get SYSCLK source */
	switch (RCC->CFGR & RCC_CFGR_SWS) {
	case 0x00: /* HSI used as system clock source (default after reset) */
		sysclk = (HSI_VALUE >> ((RCC->CR & RCC_CR_HSIDIV)
				       >> RCC_CR_HSIDIV_Pos));
		break;
	case 0x08: /* CSI used as system clock source */
		sysclk = CSI_VALUE;
		break;
	case 0x10: /* HSE used as system clock source */
		sysclk = HSE_VALUE;
		break;
	case 0x18: /* PLL1 used as system clock source */
		/*
		 * PLL1_VCO = (HSE_VALUE or HSI_VALUE or CSI_VALUE/ PLLM) * PLLN
		 * SYSCLK = PLL1_VCO / PLL1R
		 */
		pllsource = (RCC->PLLCKSELR & RCC_PLLCKSELR_PLLSRC);
		pllm = ((RCC->PLLCKSELR & RCC_PLLCKSELR_DIVM1) >> RCC_PLLCKSELR_DIVM1_Pos);
		if ((RCC->PLLCFGR & RCC_PLLCFGR_PLL1FRACEN) != 0U) {
			pllfracn = (float_t)(uint32_t)(((RCC->PLL1FRACR & RCC_PLL1FRACR_FRACN)
						       >> RCC_PLL1FRACR_FRACN_Pos));
		} else {
			pllfracn = (float_t)0U;
		}
		if (pllm != 0U) {
			switch (pllsource) {
			case 0x02: /* HSE used as PLL1 clock source */
				pllvco = ((float_t)HSE_VALUE / (float_t)pllm) *
					((float_t)(uint32_t)(RCC->PLL1DIVR1 & RCC_PLL1DIVR1_DIVN) +
					 (pllfracn/(float_t)0x2000) + (float_t)1);
				break;
			case 0x01: /* CSI used as PLL1 clock source */
				pllvco = ((float_t)CSI_VALUE / (float_t)pllm) *
					((float_t)(uint32_t)(RCC->PLL1DIVR1 & RCC_PLL1DIVR1_DIVN) +
					 (pllfracn/(float_t)0x2000) + (float_t)1);
				break;
			case 0x00: /* HSI used as PLL1 clock source */
			default:
				hsivalue = (HSI_VALUE >> ((RCC->CR & RCC_CR_HSIDIV) >>
							  RCC_CR_HSIDIV_Pos));
				pllvco = ((float_t)hsivalue / (float_t)pllm) *
					((float_t)(uint32_t)(RCC->PLL1DIVR1 & RCC_PLL1DIVR1_DIVN) +
					 (pllfracn/(float_t)0x2000) + (float_t)1);
				break;
			}
			pllp = (((RCC->PLL1DIVR1 & RCC_PLL1DIVR1_DIVP) >>
				 RCC_PLL1DIVR1_DIVP_Pos) + 1U);
			sysclk = (uint32_t)(float_t)(pllvco/(float_t)pllp);
		} else {
			/* PLL disabled / M divider zero: no meaningful frequency */
			sysclk = 0U;
		}
		break;
	default: /* Unexpected, default to HSI used as system clk source (default after reset) */
		sysclk = (HSI_VALUE >> ((RCC->CR & RCC_CR_HSIDIV) >> RCC_CR_HSIDIV_Pos));
		break;
	}
	/* system clock frequency : CM7 CPU frequency */
	core_presc = (RCC->CDCFGR & RCC_CDCFGR_CPRE);
	/* Prescaler values >= 8 encode actual divisions by powers of two */
	if (core_presc >= 8U) {
		SystemCoreClock = (sysclk >> (core_presc - RCC_CDCFGR_CPRE_3 + 1U));
	} else {
		SystemCoreClock = sysclk;
	}
}
#endif /* CONFIG_SOC_SERIES_STM32H7RSX */
/*
 * Driver init: bring up oscillators and PLLs, program the bus
 * prescalers and flash latency, then switch SYSCLK to the devicetree
 * selected source. On dual-core parts only the Cortex-M7 performs the
 * full setup (under the RCC hardware semaphore).
 *
 * @return 0 on success, negative errno on unsupported configuration.
 */
int stm32_clock_control_init(const struct device *dev)
{
	int r = 0;
#if defined(CONFIG_CPU_CORTEX_M7)
	uint32_t old_hclk_freq;
	uint32_t new_hclk_freq;
	/* HW semaphore Clock enable */
#if defined(CONFIG_SOC_STM32H7A3XX) || defined(CONFIG_SOC_STM32H7A3XXQ) || \
	defined(CONFIG_SOC_STM32H7B0XX) || defined(CONFIG_SOC_STM32H7B0XXQ) || \
	defined(CONFIG_SOC_STM32H7B3XX) || defined(CONFIG_SOC_STM32H7B3XXQ)
	LL_AHB2_GRP1_EnableClock(LL_AHB2_GRP1_PERIPH_HSEM);
#elif !defined(CONFIG_SOC_SERIES_STM32H7RSX)
	/* The stm32h7RS series has no HSEM peripheral */
	LL_AHB4_GRP1_EnableClock(LL_AHB4_GRP1_PERIPH_HSEM);
#endif
	z_stm32_hsem_lock(CFG_HW_RCC_SEMID, HSEM_LOCK_DEFAULT_RETRY);
	/* Configure MCO1/MCO2 based on Kconfig */
	stm32_clock_control_mco_init();
	/* Set up individual enabled clocks */
	set_up_fixed_clock_sources();
	/* Set up PLLs */
	r = set_up_plls();
	if (r < 0) {
		return r;
	}
	/* Configure Voltage scale to comply with the desired system frequency */
	prepare_regulator_voltage_scale();
	/* Current hclk value */
	old_hclk_freq = get_hclk_frequency();
	/* AHB is HCLK clock to configure */
	new_hclk_freq = get_bus_clock(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC,
				      STM32_HPRE);
	/* Set flash latency */
	/* AHB/AXI/HCLK clock is SYSCLK / HPRE */
	/* If freq increases, set flash latency before any clock setting */
	if (new_hclk_freq > old_hclk_freq) {
		LL_SetFlashLatency(new_hclk_freq);
	}
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
	/*
	 * The default Flash latency is 3 WS which is not enough,
	 * set higher and correct later if needed
	 */
	LL_FLASH_SetLatency(LL_FLASH_LATENCY_6);
#endif /* CONFIG_SOC_SERIES_STM32H7RSX */
	/* Preset the prescalers prior to choosing SYSCLK */
	/* Prevents APB clock to go over limits */
	/* Set buses (Sys,AHB, APB1, APB2 & APB4) prescalers */
	LL_RCC_SetSysPrescaler(sysclk_prescaler(STM32_D1CPRE));
	LL_RCC_SetAHBPrescaler(ahb_prescaler(STM32_HPRE));
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
	LL_RCC_SetAPB1Prescaler(apb1_prescaler(STM32_PPRE1));
	LL_RCC_SetAPB2Prescaler(apb2_prescaler(STM32_PPRE2));
	LL_RCC_SetAPB4Prescaler(apb4_prescaler(STM32_PPRE4));
	LL_RCC_SetAPB5Prescaler(apb5_prescaler(STM32_PPRE5));
#else
	LL_RCC_SetAPB1Prescaler(apb1_prescaler(STM32_D2PPRE1));
	LL_RCC_SetAPB2Prescaler(apb2_prescaler(STM32_D2PPRE2));
	LL_RCC_SetAPB3Prescaler(apb3_prescaler(STM32_D1PPRE));
	LL_RCC_SetAPB4Prescaler(apb4_prescaler(STM32_D3PPRE));
#endif
	/* Set up sys clock */
	if (IS_ENABLED(STM32_SYSCLK_SRC_PLL)) {
		/* Set PLL1 as System Clock Source */
		LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_PLL1);
		while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_PLL1) {
		}
	} else if (IS_ENABLED(STM32_SYSCLK_SRC_HSE)) {
		/* Set sysclk source to HSE */
		LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_HSE);
		while (LL_RCC_GetSysClkSource() !=
		       LL_RCC_SYS_CLKSOURCE_STATUS_HSE) {
		}
	} else if (IS_ENABLED(STM32_SYSCLK_SRC_HSI)) {
		/* Set sysclk source to HSI */
		stm32_clock_switch_to_hsi();
	} else if (IS_ENABLED(STM32_SYSCLK_SRC_CSI)) {
		/* Set sysclk source to CSI */
		LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_CSI);
		while (LL_RCC_GetSysClkSource() !=
		       LL_RCC_SYS_CLKSOURCE_STATUS_CSI) {
		}
	} else {
		return -ENOTSUP;
	}
	/* Set FLASH latency */
	/* AHB/AXI/HCLK clock is SYSCLK / HPRE */
	/* If freq not increased, set flash latency after all clock setting */
	if (new_hclk_freq <= old_hclk_freq) {
		LL_SetFlashLatency(new_hclk_freq);
	}
	optimize_regulator_voltage_scale(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);
	z_stm32_hsem_unlock(CFG_HW_RCC_SEMID);
#endif /* CONFIG_CPU_CORTEX_M7 */
	ARG_UNUSED(dev);
	/* Update CMSIS variable */
	SystemCoreClock = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
	return r;
}
#if defined(STM32_HSE_CSS)
/* Weak application hook invoked on HSE Clock Security System failure;
 * boards may override it to react to loss of the external oscillator.
 */
void __weak stm32_hse_css_callback(void) {}
/* Called by the HAL in response to an HSE CSS interrupt */
void HAL_RCC_CSSCallback(void)
{
	stm32_hse_css_callback();
}
#endif
/**
 * @brief RCC device definition.
 *
 * NOTE(review): the previous comment claimed the init priority is
 * "intentionally set to 1"; the code actually uses
 * CONFIG_CLOCK_CONTROL_INIT_PRIORITY. The intent is that clock init
 * runs in PRE_KERNEL_1, just after SoC init -- confirm the Kconfig
 * default matches that expectation.
 */
DEVICE_DT_DEFINE(DT_NODELABEL(rcc),
		 stm32_clock_control_init,
		 NULL,
		 NULL, NULL,
		 PRE_KERNEL_1,
		 CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		 &stm32_clock_control_api);
``` | /content/code_sandbox/drivers/clock_control/clock_stm32_ll_h7.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 10,737 |
```c
/*
*
* Based on clock_control_mcux_sim.c, which is:
*
*/
#define DT_DRV_COMPAT nxp_kinetis_scg
#include <zephyr/drivers/clock_control.h>
#include <zephyr/dt-bindings/clock/kinetis_scg.h>
#include <soc.h>
#include <fsl_clock.h>
#define LOG_LEVEL CONFIG_CLOCK_CONTROL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control_scg);
#define MCUX_SCG_CLOCK_NODE(name) DT_INST_CHILD(0, name)
/*
 * clock_control on() hook. SCG clock gating is handled by SoC/board
 * init code, so this driver treats "on" as a no-op that always succeeds.
 */
static int mcux_scg_on(const struct device *dev,
		       clock_control_subsys_t sub_system)
{
	(void)dev;
	(void)sub_system;

	return 0;
}
/*
 * clock_control off() hook. Mirrors mcux_scg_on(): clock gating is not
 * managed here, so "off" is a no-op that always succeeds.
 */
static int mcux_scg_off(const struct device *dev,
			clock_control_subsys_t sub_system)
{
	(void)dev;
	(void)sub_system;

	return 0;
}
/*
 * clock_control get_rate() hook.
 *
 * Maps a KINETIS_SCG_* subsystem identifier onto the matching MCUX SDK
 * clock_name_t and queries the frequency via CLOCK_GetFreq().
 *
 * @param dev        Clock controller device (unused).
 * @param sub_system KINETIS_SCG_* clock identifier (cast to uint32_t).
 * @param rate       Output: frequency in Hz.
 *
 * @return 0 on success, -EINVAL for an unknown identifier.
 */
static int mcux_scg_get_rate(const struct device *dev,
			     clock_control_subsys_t sub_system,
			     uint32_t *rate)
{
	clock_name_t clock_name;
	switch ((uint32_t) sub_system) {
	case KINETIS_SCG_CORESYS_CLK:
		clock_name = kCLOCK_CoreSysClk;
		break;
	case KINETIS_SCG_BUS_CLK:
		clock_name = kCLOCK_BusClk;
		break;
#if !(defined(CONFIG_SOC_MKE17Z7) || defined(CONFIG_SOC_MKE17Z9))
	case KINETIS_SCG_FLEXBUS_CLK:
		clock_name = kCLOCK_FlexBusClk;
		break;
#endif
	case KINETIS_SCG_FLASH_CLK:
		clock_name = kCLOCK_FlashClk;
		break;
	case KINETIS_SCG_SOSC_CLK:
		clock_name = kCLOCK_ScgSysOscClk;
		break;
	case KINETIS_SCG_SIRC_CLK:
		clock_name = kCLOCK_ScgSircClk;
		break;
	case KINETIS_SCG_FIRC_CLK:
		clock_name = kCLOCK_ScgFircClk;
		break;
#if (defined(FSL_FEATURE_SCG_HAS_SPLL) && FSL_FEATURE_SCG_HAS_SPLL)
	case KINETIS_SCG_SPLL_CLK:
		clock_name = kCLOCK_ScgSysPllClk;
		break;
#endif /* (defined(FSL_FEATURE_SCG_HAS_SPLL) && FSL_FEATURE_SCG_HAS_SPLL) */
#if (defined(FSL_FEATURE_SCG_HAS_LPFLL) && FSL_FEATURE_SCG_HAS_LPFLL)
	/*
	 * NOTE(review): KINETIS_SCG_SPLL_CLK is reused here for the LPFLL
	 * output. On a part where both SPLL and LPFLL features were set
	 * this would be a duplicate case label and fail to compile --
	 * confirm whether a dedicated LPFLL binding constant should exist.
	 */
	case KINETIS_SCG_SPLL_CLK:
		clock_name = kCLOCK_ScgLpFllClk;
		break;
#endif /* (defined(FSL_FEATURE_SCG_HAS_LPFLL) && FSL_FEATURE_SCG_HAS_LPFLL) */
#if (defined(FSL_FEATURE_SCG_HAS_SOSCDIV1) && FSL_FEATURE_SCG_HAS_SOSCDIV1)
	case KINETIS_SCG_SOSC_ASYNC_DIV1_CLK:
		clock_name = kCLOCK_ScgSysOscAsyncDiv1Clk;
		break;
#endif /* (defined(FSL_FEATURE_SCG_HAS_SOSCDIV1) && FSL_FEATURE_SCG_HAS_SOSCDIV1) */
	case KINETIS_SCG_SOSC_ASYNC_DIV2_CLK:
		clock_name = kCLOCK_ScgSysOscAsyncDiv2Clk;
		break;
#if (defined(FSL_FEATURE_SCG_HAS_SIRCDIV1) && FSL_FEATURE_SCG_HAS_SIRCDIV1)
	case KINETIS_SCG_SIRC_ASYNC_DIV1_CLK:
		clock_name = kCLOCK_ScgSircAsyncDiv1Clk;
		break;
#endif /* (defined(FSL_FEATURE_SCG_HAS_SIRCDIV1) && FSL_FEATURE_SCG_HAS_SIRCDIV1) */
	case KINETIS_SCG_SIRC_ASYNC_DIV2_CLK:
		clock_name = kCLOCK_ScgSircAsyncDiv2Clk;
		break;
/*
 * Fix: the guard previously tested the misspelled macro
 * FSL_FEATURE_FSL_FEATURE_SCG_HAS_FIRCDIV1 (doubled prefix), which is
 * never defined, so the FIRC_ASYNC_DIV1 case was unconditionally
 * compiled out even on parts that have FIRCDIV1.
 */
#if (defined(FSL_FEATURE_SCG_HAS_FIRCDIV1) && FSL_FEATURE_SCG_HAS_FIRCDIV1)
	case KINETIS_SCG_FIRC_ASYNC_DIV1_CLK:
		clock_name = kCLOCK_ScgFircAsyncDiv1Clk;
		break;
#endif /* (defined(FSL_FEATURE_SCG_HAS_FIRCDIV1) && FSL_FEATURE_SCG_HAS_FIRCDIV1) */
	case KINETIS_SCG_FIRC_ASYNC_DIV2_CLK:
		clock_name = kCLOCK_ScgFircAsyncDiv2Clk;
		break;
#if (defined(FSL_FEATURE_SCG_HAS_SPLLDIV1) && FSL_FEATURE_SCG_HAS_SPLLDIV1)
	case KINETIS_SCG_SPLL_ASYNC_DIV1_CLK:
		clock_name = kCLOCK_ScgSysPllAsyncDiv1Clk;
		break;
#endif /* (defined(FSL_FEATURE_SCG_HAS_SPLLDIV1) && FSL_FEATURE_SCG_HAS_SPLLDIV1) */
#if (defined(FSL_FEATURE_SCG_HAS_SPLL) && FSL_FEATURE_SCG_HAS_SPLL)
	case KINETIS_SCG_SPLL_ASYNC_DIV2_CLK:
		clock_name = kCLOCK_ScgSysPllAsyncDiv2Clk;
		break;
#endif /* (defined(FSL_FEATURE_SCG_HAS_SPLL) && FSL_FEATURE_SCG_HAS_SPLL) */
#if (defined(FSL_FEATURE_SCG_HAS_FLLDIV1) && FSL_FEATURE_SCG_HAS_FLLDIV1)
	/*
	 * NOTE(review): a DIV2 case guarded by the FLLDIV1 feature macro
	 * looks inconsistent -- verify against the SDK feature headers.
	 */
	case KINETIS_SCG_LPFLL_ASYNC_DIV2_CLK:
		clock_name = kCLOCK_ScgSysLPFllAsyncDiv2Clk;
		break;
#endif /* (defined(FSL_FEATURE_SCG_HAS_FLLDIV1) && FSL_FEATURE_SCG_HAS_FLLDIV1) */
	default:
		LOG_ERR("Unsupported clock name");
		return -EINVAL;
	}
	*rate = CLOCK_GetFreq(clock_name);
	return 0;
}
/*
 * Driver init: if devicetree defines a clkout node, route the selected
 * source (slow/sosc/sirc/firc/spll) to the CLKOUT pin. Purely a
 * compile-time dispatch on the devicetree; fails the build for an
 * unsupported source.
 */
static int mcux_scg_init(const struct device *dev)
{
#if DT_NODE_HAS_STATUS(MCUX_SCG_CLOCK_NODE(clkout_clk), okay)
#if DT_SAME_NODE(DT_CLOCKS_CTLR(MCUX_SCG_CLOCK_NODE(clkout_clk)), MCUX_SCG_CLOCK_NODE(slow_clk))
	CLOCK_SetClkOutSel(kClockClkoutSelScgSlow);
#elif DT_SAME_NODE(DT_CLOCKS_CTLR(MCUX_SCG_CLOCK_NODE(clkout_clk)), MCUX_SCG_CLOCK_NODE(sosc_clk))
	CLOCK_SetClkOutSel(kClockClkoutSelSysOsc);
#elif DT_SAME_NODE(DT_CLOCKS_CTLR(MCUX_SCG_CLOCK_NODE(clkout_clk)), MCUX_SCG_CLOCK_NODE(sirc_clk))
	CLOCK_SetClkOutSel(kClockClkoutSelSirc);
#elif DT_SAME_NODE(DT_CLOCKS_CTLR(MCUX_SCG_CLOCK_NODE(clkout_clk)), MCUX_SCG_CLOCK_NODE(firc_clk))
	CLOCK_SetClkOutSel(kClockClkoutSelFirc);
#elif DT_SAME_NODE(DT_CLOCKS_CTLR(MCUX_SCG_CLOCK_NODE(clkout_clk)), MCUX_SCG_CLOCK_NODE(spll_clk))
	CLOCK_SetClkOutSel(kClockClkoutSelSysPll);
#else
#error Unsupported SCG clkout clock source
#endif
#endif /* DT_NODE_HAS_STATUS(MCUX_SCG_CLOCK_NODE(clkout_clk), okay) */
	return 0;
}
/* clock_control driver API hooks exposed by the Kinetis SCG driver */
static const struct clock_control_driver_api mcux_scg_driver_api = {
	.on = mcux_scg_on,
	.off = mcux_scg_off,
	.get_rate = mcux_scg_get_rate,
};
/* Single SCG instance, initialized before kernel services come up */
DEVICE_DT_INST_DEFINE(0,
		      mcux_scg_init,
		      NULL,
		      NULL, NULL,
		      PRE_KERNEL_1, CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		      &mcux_scg_driver_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_mcux_scg.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,658 |
```c
/*
*
*
*/
#include <soc.h>
#include <stm32_ll_bus.h>
#include <stm32_ll_rcc.h>
#include <stm32_ll_utils.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include "clock_stm32_ll_common.h"
#if defined(STM32_PLL_ENABLED)
/**
 * @brief Set up pll configuration
 *
 * Computes the PLLMUL/PREDIV register fields from the devicetree values
 * and programs the PLL domain for SYSCLK. On PREDIV1-capable parts the
 * pre-divider is programmed separately; otherwise HSI is fixed at /2
 * and the pre-divider is folded into the HSE source selector.
 */
__unused
void config_pll_sysclock(void)
{
	/*
	 * Fix: pll_source is now zero-initialized. Previously it was read
	 * uninitialized (undefined behavior) when neither HSE nor HSI was
	 * selected and __ASSERT was compiled out (release builds).
	 */
	uint32_t pll_source = 0U;
	uint32_t pll_mul, pll_div;
	/*
	 * PLL MUL
	 * 2  -> LL_RCC_PLL_MUL_2  -> 0x00000000
	 * 3  -> LL_RCC_PLL_MUL_3  -> 0x00040000
	 * 4  -> LL_RCC_PLL_MUL_4  -> 0x00080000
	 * ...
	 * 16 -> LL_RCC_PLL_MUL_16 -> 0x00380000
	 */
	pll_mul = ((STM32_PLL_MULTIPLIER - 2) << RCC_CFGR_PLLMUL_Pos);
	/*
	 * PLL PREDIV
	 * 1  -> LL_RCC_PREDIV_DIV_1  -> 0x00000000
	 * 2  -> LL_RCC_PREDIV_DIV_2  -> 0x00000001
	 * 3  -> LL_RCC_PREDIV_DIV_3  -> 0x00000002
	 * ...
	 * 16 -> LL_RCC_PREDIV_DIV_16 -> 0x0000000F
	 */
	pll_div = STM32_PLL_PREDIV - 1;
#if defined(RCC_PLLSRC_PREDIV1_SUPPORT)
	/*
	 * PREDIV1 support is a specific RCC configuration present on
	 * following SoCs: STM32F04xx, STM32F07xx, STM32F09xx,
	 * STM32F030xC, STM32F302xE, STM32F303xE and STM32F39xx
	 * cf Reference manual for more details
	 */
	/* Configure PLL source */
	if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		pll_source = LL_RCC_PLLSOURCE_HSE;
	} else if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		pll_source = LL_RCC_PLLSOURCE_HSI;
	} else {
		__ASSERT(0, "Invalid source");
	}
	LL_RCC_PLL_ConfigDomain_SYS(pll_source, pll_mul, pll_div);
#else
	/* Configure PLL source */
	if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		/* Non-PREDIV1 parts fold the pre-divider into the HSE selector */
		pll_source = LL_RCC_PLLSOURCE_HSE | pll_div;
	} else if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		/* HSI feeds the PLL through a fixed /2 on these parts */
		pll_source = LL_RCC_PLLSOURCE_HSI_DIV_2;
	} else {
		__ASSERT(0, "Invalid source");
	}
	LL_RCC_PLL_ConfigDomain_SYS(pll_source, pll_mul);
#endif /* RCC_PLLSRC_PREDIV1_SUPPORT */
}
/**
 * @brief Return pllout frequency
 *
 * Recomputes the PLL output in Hz from the devicetree-provided
 * multiplier/pre-divider and the configured source (HSE or HSI).
 *
 * @return PLL output frequency in Hz, or 0 if no valid source is set.
 */
__unused
uint32_t get_pllout_frequency(void)
{
	uint32_t pll_input_freq, pll_mul, pll_div;
	/*
	 * PLL MUL
	 * 2 -> LL_RCC_PLL_MUL_2 -> 0x00000000
	 * 3 -> LL_RCC_PLL_MUL_3 -> 0x00040000
	 * 4 -> LL_RCC_PLL_MUL_4 -> 0x00080000
	 * ...
	 * 16 -> LL_RCC_PLL_MUL_16 -> 0x00380000
	 */
	pll_mul = ((STM32_PLL_MULTIPLIER - 2) << RCC_CFGR_PLLMUL_Pos);
	/*
	 * PLL PREDIV
	 * 1 -> LL_RCC_PREDIV_DIV_1 -> 0x00000000
	 * 2 -> LL_RCC_PREDIV_DIV_2 -> 0x00000001
	 * 3 -> LL_RCC_PREDIV_DIV_3 -> 0x00000002
	 * ...
	 * 16 -> LL_RCC_PREDIV_DIV_16 -> 0x0000000F
	 */
	pll_div = STM32_PLL_PREDIV - 1;
#if defined(RCC_PLLSRC_PREDIV1_SUPPORT)
	/*
	 * PREDIV1 support is a specific RCC configuration present on
	 * following SoCs: STM32F04xx, STM32F07xx, STM32F09xx,
	 * STM32F030xC, STM32F302xE, STM32F303xE and STM32F39xx
	 * cf Reference manual for more details
	 */
	/* Configure PLL source */
	if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		pll_input_freq = STM32_HSE_FREQ;
	} else if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		pll_input_freq = STM32_HSI_FREQ;
	} else {
		return 0;
	}
	return __LL_RCC_CALC_PLLCLK_FREQ(pll_input_freq, pll_mul, pll_div);
#else
	/* Configure PLL source */
	if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		pll_input_freq = STM32_HSE_FREQ;
	} else if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		/* HSI feeds the PLL through a fixed /2 on these parts */
		pll_input_freq = STM32_HSI_FREQ / 2;
	} else {
		return 0;
	}
	return __LL_RCC_CALC_PLLCLK_FREQ(pll_input_freq, pll_mul);
#endif /* RCC_PLLSRC_PREDIV1_SUPPORT */
}
#endif /* defined(STM32_PLL_ENABLED) */
/**
 * @brief Activate default clocks
 *
 * Enables peripheral clocks that the rest of the system depends on
 * regardless of devicetree configuration (PWR, and SYSCFG where the
 * enabled drivers require it).
 */
void config_enable_default_clocks(void)
{
	/* Enable PWR clock, required to access BDCR and PWR_CR */
	LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_PWR);
#ifndef CONFIG_SOC_SERIES_STM32F3X
#if defined(CONFIG_EXTI_STM32) || defined(CONFIG_USB_DC_STM32)
	/* Enable System Configuration Controller clock. */
	LL_APB1_GRP2_EnableClock(LL_APB1_GRP2_PERIPH_SYSCFG);
#endif
#else
#if defined(CONFIG_USB_DC_STM32) && defined(SYSCFG_CFGR1_USB_IT_RMP)
	/* Enable System Configuration Controller clock. */
	/* SYSCFG is required to remap IRQ to avoid conflicts with CAN */
	/* cf 14.1.3, RM0316 */
	LL_APB2_GRP1_EnableClock(LL_APB2_GRP1_PERIPH_SYSCFG);
#endif
#endif /* !CONFIG_SOC_SERIES_STM32F3X */
}
``` | /content/code_sandbox/drivers/clock_control/clock_stm32f0_f3.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,413 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_CLOCK_CONTROL_CLOCK_CONTROL_LPC11U6X_H_
#define ZEPHYR_DRIVERS_CLOCK_CONTROL_CLOCK_CONTROL_LPC11U6X_H_
#include <zephyr/drivers/pinctrl.h>
/* SYSAHBCLKCTRL register: per-peripheral AHB clock enable bits */
#define LPC11U6X_SYS_AHB_CLK_CTRL_I2C0 (1 << 5)
#define LPC11U6X_SYS_AHB_CLK_CTRL_GPIO (1 << 6)
#define LPC11U6X_SYS_AHB_CLK_CTRL_USART0 (1 << 12)
#define LPC11U6X_SYS_AHB_CLK_CTRL_USB (1 << 14)
#define LPC11U6X_SYS_AHB_CLK_CTRL_IOCON (1 << 16)
#define LPC11U6X_SYS_AHB_CLK_CTRL_PINT (1 << 19)
#define LPC11U6X_SYS_AHB_CLK_CTRL_USART1 (1 << 20)
#define LPC11U6X_SYS_AHB_CLK_CTRL_USART2 (1 << 21)
#define LPC11U6X_SYS_AHB_CLK_CTRL_USART3_4 (1 << 22)
#define LPC11U6X_SYS_AHB_CLK_CTRL_I2C1 (1 << 25)
#define LPC11U6X_SYS_AHB_CLK_CTRL_SRAM1 (1 << 26)
#define LPC11U6X_SYS_AHB_CLK_CTRL_USB_SRAM (1 << 27)
/* PDRUNCFG register: power-down control bits for oscillators/PLL */
#define LPC11U6X_PDRUNCFG_IRC_PD (1 << 1)
#define LPC11U6X_PDRUNCFG_SYSOSC_PD (1 << 5)
#define LPC11U6X_PDRUNCFG_PLL_PD (1 << 7)
#define LPC11U6X_PDRUNCFG_MASK 0xC800
/* SYSPLLCLKSEL values: system PLL input source */
#define LPC11U6X_SYS_PLL_CLK_SEL_IRC 0x0
#define LPC11U6X_SYS_PLL_CLK_SEL_SYSOSC 0x1
/* FLASHTIM register (flash access timing / wait states) */
#define LPC11U6X_FLASH_TIMING_REG 0x4003C010
#define LPC11U6X_FLASH_TIMING_3CYCLES 0x2
#define LPC11U6X_FLASH_TIMING_MASK 0x3
/* SYSPLLCTRL register: M divider value and P post-divider field */
#define LPC11U6X_SYS_PLL_CTRL_MSEL_MASK 0x1F
#define LPC11U6X_SYS_PLL_CTRL_PSEL_SHIFT 5
#define LPC11U6X_SYS_PLL_CTRL_PSEL_MASK 0x3
/* MAINCLKSEL value: main clock driven by the PLL output */
#define LPC11U6X_MAIN_CLK_SRC_PLLOUT 0x3
/* PRESETCTRL register: peripheral reset-release bits */
#define LPC11U6X_PRESET_CTRL_I2C0 (1 << 1)
#define LPC11U6X_PRESET_CTRL_I2C1 (1 << 3)
#define LPC11U6X_PRESET_CTRL_FRG (1 << 4)
#define LPC11U6X_PRESET_CTRL_USART1 (1 << 5)
#define LPC11U6X_PRESET_CTRL_USART2 (1 << 6)
#define LPC11U6X_PRESET_CTRL_USART3 (1 << 7)
#define LPC11U6X_PRESET_CTRL_USART4 (1 << 8)
/* Reference clock rate handed to the USART drivers, in Hz */
#define LPC11U6X_USART_CLOCK_RATE 14745600
/*
 * Memory-mapped register layout of the LPC11U6x SYSCON block.
 * Field order and the reserved padding arrays define the hardware
 * offsets -- do not reorder or resize any member.
 */
struct lpc11u6x_syscon_regs {
	volatile uint32_t sys_mem_remap;        /* System memory remap */
	volatile uint32_t p_reset_ctrl;         /* Peripheral reset control */
	volatile uint32_t sys_pll_ctrl;         /* System PLL control */
	volatile const uint32_t sys_pll_stat;   /* System PLL status */
	volatile uint32_t usb_pll_ctrl;         /* USB PLL control */
	volatile const uint32_t usb_pll_stat;   /* USB PLL status */
	volatile const uint32_t reserved1;
	volatile uint32_t rtc_osc_ctrl;         /* RTC oscillator control */
	volatile uint32_t sys_osc_ctrl;         /* System oscillator control */
	volatile uint32_t wdt_osc_ctrl;         /* Watchdog oscillator
						 * control
						 */
	volatile uint32_t irc_ctrl;             /* IRC Control */
	volatile const uint32_t reserved2;
	volatile uint32_t sys_rst_stat;         /* System reset status */
	volatile const uint32_t reserved3[3];
	volatile uint32_t sys_pll_clk_sel;      /* System PLL clock source */
	volatile uint32_t sys_pll_clk_uen;      /* System PLL source update */
	volatile uint32_t usb_pll_clk_sel;      /* USB PLL clock source */
	volatile uint32_t usb_pll_clk_uen;      /* USB PLL clock source
						 * update
						 */
	volatile const uint32_t reserved4[8];
	volatile uint32_t main_clk_sel;         /* Main clock select */
	volatile uint32_t main_clk_uen;         /* Main clock update */
	volatile uint32_t sys_ahb_clk_div;      /* System clock divider */
	volatile const uint32_t reserved5;
	volatile uint32_t sys_ahb_clk_ctrl;     /* System clock control */
	volatile const uint32_t reserved6[4];
	volatile uint32_t ssp0_clk_div;         /* SSP0 clock divider */
	volatile uint32_t usart0_clk_div;       /* USART0 clock divider */
	volatile uint32_t ssp1_clk_div;         /* SSP1 clock divider */
	volatile uint32_t frg_clk_div;          /* USART 1-4 fractional baud
						 * rate generator clock divider
						 */
	volatile const uint32_t reserved7[7];
	volatile uint32_t usb_clk_sel;          /* USB clock select */
	volatile uint32_t usb_clk_uen;          /* USB clock update */
	volatile uint32_t usb_clk_div;          /* USB clock divider */
	volatile const uint32_t reserved8[5];
	volatile uint32_t clk_out_sel;          /* CLKOUT source select */
	volatile uint32_t clk_out_uen;          /* CLKOUT source update */
	volatile uint32_t clk_out_div;          /* CLKOUT divider */
	volatile const uint32_t reserved9;
	volatile uint32_t uart_frg_div;         /* USART1-4 fractional
						 * generator divider
						 */
	volatile uint32_t uart_frg_mult;        /* USART1-4 fractional
						 * generator multiplier
						 */
	volatile const uint32_t reserved10;
	volatile uint32_t ext_trace_cmd;        /* External trace buffer
						 * command
						 */
	volatile const uint32_t pio_por_cap[3]; /* PIO state captured at
						 * power-on reset.
						 * NOTE(review): original
						 * comment said "CLKOUT
						 * source select", an
						 * apparent copy-paste error
						 * -- confirm against UM10732
						 */
	volatile const uint32_t reserved11[10];
	volatile uint32_t iocon_clk_div[7];     /* IOCON clock divider */
	volatile uint32_t bod_ctrl;             /* Brown-out detect control */
	volatile uint32_t sys_tck_cal;          /* System tick calibration */
	volatile const uint32_t reserved12[6];
	volatile uint32_t irq_latency;          /* IRQ latency */
	volatile uint32_t nmi_src;              /* NMI source control */
	volatile uint32_t pint_sel[8];          /* GPIO pin interrupt select */
	volatile uint32_t usb_clk_ctrl;         /* USB clock control */
	volatile const uint32_t usb_clk_stat;   /* USB clock status */
	volatile uint32_t reserved13[25];       /* NOTE(review): only reserved
						 * run without const -- verify
						 * whether it should match the
						 * others
						 */
	volatile uint32_t starterp0;            /* Start logic 0 int wake-up */
	volatile const uint32_t reserved14[3];
	volatile uint32_t starterp1;            /* Start logic 1 int wake-up */
	volatile const uint32_t reserved15[6];
	volatile uint32_t pd_sleep_cfg;         /* Deep-sleep power-down
						 * states
						 */
	volatile uint32_t pd_awake_cfg;         /* Power-down states for
						 * wake-up from deep-sleep
						 */
	volatile uint32_t pd_run_cfg;           /* Power configuration */
	volatile const uint32_t reserved16[110];
	volatile const uint32_t device_id;      /* Device identifier */
};
/* Per-instance constant configuration: register base and pin control */
struct lpc11u6x_syscon_config {
	struct lpc11u6x_syscon_regs *syscon;    /* SYSCON register block */
	const struct pinctrl_dev_config *pincfg; /* Pin configuration */
};
/* Per-instance mutable state */
struct lpc11u6x_syscon_data {
	struct k_mutex mutex;      /* Serializes register accesses */
	uint8_t frg_in_use;        /* Users of the fractional generator */
	uint8_t usart34_in_use;    /* Users of the shared USART3/4 clock */
};
#endif /* ZEPHYR_DRIVERS_CLOCK_CONTROL_CLOCK_CONTROL_LPC11U6X_H_ */
``` | /content/code_sandbox/drivers/clock_control/clock_control_lpc11u6x.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,709 |
```c
/*
*
*
*/
#include <zephyr/drivers/clock_control.h>
#include <zephyr/dt-bindings/clock/intel_socfpga_clock.h>
#include <zephyr/logging/log.h>
#include "clock_control_agilex5_ll.h"
#define DT_DRV_COMPAT intel_agilex5_clock
LOG_MODULE_REGISTER(clock_control_agilex5, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
/* ROM part of the Agilex5 clock controller: MMIO region description. */
struct clock_control_config {
	DEVICE_MMIO_ROM;
};
/* RAM part of the Agilex5 clock controller: mapped MMIO address. */
struct clock_control_data {
	DEVICE_MMIO_RAM;
};
/*
 * Driver init: map the clock manager MMIO region and hand its base
 * address to the low-level (LL) clock driver.
 *
 * Always returns 0.
 */
static int clock_init(const struct device *dev)
{
	DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);

	/* Initialize the low layer clock driver */
	clock_agilex5_ll_init(DEVICE_MMIO_GET(dev));

	LOG_INF("Intel Agilex5 clock driver initialized!");

	return 0;
}
/*
 * Report the frequency (Hz) of the clock selected by @p sub_system.
 *
 * @param dev        Clock controller device (unused; rates come from the LL layer).
 * @param sub_system One of the INTEL_SOCFPGA_CLOCK_* identifiers, passed by value.
 * @param rate       Output: clock frequency in Hz. Left untouched on error.
 *
 * @return 0 on success, -ENOTSUP for an unknown clock identifier.
 */
static int clock_get_rate(const struct device *dev, clock_control_subsys_t sub_system,
			  uint32_t *rate)
{
	ARG_UNUSED(dev);

	switch ((intptr_t)sub_system) {
	case INTEL_SOCFPGA_CLOCK_MPU:
		*rate = get_mpu_clk();
		break;

	case INTEL_SOCFPGA_CLOCK_WDT:
		*rate = get_wdt_clk();
		break;

	case INTEL_SOCFPGA_CLOCK_UART:
		*rate = get_uart_clk();
		break;

	case INTEL_SOCFPGA_CLOCK_MMC:
		*rate = get_mmc_clk();
		break;

	case INTEL_SOCFPGA_CLOCK_TIMER:
		*rate = get_timer_clk();
		break;

	default:
		/* Fix: no trailing '\n' (the Zephyr log backend terminates each
		 * message itself) and cast to long so %ld matches the argument
		 * width on both 32-bit and 64-bit builds.
		 */
		LOG_ERR("Clock ID %ld is not supported", (long)(intptr_t)sub_system);
		return -ENOTSUP;
	}

	return 0;
}
/* Only rate queries are supported; gates are managed by the SDM/boot firmware. */
static const struct clock_control_driver_api clock_api = {.get_rate = clock_get_rate};

/* Instantiate one clock controller device per enabled devicetree instance. */
#define CLOCK_CONTROL_DEVICE(_inst) \
 \
	static struct clock_control_data clock_control_data_##_inst; \
 \
	static const struct clock_control_config clock_control_config_##_inst = { \
		DEVICE_MMIO_ROM_INIT(DT_DRV_INST(_inst)), \
	}; \
 \
	DEVICE_DT_INST_DEFINE(_inst, clock_init, NULL, &clock_control_data_##_inst, \
			      &clock_control_config_##_inst, PRE_KERNEL_1, \
			      CONFIG_CLOCK_CONTROL_INIT_PRIORITY, &clock_api);

DT_INST_FOREACH_STATUS_OKAY(CLOCK_CONTROL_DEVICE)
``` | /content/code_sandbox/drivers/clock_control/clock_control_agilex5.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 504 |
```c
/*
*
*/
#include <soc.h>
#include <zephyr/sys/onoff.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/nrf_clock_control.h>
#include "nrf_clock_calibration.h"
#include <nrfx_clock.h>
#include <zephyr/logging/log.h>
#include <zephyr/shell/shell.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(clock_control, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
#define DT_DRV_COMPAT nordic_nrf_clock

/* Ownership context bits stored alongside the status in sub_data->flags:
 * a subsystem started via the onoff manager may only be stopped via it,
 * and likewise for the raw clock_control API.
 */
#define CTX_ONOFF BIT(6)
#define CTX_API BIT(7)
#define CTX_MASK (CTX_ONOFF | CTX_API)

/* Low bits of sub_data->flags hold the clock_control status value. */
#define STATUS_MASK 0x7
#define GET_STATUS(flags) (flags & STATUS_MASK)
#define GET_CTX(flags) (flags & CTX_MASK)

/* Used only by HF clock */
#define HF_USER_BT BIT(0)
#define HF_USER_GENERIC BIT(1)

/* Helper logging macros which prepends subsys name to the log. */
#ifdef CONFIG_LOG
#define CLOCK_LOG(lvl, dev, subsys, ...) \
	LOG_##lvl("%s: " GET_ARG_N(1, __VA_ARGS__), \
		get_sub_config(dev, (enum clock_control_nrf_type)subsys)->name \
		COND_CODE_0(NUM_VA_ARGS_LESS_1(__VA_ARGS__),\
				(), (, GET_ARGS_LESS_N(1, __VA_ARGS__))))
#else
#define CLOCK_LOG(...)
#endif

#define ERR(dev, subsys, ...) CLOCK_LOG(ERR, dev, subsys, __VA_ARGS__)
#define WRN(dev, subsys, ...) CLOCK_LOG(WRN, dev, subsys, __VA_ARGS__)
#define INF(dev, subsys, ...) CLOCK_LOG(INF, dev, subsys, __VA_ARGS__)
#define DBG(dev, subsys, ...) CLOCK_LOG(DBG, dev, subsys, __VA_ARGS__)
/* Clock subsys structure */
/* Clock subsys structure: runtime state of one clock subsystem. */
struct nrf_clock_control_sub_data {
	clock_control_cb_t cb;  /* One-shot "clock started" callback */
	void *user_data;        /* Argument passed to cb */
	uint32_t flags;         /* STATUS_MASK status | CTX_* owner bits */
};

typedef void (*clk_ctrl_func_t)(void);

/* Clock subsys static configuration */
struct nrf_clock_control_sub_config {
	clk_ctrl_func_t start; /* Clock start function */
	clk_ctrl_func_t stop; /* Clock stop function */
#ifdef CONFIG_LOG
	const char *name;
#endif
};

struct nrf_clock_control_data {
	struct onoff_manager mgr[CLOCK_CONTROL_NRF_TYPE_COUNT];
	struct nrf_clock_control_sub_data subsys[CLOCK_CONTROL_NRF_TYPE_COUNT];
};

struct nrf_clock_control_config {
	struct nrf_clock_control_sub_config
					subsys[CLOCK_CONTROL_NRF_TYPE_COUNT];
};

/* Bitmask of HFCLK requesters (HF_USER_BT / HF_USER_GENERIC). */
static atomic_t hfclk_users;
/* Uptime stamps of last HFCLK start/stop; maintained only for the shell. */
static uint64_t hf_start_tstamp;
static uint64_t hf_stop_tstamp;
/* Return the mutable per-subsystem state for clock @p type. */
static struct nrf_clock_control_sub_data *get_sub_data(const struct device *dev,
						       enum clock_control_nrf_type type)
{
	struct nrf_clock_control_data *drv_data = dev->data;
	struct nrf_clock_control_sub_data *sub = &drv_data->subsys[type];

	return sub;
}
/* Return the static (ROM) configuration for clock @p type. */
static const struct nrf_clock_control_sub_config *get_sub_config(const struct device *dev,
								 enum clock_control_nrf_type type)
{
	const struct nrf_clock_control_config *cfg = dev->config;

	return &cfg->subsys[type];
}
/* Return the onoff manager that tracks users of clock @p type. */
static struct onoff_manager *get_onoff_manager(const struct device *dev,
					       enum clock_control_nrf_type type)
{
	struct nrf_clock_control_data *drv_data = dev->data;
	struct onoff_manager *mgr = &drv_data->mgr[type];

	return mgr;
}
/* Singleton clock device; this driver supports exactly one instance. */
#define CLOCK_DEVICE DEVICE_DT_GET(DT_NODELABEL(clock))

/* Public helper: expose the onoff manager of a clock subsystem so other
 * kernel/driver code can request/release clocks via the onoff service.
 */
struct onoff_manager *z_nrf_clock_control_get_onoff(clock_control_subsys_t sys)
{
	return get_onoff_manager(CLOCK_DEVICE,
				(enum clock_control_nrf_type)(size_t)sys);
}
/* clock_control API: report OFF/STARTING/ON status of @p subsys
 * from the status bits kept in the subsystem flags word.
 */
static enum clock_control_status get_status(const struct device *dev,
					    clock_control_subsys_t subsys)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)subsys;

	__ASSERT_NO_MSG(type < CLOCK_CONTROL_NRF_TYPE_COUNT);

	return GET_STATUS(get_sub_data(dev, type)->flags);
}
/* Transition a subsystem to OFF on behalf of context @p ctx (CTX_API or
 * CTX_ONOFF). Fails with -EPERM if the clock is owned by the other context,
 * so a clock started through one interface cannot be stopped through the
 * other. Runs under irq_lock to make the read-modify-write of flags atomic.
 */
static int set_off_state(uint32_t *flags, uint32_t ctx)
{
	int err = 0;
	unsigned int key = irq_lock();
	uint32_t current_ctx = GET_CTX(*flags);

	if ((current_ctx != 0) && (current_ctx != ctx)) {
		err = -EPERM;
	} else {
		*flags = CLOCK_CONTROL_STATUS_OFF;
	}

	irq_unlock(key);

	return err;
}
/* Transition a subsystem from OFF to STARTING and record the owning context.
 * Returns -EPERM when another context owns the clock, -EALREADY when the
 * same context already started it. Runs under irq_lock so check-and-set of
 * the flags word is atomic.
 */
static int set_starting_state(uint32_t *flags, uint32_t ctx)
{
	int err = 0;
	unsigned int key = irq_lock();
	uint32_t current_ctx = GET_CTX(*flags);

	if ((*flags & (STATUS_MASK)) == CLOCK_CONTROL_STATUS_OFF) {
		*flags = CLOCK_CONTROL_STATUS_STARTING | ctx;
	} else if (current_ctx != ctx) {
		err = -EPERM;
	} else {
		err = -EALREADY;
	}

	irq_unlock(key);

	return err;
}
/* Mark a subsystem as ON while preserving its owner context bits. */
static void set_on_state(uint32_t *flags)
{
	unsigned int key = irq_lock();
	uint32_t owner = GET_CTX(*flags);

	*flags = CLOCK_CONTROL_STATUS_ON | owner;
	irq_unlock(key);
}
/* Common "clock is now running" handler: latch the ON state, then invoke
 * the one-shot user callback (if any). The callback pointer is cleared
 * before the state change so it cannot fire twice.
 */
static void clkstarted_handle(const struct device *dev,
			      enum clock_control_nrf_type type)
{
	struct nrf_clock_control_sub_data *sub_data = get_sub_data(dev, type);
	clock_control_cb_t callback = sub_data->cb;
	void *user_data = sub_data->user_data;

	sub_data->cb = NULL;
	set_on_state(&sub_data->flags);
	DBG(dev, type, "Clock started");

	if (callback) {
		callback(dev, (clock_control_subsys_t)type, user_data);
	}
}
/* nRF52 anomaly 132 workaround: delay the very first LFCLK start after
 * power-up by a configured busy-wait. Applied only once per boot.
 */
static inline void anomaly_132_workaround(void)
{
#if (CONFIG_NRF52_ANOMALY_132_DELAY_US - 0)
	static bool once;

	if (!once) {
		k_busy_wait(CONFIG_NRF52_ANOMALY_132_DELAY_US);
		once = true;
	}
#endif
}
/* Start LFCLK; applies the anomaly-132 delay first when enabled. */
static void lfclk_start(void)
{
	if (IS_ENABLED(CONFIG_NRF52_ANOMALY_132_WORKAROUND)) {
		anomaly_132_workaround();
	}

	nrfx_clock_lfclk_start();
}

/* Stop LFCLK; notify the RC calibration module first so it can abort. */
static void lfclk_stop(void)
{
	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
		z_nrf_clock_calibration_lfclk_stopped();
	}

	nrfx_clock_lfclk_stop();
}

/* Start the HF crystal; timestamp is recorded only for the shell command. */
static void hfclk_start(void)
{
	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_SHELL)) {
		hf_start_tstamp = k_uptime_get();
	}

	nrfx_clock_hfclk_start();
}

/* Stop the HF crystal; timestamp is recorded only for the shell command. */
static void hfclk_stop(void)
{
	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_SHELL)) {
		hf_stop_tstamp = k_uptime_get();
	}

	nrfx_clock_hfclk_stop();
}
#if NRF_CLOCK_HAS_HFCLK192M
/* Start/stop thin wrappers for the 192 MHz domain (nRF53 only). */
static void hfclk192m_start(void)
{
	nrfx_clock_start(NRF_CLOCK_DOMAIN_HFCLK192M);
}

static void hfclk192m_stop(void)
{
	nrfx_clock_stop(NRF_CLOCK_DOMAIN_HFCLK192M);
}
#endif

#if NRF_CLOCK_HAS_HFCLKAUDIO
/* Start/stop thin wrappers for the audio PLL domain. */
static void hfclkaudio_start(void)
{
	nrfx_clock_start(NRF_CLOCK_DOMAIN_HFCLKAUDIO);
}

static void hfclkaudio_stop(void)
{
	nrfx_clock_stop(NRF_CLOCK_DOMAIN_HFCLKAUDIO);
}
#endif
static uint32_t *get_hf_flags(void)
{
struct nrf_clock_control_data *data = CLOCK_DEVICE->data;
return &data->subsys[CLOCK_CONTROL_NRF_TYPE_HFCLK].flags;
}
/* HFCLK start path for generic (non-Bluetooth) users. If the BT controller
 * has already brought the crystal up, skip the hardware start and report
 * "started" immediately. The registration of the generic user and the
 * running-state check must be one atomic step, hence irq_lock.
 */
static void generic_hfclk_start(void)
{
	nrf_clock_hfclk_t type;
	bool already_started = false;
	unsigned int key = irq_lock();

	hfclk_users |= HF_USER_GENERIC;
	if (hfclk_users & HF_USER_BT) {
		(void)nrfx_clock_is_running(NRF_CLOCK_DOMAIN_HFCLK, &type);
		if (type == NRF_CLOCK_HFCLK_HIGH_ACCURACY) {
			already_started = true;
			/* Set on state in case clock interrupt comes and we
			 * want to avoid handling that.
			 */
			set_on_state(get_hf_flags());
		}
	}

	irq_unlock(key);

	if (already_started) {
		/* Clock already started by z_nrf_clock_bt_ctlr_hf_request */
		clkstarted_handle(CLOCK_DEVICE,
				  CLOCK_CONTROL_NRF_TYPE_HFCLK);
		return;
	}

	hfclk_start();
}
/* HFCLK stop path for generic users: deregister and stop the crystal
 * unless the BT controller still needs it.
 */
static void generic_hfclk_stop(void)
{
	/* It's not enough to use only atomic_and() here for synchronization,
	 * as the thread could be preempted right after that function but
	 * before hfclk_stop() is called and the preempting code could request
	 * the HFCLK again. Then, the HFCLK would be stopped inappropriately
	 * and hfclk_user would be left with an incorrect value.
	 */
	unsigned int key = irq_lock();

	hfclk_users &= ~HF_USER_GENERIC;
	/* Skip stopping if BT is still requesting the clock. */
	if (!(hfclk_users & HF_USER_BT)) {
		hfclk_stop();
	}

	irq_unlock(key);
}
/* Fast-path HFCLK request used by the Bluetooth controller. Registers the
 * BT user atomically; the hardware start is skipped when a generic user
 * already has the crystal running.
 */
void z_nrf_clock_bt_ctlr_hf_request(void)
{
	if (atomic_or(&hfclk_users, HF_USER_BT) & HF_USER_GENERIC) {
		/* generic request already activated clock. */
		return;
	}

	hfclk_start();
}
/* Fast-path HFCLK release used by the Bluetooth controller. */
void z_nrf_clock_bt_ctlr_hf_release(void)
{
	/* It's not enough to use only atomic_and() here for synchronization,
	 * see the explanation in generic_hfclk_stop().
	 */
	unsigned int key = irq_lock();

	hfclk_users &= ~HF_USER_BT;
	/* Skip stopping if generic is still requesting the clock. */
	if (!(hfclk_users & HF_USER_GENERIC)) {
		hfclk_stop();
	}

	irq_unlock(key);
}
/* Shared stop path for both the API and onoff contexts: validate/flip the
 * ownership state first, then call the subsystem's hardware stop routine.
 *
 * @return 0 on success, -EPERM if @p ctx does not own the clock.
 */
static int stop(const struct device *dev, clock_control_subsys_t subsys,
		uint32_t ctx)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)subsys;
	struct nrf_clock_control_sub_data *subdata = get_sub_data(dev, type);
	int err;

	__ASSERT_NO_MSG(type < CLOCK_CONTROL_NRF_TYPE_COUNT);

	err = set_off_state(&subdata->flags, ctx);
	if (err < 0) {
		return err;
	}

	get_sub_config(dev, type)->stop();

	return 0;
}
/* clock_control API .off handler: stop on behalf of the API context. */
static int api_stop(const struct device *dev, clock_control_subsys_t subsys)
{
	return stop(dev, subsys, CTX_API);
}
/* Shared asynchronous start path: claim the subsystem for @p ctx, stash the
 * completion callback, then kick the hardware start. The callback fires from
 * clkstarted_handle() when the clock interrupt reports the clock running.
 *
 * @return 0 on success, -EPERM/-EALREADY from the state transition.
 */
static int async_start(const struct device *dev, clock_control_subsys_t subsys,
		       clock_control_cb_t cb, void *user_data, uint32_t ctx)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)subsys;
	struct nrf_clock_control_sub_data *subdata = get_sub_data(dev, type);
	int err;

	err = set_starting_state(&subdata->flags, ctx);
	if (err < 0) {
		return err;
	}

	subdata->cb = cb;
	subdata->user_data = user_data;

	get_sub_config(dev, type)->start();

	return 0;
}
/* clock_control API .async_on handler: start on behalf of the API context. */
static int api_start(const struct device *dev, clock_control_subsys_t subsys,
		     clock_control_cb_t cb, void *user_data)
{
	return async_start(dev, subsys, cb, user_data, CTX_API);
}
/* Completion callback for the blocking start: wake the waiting thread. */
static void blocking_start_callback(const struct device *dev,
				    clock_control_subsys_t subsys,
				    void *user_data)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(subsys);

	k_sem_give((struct k_sem *)user_data);
}
/* clock_control API .on handler: start the clock and block (up to 500 ms)
 * until the started callback gives the semaphore. Requires multithreading.
 *
 * NOTE(review): the semaphore lives on this stack frame; if k_sem_take()
 * times out, a late clock-started callback would k_sem_give() a dead
 * object — presumably the 500 ms bound is chosen to exceed any crystal
 * startup time. TODO confirm upstream rationale.
 */
static int api_blocking_start(const struct device *dev,
			      clock_control_subsys_t subsys)
{
	struct k_sem sem = Z_SEM_INITIALIZER(sem, 0, 1);
	int err;

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		return -ENOTSUP;
	}

	err = api_start(dev, subsys, blocking_start_callback, &sem);
	if (err < 0) {
		return err;
	}

	return k_sem_take(&sem, K_MSEC(500));
}
static clock_control_subsys_t get_subsys(struct onoff_manager *mgr)
{
struct nrf_clock_control_data *data = CLOCK_DEVICE->data;
size_t offset = (size_t)(mgr - data->mgr);
return (clock_control_subsys_t)offset;
}
/* onoff service stop transition: stop on behalf of the onoff context and
 * report the result back to the manager.
 */
static void onoff_stop(struct onoff_manager *mgr,
			onoff_notify_fn notify)
{
	int res;

	res = stop(CLOCK_DEVICE, get_subsys(mgr), CTX_ONOFF);
	notify(mgr, res);
}
/* Bridges the driver's clock-started callback to the onoff notify function
 * (which was smuggled through as user_data by onoff_start()).
 */
static void onoff_started_callback(const struct device *dev,
				   clock_control_subsys_t sys,
				   void *user_data)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)sys;
	struct onoff_manager *mgr = get_onoff_manager(dev, type);
	onoff_notify_fn notify = user_data;

	notify(mgr, 0);
}
/* onoff service start transition: start asynchronously; on immediate
 * failure report the error, otherwise completion is reported from
 * onoff_started_callback().
 */
static void onoff_start(struct onoff_manager *mgr,
			onoff_notify_fn notify)
{
	int err;

	err = async_start(CLOCK_DEVICE, get_subsys(mgr),
			  onoff_started_callback, notify, CTX_ONOFF);
	if (err < 0) {
		notify(mgr, err);
	}
}
/** @brief Wait for LF clock availability or stability.
 *
 * If LF clock source is SYNTH or RC then there is no distinction between
 * availability and stability. In case of XTAL source clock, system is initially
 * starting RC and then seamlessly switches to XTAL. Running RC means clock
 * availability and running target source means stability. That is because of
 * the significant difference in startup time (<1ms vs >200ms).
 *
 * In order to get event/interrupt when RC is ready (allowing CPU sleeping) two
 * stage startup sequence is used. Initially, LF source is set to RC and when
 * LFSTARTED event is handled it is reconfigured to the target source clock.
 * This approach is implemented in nrfx_clock driver and utilized here.
 *
 * @param mode Start mode.
 */
static void lfclk_spinwait(enum nrf_lfclk_start_mode mode)
{
	static const nrf_clock_domain_t d = NRF_CLOCK_DOMAIN_LFCLK;
	static const nrf_clock_lfclk_t target_type =
		/* For sources XTAL, EXT_LOW_SWING, and EXT_FULL_SWING,
		 * NRF_CLOCK_LFCLK_XTAL is returned as the type of running clock.
		 */
		(IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_XTAL) ||
		 IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_EXT_LOW_SWING) ||
		 IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_EXT_FULL_SWING))
		? NRF_CLOCK_LFCLK_XTAL
		: CLOCK_CONTROL_NRF_K32SRC;
	nrf_clock_lfclk_t type;

	if ((mode == CLOCK_CONTROL_NRF_LF_START_AVAILABLE) &&
	    (target_type == NRF_CLOCK_LFCLK_XTAL) &&
	    (nrf_clock_lf_srccopy_get(NRF_CLOCK) == CLOCK_CONTROL_NRF_K32SRC)) {
		/* If target clock source is using XTAL then due to two-stage
		 * clock startup sequence, RC might already be running.
		 * It can be determined by checking current LFCLK source. If it
		 * is set to the target clock source then it means that RC was
		 * started.
		 */
		return;
	}

	/* In ISR/pre-kernel context poll with interrupts locked; otherwise
	 * mask only the LFCLKSTARTED interrupt and sleep between polls.
	 */
	bool isr_mode = k_is_in_isr() || k_is_pre_kernel();
	int key = isr_mode ? irq_lock() : 0;

	if (!isr_mode) {
		nrf_clock_int_disable(NRF_CLOCK, NRF_CLOCK_INT_LF_STARTED_MASK);
	}

	while (!(nrfx_clock_is_running(d, (void *)&type)
		 && ((type == target_type)
		     || (mode == CLOCK_CONTROL_NRF_LF_START_AVAILABLE)))) {
		/* Synth source start is almost instant and LFCLKSTARTED may
		 * happen before calling idle. That would lead to deadlock.
		 */
		if (!IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_SYNTH)) {
			if (isr_mode || !IS_ENABLED(CONFIG_MULTITHREADING)) {
				k_cpu_atomic_idle(key);
			} else {
				k_msleep(1);
			}
		}

		/* Clock interrupt is locked, LFCLKSTARTED is handled here. */
		if ((target_type == NRF_CLOCK_LFCLK_XTAL)
		    && (nrf_clock_lf_src_get(NRF_CLOCK) == NRF_CLOCK_LFCLK_RC)
		    && nrf_clock_event_check(NRF_CLOCK,
					     NRF_CLOCK_EVENT_LFCLKSTARTED)) {
			/* Stage two of the startup: RC is up, switch the
			 * source to the target and restart the task.
			 */
			nrf_clock_event_clear(NRF_CLOCK,
					      NRF_CLOCK_EVENT_LFCLKSTARTED);
			nrf_clock_lf_src_set(NRF_CLOCK,
					     CLOCK_CONTROL_NRF_K32SRC);
			/* Clear pending interrupt, otherwise new clock event
			 * would not wake up from idle.
			 */
			NVIC_ClearPendingIRQ(DT_INST_IRQN(0));
			nrf_clock_task_trigger(NRF_CLOCK,
					       NRF_CLOCK_TASK_LFCLKSTART);
		}
	}

	if (isr_mode) {
		irq_unlock(key);
	} else {
		nrf_clock_int_enable(NRF_CLOCK, NRF_CLOCK_INT_LF_STARTED_MASK);
	}
}
/* Public helper: permanently request the LF clock (the onoff request made on
 * the first call is never released) and optionally spin until it is
 * available or stable, depending on @p start_mode.
 */
void z_nrf_clock_control_lf_on(enum nrf_lfclk_start_mode start_mode)
{
	static atomic_t on;
	static struct onoff_client cli;

	if (atomic_set(&on, 1) == 0) {
		/* First caller issues the one-and-only onoff request. */
		int err;
		struct onoff_manager *mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_LFCLK);

		sys_notify_init_spinwait(&cli.notify);
		err = onoff_request(mgr, &cli);
		__ASSERT_NO_MSG(err >= 0);
	}

	/* In case of simulated board leave immediately. */
	if (IS_ENABLED(CONFIG_SOC_SERIES_BSIM_NRFXX)) {
		return;
	}

	switch (start_mode) {
	case CLOCK_CONTROL_NRF_LF_START_AVAILABLE:
	case CLOCK_CONTROL_NRF_LF_START_STABLE:
		lfclk_spinwait(start_mode);
		break;

	case CLOCK_CONTROL_NRF_LF_START_NOWAIT:
		break;

	default:
		__ASSERT_NO_MSG(false);
	}
}
/* nrfx_clock event dispatcher: translates hardware clock events into the
 * per-subsystem "started" handling and calibration hooks.
 */
static void clock_event_handler(nrfx_clock_evt_type_t event)
{
	const struct device *dev = CLOCK_DEVICE;

	switch (event) {
	case NRFX_CLOCK_EVT_HFCLK_STARTED:
	{
		struct nrf_clock_control_sub_data *data =
				get_sub_data(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK);

		/* Check needed due to anomaly 201:
		 * HFCLKSTARTED may be generated twice.
		 */
		if (GET_STATUS(data->flags) == CLOCK_CONTROL_STATUS_STARTING) {
			clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK);
		}

		break;
	}
#if NRF_CLOCK_HAS_HFCLK192M
	case NRFX_CLOCK_EVT_HFCLK192M_STARTED:
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK192M);
		break;
#endif
#if NRF_CLOCK_HAS_HFCLKAUDIO
	case NRFX_CLOCK_EVT_HFCLKAUDIO_STARTED:
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLKAUDIO);
		break;
#endif
	case NRFX_CLOCK_EVT_LFCLK_STARTED:
		if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
			z_nrf_clock_calibration_lfclk_started();
		}
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_LFCLK);
		break;

	case NRFX_CLOCK_EVT_CAL_DONE:
		if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
			z_nrf_clock_calibration_done_handler();
		} else {
			/* Should not happen when calibration is disabled. */
			__ASSERT_NO_MSG(false);
		}
		break;

	default:
		__ASSERT_NO_MSG(0);
		break;
	}
}
/* Program the HFCLKAUDIO PLL frequency word from the devicetree
 * "hfclkaudio-frequency" property, when present.
 */
static void hfclkaudio_init(void)
{
#if DT_NODE_HAS_PROP(DT_NODELABEL(clock), hfclkaudio_frequency)
	const uint32_t frequency =
		DT_PROP(DT_NODELABEL(clock), hfclkaudio_frequency);
	/* As specified in the nRF5340 PS:
	 *
	 * FREQ_VALUE = 2^16 * ((12 * f_out / 32M) - 4)
	 *
	 * The expression below is that formula reduced to integer
	 * arithmetic: 2^16 * 12 / 32M == 384 / 15625, 2^16 * 4 == 262144.
	 */
	const uint32_t freq_value =
		(uint32_t)((384ULL * frequency) / 15625) - 262144;

#if NRF_CLOCK_HAS_HFCLKAUDIO
	nrf_clock_hfclkaudio_config_set(NRF_CLOCK, freq_value);
#else
#error "hfclkaudio-frequency specified but HFCLKAUDIO clock is not present."
#endif /* NRF_CLOCK_HAS_HFCLKAUDIO */
#endif
}
/* Driver init: hook the clock IRQ, initialize nrfx_clock, program the audio
 * PLL, set up RC calibration (if enabled), and initialize one onoff manager
 * plus an OFF state for every clock subsystem.
 *
 * @return 0 on success, -EIO if nrfx init fails, or an onoff init error.
 */
static int clk_init(const struct device *dev)
{
	nrfx_err_t nrfx_err;
	int err;
	static const struct onoff_transitions transitions = {
		.start = onoff_start,
		.stop = onoff_stop
	};

	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
		    nrfx_isr, nrfx_power_clock_irq_handler, 0);

	nrfx_err = nrfx_clock_init(clock_event_handler);
	if (nrfx_err != NRFX_SUCCESS) {
		return -EIO;
	}

	hfclkaudio_init();

	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
		struct nrf_clock_control_data *data = dev->data;

		z_nrf_clock_calibration_init(data->mgr);
	}

	nrfx_clock_enable();

	for (enum clock_control_nrf_type i = 0;
		i < CLOCK_CONTROL_NRF_TYPE_COUNT; i++) {
		struct nrf_clock_control_sub_data *subdata =
						get_sub_data(dev, i);

		err = onoff_manager_init(get_onoff_manager(dev, i),
					 &transitions);
		if (err < 0) {
			return err;
		}

		subdata->flags = CLOCK_CONTROL_STATUS_OFF;
	}

	return 0;
}
static const struct clock_control_driver_api clock_control_api = {
	.on = api_blocking_start,
	.off = api_stop,
	.async_on = api_start,
	.get_status = get_status,
};

static struct nrf_clock_control_data data;

/* Per-subsystem start/stop hooks; optional subsystems are compiled in only
 * when the SoC's CLOCK peripheral has them.
 */
static const struct nrf_clock_control_config config = {
	.subsys = {
		[CLOCK_CONTROL_NRF_TYPE_HFCLK] = {
			.start = generic_hfclk_start,
			.stop = generic_hfclk_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclk",))
		},
		[CLOCK_CONTROL_NRF_TYPE_LFCLK] = {
			.start = lfclk_start,
			.stop = lfclk_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "lfclk",))
		},
#if NRF_CLOCK_HAS_HFCLK192M
		[CLOCK_CONTROL_NRF_TYPE_HFCLK192M] = {
			.start = hfclk192m_start,
			.stop = hfclk192m_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclk192m",))
		},
#endif
#if NRF_CLOCK_HAS_HFCLKAUDIO
		[CLOCK_CONTROL_NRF_TYPE_HFCLKAUDIO] = {
			.start = hfclkaudio_start,
			.stop = hfclkaudio_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclkaudio",))
		},
#endif
	}
};

DEVICE_DT_DEFINE(DT_NODELABEL(clock), clk_init, NULL,
		 &data, &config,
		 PRE_KERNEL_1, CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		 &clock_control_api);
#if defined(CONFIG_SHELL)

/* Shell "nrf_clock_control status" handler: dump HF/LF running state,
 * onoff reference counts, and the last HFCLK start/stop timestamps.
 * The snapshot of timestamps and HF source is taken under irq_lock so the
 * printed values are mutually consistent.
 */
static int cmd_status(const struct shell *sh, size_t argc, char **argv)
{
	nrf_clock_hfclk_t hfclk_src;
	bool hf_status;
	bool lf_status = nrfx_clock_is_running(NRF_CLOCK_DOMAIN_LFCLK, NULL);
	struct onoff_manager *hf_mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_HFCLK);
	struct onoff_manager *lf_mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_LFCLK);
	uint32_t abs_start, abs_stop;
	unsigned int key = irq_lock();
	uint64_t now = k_uptime_get();

	(void)nrfx_clock_is_running(NRF_CLOCK_DOMAIN_HFCLK, (void *)&hfclk_src);
	hf_status = (hfclk_src == NRF_CLOCK_HFCLK_HIGH_ACCURACY);

	abs_start = hf_start_tstamp;
	abs_stop = hf_stop_tstamp;
	irq_unlock(key);

	shell_print(sh, "HF clock:");
	shell_print(sh, "\t- %srunning (users: %u)",
			hf_status ? "" : "not ", hf_mgr->refs);
	shell_print(sh, "\t- last start: %u ms (%u ms ago)",
			(uint32_t)abs_start, (uint32_t)(now - abs_start));
	shell_print(sh, "\t- last stop: %u ms (%u ms ago)",
			(uint32_t)abs_stop, (uint32_t)(now - abs_stop));
	shell_print(sh, "LF clock:");
	shell_print(sh, "\t- %srunning (users: %u)",
			lf_status ? "" : "not ", lf_mgr->refs);

	return 0;
}

SHELL_STATIC_SUBCMD_SET_CREATE(subcmds,
	SHELL_CMD_ARG(status, NULL, "Status", cmd_status, 1, 0),
	SHELL_SUBCMD_SET_END
);

SHELL_COND_CMD_REGISTER(CONFIG_CLOCK_CONTROL_NRF_SHELL,
			nrf_clock_control, &subcmds,
			"Clock control commands",
			cmd_status);

#endif /* defined(CONFIG_SHELL) */
``` | /content/code_sandbox/drivers/clock_control/clock_control_nrf.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,545 |
```c
/*
*
*/
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/adi_max32_clock_control.h>
#include <wrap_max32_sys.h>
#define DT_DRV_COMPAT adi_max32_gcr
/* clock_control .on handler: enable the peripheral clock described by
 * @p clkcfg. Returns 0 on success, -EINVAL for an unknown bus id.
 */
static inline int api_on(const struct device *dev, clock_control_subsys_t clkcfg)
{
	ARG_UNUSED(dev);
	struct max32_perclk *perclk = (struct max32_perclk *)(clkcfg);
	unsigned int bus_base;

	/* Enable bits of the three peripheral-clock buses are packed into
	 * one flat identifier space, 32 bits per bus.
	 */
	switch (perclk->bus) {
	case ADI_MAX32_CLOCK_BUS0:
		bus_base = 0;
		break;
	case ADI_MAX32_CLOCK_BUS1:
		bus_base = 32;
		break;
	case ADI_MAX32_CLOCK_BUS2:
		bus_base = 64;
		break;
	default:
		return -EINVAL;
	}

	MXC_SYS_ClockEnable((mxc_sys_periph_clock_t)(perclk->bit + bus_base));

	return 0;
}
/* clock_control .off handler: disable the peripheral clock described by
 * @p clkcfg. Returns 0 on success, -EINVAL for an unknown bus id.
 */
static inline int api_off(const struct device *dev, clock_control_subsys_t clkcfg)
{
	ARG_UNUSED(dev);
	struct max32_perclk *perclk = (struct max32_perclk *)(clkcfg);
	unsigned int bus_base;

	/* Same flat bus*32 + bit encoding as api_on(). */
	switch (perclk->bus) {
	case ADI_MAX32_CLOCK_BUS0:
		bus_base = 0;
		break;
	case ADI_MAX32_CLOCK_BUS1:
		bus_base = 32;
		break;
	case ADI_MAX32_CLOCK_BUS2:
		bus_base = 64;
		break;
	default:
		return -EINVAL;
	}

	MXC_SYS_ClockDisable((mxc_sys_periph_clock_t)(perclk->bit + bus_base));

	return 0;
}
/* clock_control .get_rate handler: report the frequency (Hz) of the
 * peripheral clock source selected in @p clkcfg. Rates are compile-time
 * constants provided by the HAL headers.
 *
 * @return 0 on success, -EINVAL (with *rate zeroed) for an unknown source.
 */
static int api_get_rate(const struct device *dev, clock_control_subsys_t clkcfg, uint32_t *rate)
{
	ARG_UNUSED(dev);
	struct max32_perclk *perclk = (struct max32_perclk *)(clkcfg);

	switch (perclk->clk_src) {
	case ADI_MAX32_PRPH_CLK_SRC_PCLK:
		*rate = ADI_MAX32_PCLK_FREQ;
		break;
	case ADI_MAX32_PRPH_CLK_SRC_EXTCLK:
		*rate = ADI_MAX32_CLK_EXTCLK_FREQ;
		break;
	case ADI_MAX32_PRPH_CLK_SRC_IBRO:
		*rate = ADI_MAX32_CLK_IBRO_FREQ;
		break;
	case ADI_MAX32_PRPH_CLK_SRC_ERFO:
		*rate = ADI_MAX32_CLK_ERFO_FREQ;
		break;
	case ADI_MAX32_PRPH_CLK_SRC_ERTCO:
		*rate = ADI_MAX32_CLK_ERTCO_FREQ;
		break;
	case ADI_MAX32_PRPH_CLK_SRC_INRO:
		*rate = ADI_MAX32_CLK_INRO_FREQ;
		break;
	case ADI_MAX32_PRPH_CLK_SRC_ISO:
		*rate = ADI_MAX32_CLK_ISO_FREQ;
		break;
	case ADI_MAX32_PRPH_CLK_SRC_IBRO_DIV8:
		*rate = ADI_MAX32_CLK_IBRO_FREQ / 8;
		break;
	default:
		*rate = 0U;
		/* Invalid parameters */
		return -EINVAL;
	}

	return 0;
}
/* clock_control driver API table for the MAX32 GCR controller. */
static const struct clock_control_driver_api max32_clkctrl_api = {
	.on = api_on,
	.off = api_off,
	.get_rate = api_get_rate,
};
/* Enable every fixed clock source that is marked okay in the devicetree;
 * the external clock is first disabled, then re-enabled last, and only on
 * parts that actually have it.
 */
static void setup_fixed_clocks(void)
{
#if DT_NODE_HAS_COMPAT(DT_NODELABEL(clk_extclk), fixed_clock)
	MXC_SYS_ClockSourceDisable(ADI_MAX32_CLK_EXTCLK);
#endif

#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_ipo), okay)
	MXC_SYS_ClockSourceEnable(ADI_MAX32_CLK_IPO);
#endif

#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_erfo), okay)
	MXC_SYS_ClockSourceEnable(ADI_MAX32_CLK_ERFO);
#endif

#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_ibro), okay)
	MXC_SYS_ClockSourceEnable(ADI_MAX32_CLK_IBRO);
#endif

#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_iso), okay)
	MXC_SYS_ClockSourceEnable(ADI_MAX32_CLK_ISO);
#endif

#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_inro), okay)
	MXC_SYS_ClockSourceEnable(ADI_MAX32_CLK_INRO);
#endif

#if DT_NODE_HAS_STATUS(DT_NODELABEL(clk_ertco), okay)
	MXC_SYS_ClockSourceEnable(ADI_MAX32_CLK_ERTCO);
#endif

/* Some device does not support external clock */
#if DT_NODE_HAS_COMPAT_STATUS(DT_NODELABEL(clk_extclk), fixed_clock, okay)
	MXC_SYS_ClockSourceEnable(ADI_MAX32_CLK_EXTCLK);
#endif
}
/* Driver init: bring up fixed sources, select the SYSCLK source, and apply
 * the optional devicetree prescaler. Always returns 0.
 */
static int max32_clkctrl_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	/* Setup fixed clocks if enabled */
	setup_fixed_clocks();

	/* Setup device clock source */
	MXC_SYS_Clock_Select(ADI_MAX32_SYSCLK_SRC);

#if DT_NODE_HAS_PROP(DT_NODELABEL(gcr), sysclk_prescaler)
	/* Setup divider */
	Wrap_MXC_SYS_SetClockDiv(sysclk_prescaler(ADI_MAX32_SYSCLK_PRESCALER));
#endif

	return 0;
}

DEVICE_DT_INST_DEFINE(0, max32_clkctrl_init, NULL, NULL, NULL, PRE_KERNEL_1,
		      CONFIG_CLOCK_CONTROL_INIT_PRIORITY, &max32_clkctrl_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_max32.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,182 |
```unknown
# NPCX Clock controller driver configuration options
config CLOCK_CONTROL_NPCX
bool "NPCX clock controller driver"
default y
depends on DT_HAS_NUVOTON_NPCX_PCC_ENABLED
help
Enable support for NPCX clock controller driver.
if CLOCK_CONTROL_NPCX
config CLOCK_CONTROL_NPCX_EXTERNAL_SRC
bool "Generate LFCLK by on-chip Crystal Oscillator"
help
When this option is enabled, the internal 32.768 KHz clock (LFCLK)
is generated by the on-chip Crystal Oscillator (XTOSC).
This includes an on-chip oscillator, to which an external crystal
and the related passive components are connected.
config CLOCK_CONTROL_NPCX_SUPP_APB4
bool "Indicates that the clock controller supports APB4 bus"
default y if !SOC_SERIES_NPCX7
help
Selected if NPCX series supports APB4 bus.
config CLOCK_CONTROL_NPCX_SUPP_FIU1
bool "Indicates that the clock controller supports FIU1 bus"
default y if SOC_SERIES_NPCX4
help
Selected if NPCX series supports FIU1 bus.
endif # CLOCK_CONTROL_NPCX
``` | /content/code_sandbox/drivers/clock_control/Kconfig.npcx | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 247 |
```unknown
# Beetle MCU clock control driver config
if SOC_FAMILY_ARM
menuconfig CLOCK_CONTROL_BEETLE
bool "BEETLE Clock Control"
default y
depends on DT_HAS_ARM_BEETLE_SYSCON_ENABLED
	help
	  Enable driver for the system controller clock control
	  hardware found on Arm Beetle SoCs.
if CLOCK_CONTROL_BEETLE
config CLOCK_CONTROL_BEETLE_ENABLE_PLL
bool "PLL on Beetle"
depends on SOC_SERIES_BEETLE
help
Enable PLL on Beetle.
Select n if not sure.
endif # CLOCK_CONTROL_BEETLE
endif # SOC_FAMILY_ARM
``` | /content/code_sandbox/drivers/clock_control/Kconfig.beetle | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 132 |
```c
/*
*
*/
#define DT_DRV_COMPAT atmel_sam_pmc
#include <stdint.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/atmel_sam_pmc.h>
#include <soc.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
/* clock_control .on handler: enable a peripheral clock through the PMC.
 *
 * @return 0 on success, -ENXIO for a NULL subsystem config,
 *         -ENODEV for an unimplemented clock type.
 */
static int atmel_sam_clock_control_on(const struct device *dev,
				      clock_control_subsys_t sys)
{
	ARG_UNUSED(dev);

	const struct atmel_sam_pmc_config *cfg = sys;

	if (cfg == NULL) {
		LOG_ERR("The PMC config can not be NULL.");
		return -ENXIO;
	}

	LOG_DBG("Type: %x, Id: %d", cfg->clock_type, cfg->peripheral_id);

	/* Only peripheral clocks are implemented today. */
	if (cfg->clock_type != PMC_TYPE_PERIPHERAL) {
		LOG_ERR("The PMC clock type is not implemented.");
		return -ENODEV;
	}

	soc_pmc_peripheral_enable(cfg->peripheral_id);

	return 0;
}
/* clock_control .off handler: disable a peripheral clock through the PMC.
 *
 * @return 0 on success, -ENXIO for a NULL subsystem config,
 *         -ENODEV for an unimplemented clock type.
 */
static int atmel_sam_clock_control_off(const struct device *dev,
				       clock_control_subsys_t sys)
{
	ARG_UNUSED(dev);

	const struct atmel_sam_pmc_config *cfg = sys;

	if (cfg == NULL) {
		LOG_ERR("The PMC config can not be NULL.");
		return -ENXIO;
	}

	LOG_DBG("Type: %x, Id: %d", cfg->clock_type, cfg->peripheral_id);

	/* Only peripheral clocks are implemented today. */
	if (cfg->clock_type != PMC_TYPE_PERIPHERAL) {
		LOG_ERR("The PMC clock type is not implemented.");
		return -ENODEV;
	}

	soc_pmc_peripheral_disable(cfg->peripheral_id);

	return 0;
}
/* clock_control .get_rate handler: peripheral clocks run at the master
 * clock (MCK) frequency, a compile-time constant.
 *
 * @return 0 on success, -ENXIO for a NULL config, -ENODEV for an
 *         unimplemented clock type.
 */
static int atmel_sam_clock_control_get_rate(const struct device *dev,
					    clock_control_subsys_t sys,
					    uint32_t *rate)
{
	ARG_UNUSED(dev);
	const struct atmel_sam_pmc_config *cfg = (const struct atmel_sam_pmc_config *)sys;

	if (cfg == NULL) {
		LOG_ERR("The PMC config can not be NULL.");
		return -ENXIO;
	}

	LOG_DBG("Type: %x, Id: %d", cfg->clock_type, cfg->peripheral_id);

	switch (cfg->clock_type) {
	case PMC_TYPE_PERIPHERAL:
		*rate = SOC_ATMEL_SAM_MCK_FREQ_HZ;
		break;
	default:
		LOG_ERR("The PMC clock type is not implemented.");
		return -ENODEV;
	}

	LOG_DBG("Rate: %d", *rate);

	return 0;
}
/* clock_control .get_status handler: report whether a peripheral clock is
 * enabled in the PMC.
 *
 * Fix: the return type is enum clock_control_status, so error conditions
 * must map to CLOCK_CONTROL_STATUS_UNKNOWN instead of funneling negative
 * errno values (-ENXIO/-ENODEV) through the enum, which callers comparing
 * against the enum constants would misinterpret.
 */
static enum clock_control_status
atmel_sam_clock_control_get_status(const struct device *dev,
				   clock_control_subsys_t sys)
{
	ARG_UNUSED(dev);
	const struct atmel_sam_pmc_config *cfg = (const struct atmel_sam_pmc_config *)sys;
	enum clock_control_status status;

	if (cfg == NULL) {
		LOG_ERR("The PMC config can not be NULL.");
		return CLOCK_CONTROL_STATUS_UNKNOWN;
	}

	LOG_DBG("Type: %x, Id: %d", cfg->clock_type, cfg->peripheral_id);

	switch (cfg->clock_type) {
	case PMC_TYPE_PERIPHERAL:
		status = soc_pmc_peripheral_is_enabled(cfg->peripheral_id) > 0
		       ? CLOCK_CONTROL_STATUS_ON
		       : CLOCK_CONTROL_STATUS_OFF;
		break;
	default:
		LOG_ERR("The PMC clock type is not implemented.");
		return CLOCK_CONTROL_STATUS_UNKNOWN;
	}

	return status;
}
/* clock_control driver API table; the device needs no init hook because the
 * PMC is already set up by SoC boot code.
 */
static const struct clock_control_driver_api atmel_sam_clock_control_api = {
	.on = atmel_sam_clock_control_on,
	.off = atmel_sam_clock_control_off,
	.get_rate = atmel_sam_clock_control_get_rate,
	.get_status = atmel_sam_clock_control_get_status,
};

DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, NULL, PRE_KERNEL_1,
		      CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		      &atmel_sam_clock_control_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_sam_pmc.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 916 |
```c
/*
*
*/
#include <zephyr/arch/cpu.h>
#include <zephyr/arch/common/sys_bitops.h>
#include <zephyr/drivers/clock_control/clock_agilex_ll.h>
#include <socfpga_system_manager.h>
/*
 * Intel SoC re-use Arm Trusted Firmware (ATF) driver code in Zephyr.
 * The migrated ATF driver code uses mmio_X macro to access the register.
 * The following macros map mmio_X to Zephyr compatible function for
 * register access. This allow Zephyr to re-use the ATF driver codes
 * without massive changes.
 */
#define mmio_write_32(addr, data) sys_write32((data), (addr))
#define mmio_read_32(addr) sys_read32((addr))
#define mmio_setbits_32(addr, mask) sys_set_bits((addr), (mask))
#define mmio_clrbits_32(addr, mask) sys_clear_bits((addr), (mask))
/* Extract reference clock from platform clock source */
/* Extract reference clock from platform clock source.
 *
 * Reads the PLL input source selected in @p pllglob (EOSC1 / internal
 * oscillator / FPGA-to-SoC clock, the first and last reported via boot
 * scratch registers) and divides it by the AREFCLKDIV field.
 *
 * Fix: guard the divider — if the AREFCLKDIV field reads back as zero
 * (unprogrammed/invalid PLL), the old code performed an integer division
 * by zero, which is undefined behavior. Report 0 Hz instead.
 */
uint32_t get_ref_clk(uint32_t pllglob)
{
	uint32_t arefclkdiv, ref_clk;
	uint32_t scr_reg;

	switch (CLKMGR_PSRC(pllglob)) {
	case CLKMGR_PLLGLOB_PSRC_EOSC1:
		scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1);
		ref_clk = mmio_read_32(scr_reg);
		break;

	case CLKMGR_PLLGLOB_PSRC_INTOSC:
		ref_clk = CLKMGR_INTOSC_HZ;
		break;

	case CLKMGR_PLLGLOB_PSRC_F2S:
		scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2);
		ref_clk = mmio_read_32(scr_reg);
		break;

	default:
		ref_clk = 0;
		break;
	}

	arefclkdiv = CLKMGR_PLLGLOB_AREFCLKDIV(pllglob);
	if (arefclkdiv == 0U) {
		return 0;
	}
	ref_clk /= arefclkdiv;

	return ref_clk;
}
/* Calculate clock frequency based on parameter */
/* Calculate clock frequency based on parameter.
 *
 * Resolves the source select register @p psrc_reg to either the main or the
 * peripheral PLL, then computes ref_clk * M / C from the PLL's M divider and
 * the selected post-divider (@p main_pllc / @p per_pllc). Returns 0 for an
 * unknown source.
 *
 * NOTE(review): pllc_div comes straight from hardware; a zero value would
 * divide by zero here — presumably the PLLs are always programmed by boot
 * firmware before this runs. TODO confirm.
 */
uint32_t get_clk_freq(uint32_t psrc_reg, uint32_t main_pllc, uint32_t per_pllc)
{
	uint32_t clk_psrc, mdiv, ref_clk;
	uint32_t pllm_reg, pllc_reg, pllc_div, pllglob_reg;

	clk_psrc = mmio_read_32(CLKMGR_MAINPLL + psrc_reg);

	switch (CLKMGR_PSRC(clk_psrc)) {
	case CLKMGR_PSRC_MAIN:
		pllm_reg = CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLM;
		pllc_reg = CLKMGR_MAINPLL + main_pllc;
		pllglob_reg = CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB;
		break;

	case CLKMGR_PSRC_PER:
		pllm_reg = CLKMGR_PERPLL + CLKMGR_PERPLL_PLLM;
		pllc_reg = CLKMGR_PERPLL + per_pllc;
		pllglob_reg = CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB;
		break;

	default:
		return 0;
	}

	ref_clk = get_ref_clk(mmio_read_32(pllglob_reg));
	mdiv = CLKMGR_PLLM_MDIV(mmio_read_32(pllm_reg));
	ref_clk *= mdiv;

	pllc_div = mmio_read_32(pllc_reg) & 0x7ff;

	return ref_clk / pllc_div;
}
/* Return L3 interconnect clock */
/* Return the L3 interconnect (NOC) clock frequency in Hz. */
uint32_t get_l3_clk(void)
{
	return get_clk_freq(CLKMGR_MAINPLL_NOCCLK, CLKMGR_MAINPLL_PLLC1,
			    CLKMGR_PERPLL_PLLC1);
}
/* Calculate clock frequency to be used for mpu */
/* Return the MPU (CPU core) clock frequency in Hz. */
uint32_t get_mpu_clk(void)
{
	return get_clk_freq(CLKMGR_MAINPLL_MPUCLK, CLKMGR_MAINPLL_PLLC0,
			    CLKMGR_PERPLL_PLLC0);
}
/* Calculate clock frequency to be used for watchdog timer */
/* Return the watchdog timer clock frequency (L4 sys clock = L3 / 4). */
uint32_t get_wdt_clk(void)
{
	return get_l3_clk() / 4;
}
/* Calculate clock frequency to be used for UART driver */
/* Return the UART clock frequency (L4 SP clock): the L3 clock shifted
 * right by the 2-bit divider field in the NOC divider register.
 */
uint32_t get_uart_clk(void)
{
	uint32_t div_shift;

	div_shift = (mmio_read_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_NOCDIV) >> 16) & 0x3;

	return get_l3_clk() >> div_shift;
}
/* Calculate clock frequency to be used for SDMMC driver */
/* Calculate clock frequency to be used for SDMMC driver: the PLL-derived
 * SDMMC clock divided by the 11-bit counter field plus one, then by a
 * fixed factor of 4.
 */
uint32_t get_mmc_clk(void)
{
	uint32_t data32, mmc_clk;

	mmc_clk = get_clk_freq(CLKMGR_ALTERA_SDMMCCTR,
			       CLKMGR_MAINPLL_PLLC3, CLKMGR_PERPLL_PLLC3);

	data32 = mmio_read_32(CLKMGR_ALTERA + CLKMGR_ALTERA_SDMMCCTR);
	data32 = (data32 & 0x7ff) + 1;
	mmc_clk = (mmc_clk / data32) / 4;

	return mmc_clk;
}
``` | /content/code_sandbox/drivers/clock_control/clock_agilex_ll.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,172 |
```unknown
# MCUXpresso SDK SIM
config CLOCK_CONTROL_MCUX_SIM
bool "MCUX SIM driver"
default y
depends on DT_HAS_NXP_KINETIS_SIM_ENABLED
help
	  Enable support for the MCUX SIM clock control driver.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.mcux_sim | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 49 |
```unknown
# MCUXpresso SDK SCG
config CLOCK_CONTROL_MCUX_SCG
bool "MCUX SCG driver"
default y
depends on DT_HAS_NXP_KINETIS_SCG_ENABLED
help
Enable support for mcux scg driver.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.mcux_scg | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 54 |
```unknown
# MCUXpresso SDK MCG
config CLOCK_CONTROL_MCUX_MCG
bool "MCUX MCG driver"
default y
depends on DT_HAS_NXP_KINETIS_MCG_ENABLED
help
Enable support for mcux mcg driver.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.mcux_mcg | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 54 |
```unknown
config CLOCK_CONTROL_ARM_SCMI
bool "SCMI clock protocol clock controller driver"
default y
depends on ARM_SCMI_CLK_HELPERS
help
Enable support for SCMI-based clock control.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.arm_scmi | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 44 |
```c
/*
*
*
*/
#include <soc.h>
#include <stm32_ll_bus.h>
#include <stm32_ll_rcc.h>
#include <stm32_ll_utils.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include "clock_stm32_ll_common.h"
#if defined(RCC_CFGR_USBPRE)
#define STM32_USB_PRE_ENABLED RCC_CFGR_USBPRE
#elif defined(RCC_CFGR_OTGFSPRE)
#define STM32_USB_PRE_ENABLED RCC_CFGR_OTGFSPRE
#endif
#if defined(STM32_PLL_ENABLED)
/*
* Select PLL source for STM32F1 Connectivity line devices (STM32F105xx and
* STM32F107xx).
* Both flags are defined in STM32Cube LL API. Keep only the selected one.
*/
/**
 * @brief Set up the main PLL configuration feeding SYSCLK.
 *
 * Derives the PLL multiplier, prescaler and clock source from the
 * devicetree-generated STM32_PLL_* macros and programs the RCC system
 * PLL domain. On parts with a USB prescaler bit, the USB clock source
 * is configured as well.
 */
__unused
void config_pll_sysclock(void)
{
	/*
	 * Both values are initialized so that a configuration in which no
	 * branch below assigns them (HSI source never reads pll_div; the
	 * "invalid source" path only fires __ASSERT, which compiles out in
	 * release builds) cannot pass an indeterminate value to
	 * LL_RCC_PLL_ConfigDomain_SYS().
	 */
	uint32_t pll_source = 0U;
	uint32_t pll_div = 0U;
	uint32_t pll_mul;

	/*
	 * PLLMUL on SOC_STM32F10X_DENSITY_DEVICE
	 * 2 -> LL_RCC_PLL_MUL_2 -> 0x00000000
	 * 3 -> LL_RCC_PLL_MUL_3 -> 0x00040000
	 * 4 -> LL_RCC_PLL_MUL_4 -> 0x00080000
	 * ...
	 * 16 -> LL_RCC_PLL_MUL_16 -> 0x00380000
	 *
	 * PLLMUL on SOC_STM32F10X_CONNECTIVITY_LINE_DEVICE
	 * 4 -> LL_RCC_PLL_MUL_4 -> 0x00080000
	 * ...
	 * 9 -> LL_RCC_PLL_MUL_9 -> 0x001C0000
	 * 13 -> LL_RCC_PLL_MUL_6_5 -> 0x00340000
	 */
	pll_mul = ((STM32_PLL_MULTIPLIER - 2) << RCC_CFGR_PLLMULL_Pos);

	if (!IS_ENABLED(STM32_PLL_SRC_HSI)) {
		/* In case PLL source is not HSI, set prediv case by case */
#ifdef CONFIG_SOC_STM32F10X_DENSITY_DEVICE
		/* PLL prediv */
		if (IS_ENABLED(STM32_PLL_XTPRE)) {
			/*
			 * SOC_STM32F10X_DENSITY_DEVICE:
			 * PLLXPTRE (depends on PLL source HSE)
			 * HSE/2 used as PLL source
			 */
			pll_div = LL_RCC_PREDIV_DIV_2;
		} else {
			/*
			 * SOC_STM32F10X_DENSITY_DEVICE:
			 * PLLXPTRE (depends on PLL source HSE)
			 * HSE used as direct PLL source
			 */
			pll_div = LL_RCC_PREDIV_DIV_1;
		}
#else
		/*
		 * SOC_STM32F10X_CONNECTIVITY_LINE_DEVICE
		 * 1 -> LL_RCC_PREDIV_DIV_1 -> 0x00000000
		 * 2 -> LL_RCC_PREDIV_DIV_2 -> 0x00000001
		 * 3 -> LL_RCC_PREDIV_DIV_3 -> 0x00000002
		 * ...
		 * 16 -> LL_RCC_PREDIV_DIV_16 -> 0x0000000F
		 */
		pll_div = STM32_PLL_PREDIV - 1;
#endif /* CONFIG_SOC_STM32F10X_DENSITY_DEVICE */
	}

	/* Configure PLL source */
	if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		pll_source = LL_RCC_PLLSOURCE_HSI_DIV_2;
	} else if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		pll_source = LL_RCC_PLLSOURCE_HSE | pll_div;
#if defined(RCC_CFGR2_PREDIV1SRC)
	} else if (IS_ENABLED(STM32_PLL_SRC_PLL2)) {
		pll_source = LL_RCC_PLLSOURCE_PLL2 | pll_div;
#endif
	} else {
		__ASSERT(0, "Invalid source");
	}

	LL_RCC_PLL_ConfigDomain_SYS(pll_source, pll_mul);

#ifdef STM32_USB_PRE_ENABLED
	/* Prescaler is enabled: PLL clock is not divided */
	LL_RCC_SetUSBClockSource(IS_ENABLED(STM32_PLL_USBPRE) ?
				 STM32_USB_PRE_ENABLED : 0);
#endif
}
#endif /* defined(STM32_PLL_ENABLED) */
#if defined(STM32_PLL2_ENABLED)
/**
 * @brief Set up the PLL2 configuration (connectivity-line devices).
 *
 * Translates STM32_PLL2_MULTIPLIER / STM32_PLL2_PREDIV into the
 * RCC_CFGR2 field encodings and programs the PLL2 domain. PLL2 is
 * only ever fed from HSE on these parts.
 */
__unused
void config_pll2(void)
{
	uint32_t pll_mul, pll_div;
	/*
	 * PLL2MUL on SOC_STM32F10X_CONNECTIVITY_LINE_DEVICE
	 * 8 -> LL_RCC_PLL2_MUL_8 -> 0x00000600
	 * 9 -> LL_RCC_PLL2_MUL_9 -> 0x00000700
	 * ...
	 * 14 -> LL_RCC_PLL2_MUL_14 -> 0x00000C00
	 * 16 -> LL_RCC_PLL2_MUL_16 -> 0x00000E00
	 * 20 -> LL_RCC_PLL2_MUL_20 -> 0x00000F00
	 */
	if (STM32_PLL2_MULTIPLIER == 20) {
		/* x20 is the one non-linear encoding; handle it explicitly. */
		pll_mul = RCC_CFGR2_PLL2MUL20;
	} else {
		pll_mul = ((STM32_PLL2_MULTIPLIER - 2) << RCC_CFGR2_PLL2MUL_Pos);
	}
	/*
	 * SOC_STM32F10X_CONNECTIVITY_LINE_DEVICE
	 * 1 -> LL_RCC_HSE_PREDIV2_DIV_1 -> 0x00000000
	 * 2 -> LL_RCC_HSE_PREDIV2_DIV_2 -> 0x00000010
	 * ...
	 * 16 -> LL_RCC_HSE_PREDIV2_DIV_16 -> 0x000000F0
	 */
	pll_div = ((STM32_PLL2_PREDIV - 1) << RCC_CFGR2_PREDIV2_Pos);
	/* Check PLL2 source (only HSE is valid hardware-wise) */
	if (!IS_ENABLED(STM32_PLL2_SRC_HSE)) {
		__ASSERT(0, "Invalid source");
	}
	LL_RCC_PLL_ConfigDomain_PLL2(pll_div, pll_mul);
}
#endif /* defined(STM32_PLL2_ENABLED) */
/**
 * @brief Activate clocks that must run regardless of driver requests.
 */
void config_enable_default_clocks(void)
{
	if (!IS_ENABLED(STM32_LSE_ENABLED)) {
		return;
	}

	/* LSE lives in the backup domain: the power and backup interface
	 * clocks (PWREN/BKPEN in RCC_APB1ENR) must be on to reach it.
	 */
	LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_PWR);
	LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_BKP);
}
``` | /content/code_sandbox/drivers/clock_control/clock_stm32f1.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,463 |
```unknown
# Clock controller driver configuration options
#
# Clock controller drivers
#
menuconfig CLOCK_CONTROL
bool "Clock controller drivers"
help
Enable support for hardware clock controller. Such hardware can
provide clock for other subsystem, and thus can be also used for
power efficiency by controlling their clock. Note that this has
nothing to do with RTC.
if CLOCK_CONTROL
config CLOCK_CONTROL_INIT_PRIORITY
int "Clock control init priority"
default KERNEL_INIT_PRIORITY_OBJECTS
help
Clock control driver device initialization priority.
module = CLOCK_CONTROL
module-str = clock control
source "subsys/logging/Kconfig.template.log_config"
source "drivers/clock_control/Kconfig.nrf"
source "drivers/clock_control/Kconfig.stm32"
source "drivers/clock_control/Kconfig.beetle"
source "drivers/clock_control/Kconfig.fixed"
source "drivers/clock_control/Kconfig.lpc11u6x"
source "drivers/clock_control/Kconfig.mcux_ccm"
source "drivers/clock_control/Kconfig.mcux_ccm_rev2"
source "drivers/clock_control/Kconfig.mcux_mcg"
source "drivers/clock_control/Kconfig.mcux_pcc"
source "drivers/clock_control/Kconfig.mcux_scg"
source "drivers/clock_control/Kconfig.mcux_sim"
source "drivers/clock_control/Kconfig.mcux_syscon"
source "drivers/clock_control/Kconfig.npcx"
source "drivers/clock_control/Kconfig.rv32m1"
source "drivers/clock_control/Kconfig.esp32"
source "drivers/clock_control/Kconfig.litex"
source "drivers/clock_control/Kconfig.rcar"
source "drivers/clock_control/Kconfig.xec"
source "drivers/clock_control/Kconfig.ifx_cat1"
source "drivers/clock_control/Kconfig.cavs"
source "drivers/clock_control/Kconfig.aspeed"
source "drivers/clock_control/Kconfig.gd32"
source "drivers/clock_control/Kconfig.sam"
source "drivers/clock_control/Kconfig.smartbond"
source "drivers/clock_control/Kconfig.numaker"
source "drivers/clock_control/Kconfig.nxp_s32"
source "drivers/clock_control/Kconfig.agilex5"
source "drivers/clock_control/Kconfig.renesas_ra"
source "drivers/clock_control/Kconfig.renesas_ra_cgc"
source "drivers/clock_control/Kconfig.max32"
source "drivers/clock_control/Kconfig.ambiq"
source "drivers/clock_control/Kconfig.pwm"
source "drivers/clock_control/Kconfig.rpi_pico"
source "drivers/clock_control/Kconfig.nrf_auxpll"
source "drivers/clock_control/Kconfig.arm_scmi"
endif # CLOCK_CONTROL
``` | /content/code_sandbox/drivers/clock_control/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 565 |
```unknown
# MCUXpresso SDK SYSCON
config CLOCK_CONTROL_MCUX_SYSCON
bool "MCUX LPC clock driver"
default y
depends on DT_HAS_NXP_LPC_SYSCON_ENABLED
help
	  Enable support for the MCUX LPC SYSCON clock control driver.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.mcux_syscon | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 55 |
```c
/*
*
*/
#define DT_DRV_COMPAT nxp_kinetis_sim
#include <errno.h>
#include <soc.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/dt-bindings/clock/kinetis_sim.h>
#include <fsl_clock.h>
#define LOG_LEVEL CONFIG_CLOCK_CONTROL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control);
/* Ungate the peripheral clock named by the subsystem cell. */
static int mcux_sim_on(const struct device *dev,
		       clock_control_subsys_t sub_system)
{
	/* The subsystem cell encodes the SDK clock gate identifier. */
	clock_ip_name_t clock_ip_name = (clock_ip_name_t) sub_system;
#ifdef CONFIG_ETH_NXP_ENET
	/* Remap the devicetree ENET cell onto the SDK's ENET0 gate. */
	if ((uint32_t)sub_system == KINETIS_SIM_ENET_CLK) {
		clock_ip_name = kCLOCK_Enet0;
	}
#endif
	CLOCK_EnableClock(clock_ip_name);
	return 0;
}
/* Gate the peripheral clock named by the subsystem cell. */
static int mcux_sim_off(const struct device *dev,
			clock_control_subsys_t sub_system)
{
	/* The subsystem cell is the SDK clock gate identifier directly. */
	CLOCK_DisableClock((clock_ip_name_t)sub_system);

	return 0;
}
/* Report the frequency of the clock named by the subsystem cell. */
static int mcux_sim_get_subsys_rate(const struct device *dev,
				    clock_control_subsys_t sub_system,
				    uint32_t *rate)
{
	clock_name_t name;

	/* A few devicetree cells are aliases for SDK clock names;
	 * everything else maps one-to-one onto clock_name_t.
	 */
	switch ((uint32_t) sub_system) {
	case KINETIS_SIM_LPO_CLK:
		name = kCLOCK_LpoClk;
		break;
	case KINETIS_SIM_ENET_CLK:
		name = kCLOCK_CoreSysClk;
		break;
	case KINETIS_SIM_ENET_1588_CLK:
		name = kCLOCK_Osc0ErClk;
		break;
	default:
		name = (clock_name_t) sub_system;
		break;
	}

	*rate = CLOCK_GetFreq(name);

	return 0;
}
#if DT_NODE_HAS_STATUS(DT_INST(0, nxp_kinetis_ke1xf_sim), okay)
#define NXP_KINETIS_SIM_NODE DT_INST(0, nxp_kinetis_ke1xf_sim)
#if DT_NODE_HAS_PROP(DT_INST(0, nxp_kinetis_ke1xf_sim), clkout_source)
#define NXP_KINETIS_SIM_CLKOUT_SOURCE \
DT_PROP(DT_INST(0, nxp_kinetis_ke1xf_sim), clkout_source)
#endif
#if DT_NODE_HAS_PROP(DT_INST(0, nxp_kinetis_ke1xf_sim), clkout_divider)
#define NXP_KINETIS_SIM_CLKOUT_DIVIDER \
DT_PROP(DT_INST(0, nxp_kinetis_ke1xf_sim), clkout_divider)
#endif
#else
#define NXP_KINETIS_SIM_NODE DT_INST(0, nxp_kinetis_sim)
#if DT_NODE_HAS_PROP(DT_INST(0, nxp_kinetis_sim), clkout_source)
#define NXP_KINETIS_SIM_CLKOUT_SOURCE \
DT_PROP(DT_INST(0, nxp_kinetis_sim), clkout_source)
#endif
#if DT_NODE_HAS_PROP(DT_INST(0, nxp_kinetis_sim), clkout_divider)
#define NXP_KINETIS_SIM_CLKOUT_DIVIDER \
DT_PROP(DT_INST(0, nxp_kinetis_sim), clkout_divider)
#endif
#endif
/* Apply optional CLKOUT divider/source settings from devicetree. */
static int mcux_sim_init(const struct device *dev)
{
#ifdef NXP_KINETIS_SIM_CLKOUT_DIVIDER
	/* Read-modify-write the CLKOUT divider field in CHIPCTL. */
	SIM->CHIPCTL = (SIM->CHIPCTL & ~SIM_CHIPCTL_CLKOUTDIV_MASK)
		| SIM_CHIPCTL_CLKOUTDIV(NXP_KINETIS_SIM_CLKOUT_DIVIDER);
#endif
#ifdef NXP_KINETIS_SIM_CLKOUT_SOURCE
	/* Select which internal clock is routed to the CLKOUT pin. */
	SIM->CHIPCTL = (SIM->CHIPCTL & ~SIM_CHIPCTL_CLKOUTSEL_MASK)
		| SIM_CHIPCTL_CLKOUTSEL(NXP_KINETIS_SIM_CLKOUT_SOURCE);
#endif
	return 0;
}
/* clock_control API hooks; rate setting/configure are not supported. */
static const struct clock_control_driver_api mcux_sim_driver_api = {
	.on = mcux_sim_on,
	.off = mcux_sim_off,
	.get_rate = mcux_sim_get_subsys_rate,
};
/* Single SIM instance; PRE_KERNEL_1 so it is ready before any
 * peripheral driver that requests its clocks.
 */
DEVICE_DT_DEFINE(NXP_KINETIS_SIM_NODE,
		 mcux_sim_init,
		 NULL,
		 NULL, NULL,
		 PRE_KERNEL_1, CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		 &mcux_sim_driver_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_mcux_sim.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 921 |
```c
/*
*
*/
#define DT_DRV_COMPAT gd_gd32_cctl
#include <stdint.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/gd32.h>
#include <gd32_regs.h>
/** RCU offset (from id cell) */
#define GD32_CLOCK_ID_OFFSET(id) (((id) >> 6U) & 0xFFU)
/** RCU configuration bit (from id cell) */
#define GD32_CLOCK_ID_BIT(id) ((id)&0x1FU)
#define CPU_FREQ DT_PROP(DT_PATH(cpus, cpu_0), clock_frequency)
/** AHB prescaler exponents */
static const uint8_t ahb_exp[16] = {
0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 1U, 2U, 3U, 4U, 6U, 7U, 8U, 9U,
};
/** APB1 prescaler exponents */
static const uint8_t apb1_exp[8] = {
0U, 0U, 0U, 0U, 1U, 2U, 3U, 4U,
};
/** APB2 prescaler exponents */
static const uint8_t apb2_exp[8] = {
0U, 0U, 0U, 0U, 1U, 2U, 3U, 4U,
};
struct clock_control_gd32_config {
uint32_t base;
};
#if DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_timer)
/* timer identifiers */
#define TIMER_ID_OR_NONE(nodelabel) \
COND_CODE_1(DT_NODE_HAS_STATUS(DT_NODELABEL(nodelabel), okay), \
(DT_CLOCKS_CELL(DT_NODELABEL(nodelabel), id),), ())
static const uint16_t timer_ids[] = {
TIMER_ID_OR_NONE(timer0) /* */
TIMER_ID_OR_NONE(timer1) /* */
TIMER_ID_OR_NONE(timer2) /* */
TIMER_ID_OR_NONE(timer3) /* */
TIMER_ID_OR_NONE(timer4) /* */
TIMER_ID_OR_NONE(timer5) /* */
TIMER_ID_OR_NONE(timer6) /* */
TIMER_ID_OR_NONE(timer7) /* */
TIMER_ID_OR_NONE(timer8) /* */
TIMER_ID_OR_NONE(timer9) /* */
TIMER_ID_OR_NONE(timer10) /* */
TIMER_ID_OR_NONE(timer11) /* */
TIMER_ID_OR_NONE(timer12) /* */
TIMER_ID_OR_NONE(timer13) /* */
TIMER_ID_OR_NONE(timer14) /* */
TIMER_ID_OR_NONE(timer15) /* */
TIMER_ID_OR_NONE(timer16) /* */
};
#endif /* DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_timer) */
/* Ungate a peripheral clock: set its enable bit in the RCU register
 * encoded into the id cell.
 */
static int clock_control_gd32_on(const struct device *dev,
				 clock_control_subsys_t sys)
{
	const struct clock_control_gd32_config *cfg = dev->config;
	const uint16_t clk_id = *(uint16_t *)sys;

	sys_set_bit(cfg->base + GD32_CLOCK_ID_OFFSET(clk_id),
		    GD32_CLOCK_ID_BIT(clk_id));

	return 0;
}
/* Gate a peripheral clock: clear its enable bit in the RCU register
 * encoded into the id cell.
 */
static int clock_control_gd32_off(const struct device *dev,
				  clock_control_subsys_t sys)
{
	const struct clock_control_gd32_config *cfg = dev->config;
	const uint16_t clk_id = *(uint16_t *)sys;

	sys_clear_bit(cfg->base + GD32_CLOCK_ID_OFFSET(clk_id),
		      GD32_CLOCK_ID_BIT(clk_id));

	return 0;
}
/*
 * Compute the operating frequency (Hz) of the peripheral named by @sys.
 *
 * The bus frequency is derived from the CPU clock and the AHB/APB
 * prescaler fields of RCU_CFG0; timers then get an extra multiplier
 * that depends on the APB prescaler (and TIMERSEL on GD32F4xx).
 */
static int clock_control_gd32_get_rate(const struct device *dev,
				       clock_control_subsys_t sys,
				       uint32_t *rate)
{
	const struct clock_control_gd32_config *config = dev->config;
	uint16_t id = *(uint16_t *)sys;
	uint32_t cfg;
	uint8_t psc;
	cfg = sys_read32(config->base + RCU_CFG0_OFFSET);
	/* Pick the prescaler for the bus this peripheral sits on, keyed by
	 * which RCU enable register its id cell points at.
	 */
	switch (GD32_CLOCK_ID_OFFSET(id)) {
#if defined(CONFIG_SOC_SERIES_GD32F4XX)
	case RCU_AHB1EN_OFFSET:
	case RCU_AHB2EN_OFFSET:
	case RCU_AHB3EN_OFFSET:
#else
	case RCU_AHBEN_OFFSET:
#endif
		psc = (cfg & RCU_CFG0_AHBPSC_MSK) >> RCU_CFG0_AHBPSC_POS;
		*rate = CPU_FREQ >> ahb_exp[psc];
		break;
	case RCU_APB1EN_OFFSET:
#if !defined(CONFIG_SOC_SERIES_GD32VF103) && \
	!defined(CONFIG_SOC_SERIES_GD32A50X) && \
	!defined(CONFIG_SOC_SERIES_GD32L23X)
	case RCU_ADDAPB1EN_OFFSET:
#endif
		psc = (cfg & RCU_CFG0_APB1PSC_MSK) >> RCU_CFG0_APB1PSC_POS;
		*rate = CPU_FREQ >> apb1_exp[psc];
		break;
	case RCU_APB2EN_OFFSET:
		psc = (cfg & RCU_CFG0_APB2PSC_MSK) >> RCU_CFG0_APB2PSC_POS;
		*rate = CPU_FREQ >> apb2_exp[psc];
		break;
	default:
		return -ENOTSUP;
	}
#if DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_timer)
	/* handle timer clocks: apply the timer multiplier on top of the
	 * APB rate computed above when the id belongs to a timer.
	 */
	for (size_t i = 0U; i < ARRAY_SIZE(timer_ids); i++) {
		if (id != timer_ids[i]) {
			continue;
		}
#if defined(CONFIG_SOC_SERIES_GD32F4XX)
		uint32_t cfg1 = sys_read32(config->base + RCU_CFG1_OFFSET);
		/*
		 * The TIMERSEL bit in RCU_CFG1 controls the clock frequency of
		 * all the timers connected to the APB1 and APB2 domains.
		 *
		 * Up to a certain threshold value of APB{1,2} prescaler, timer
		 * clock equals to CK_AHB. This threshold value depends on
		 * TIMERSEL setting (2 if TIMERSEL=0, 4 if TIMERSEL=1). Above
		 * threshold, timer clock is set to a multiple of the APB
		 * domain clock CK_APB{1,2} (2 if TIMERSEL=0, 4 if TIMERSEL=1).
		 */
		/* TIMERSEL = 0 */
		if ((cfg1 & RCU_CFG1_TIMERSEL_MSK) == 0U) {
			if (psc <= 2U) {
				*rate = CPU_FREQ;
			} else {
				*rate *= 2U;
			}
		/* TIMERSEL = 1 */
		} else {
			if (psc <= 4U) {
				*rate = CPU_FREQ;
			} else {
				*rate *= 4U;
			}
		}
#else
		/*
		 * If the APB prescaler equals 1, the timer clock frequencies
		 * are set to the same frequency as that of the APB domain.
		 * Otherwise, they are set to twice the frequency of the APB
		 * domain.
		 */
		if (psc != 1U) {
			*rate *= 2U;
		}
#endif /* CONFIG_SOC_SERIES_GD32F4XX */
	}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_timer) */
	return 0;
}
/* Report whether the peripheral's RCU enable bit is currently set. */
static enum clock_control_status
clock_control_gd32_get_status(const struct device *dev,
			      clock_control_subsys_t sys)
{
	const struct clock_control_gd32_config *cfg = dev->config;
	const uint16_t clk_id = *(uint16_t *)sys;
	bool enabled = sys_test_bit(cfg->base + GD32_CLOCK_ID_OFFSET(clk_id),
				    GD32_CLOCK_ID_BIT(clk_id)) != 0;

	return enabled ? CLOCK_CONTROL_STATUS_ON : CLOCK_CONTROL_STATUS_OFF;
}
/* clock_control API implementation for the GD32 RCU. */
static const struct clock_control_driver_api clock_control_gd32_api = {
	.on = clock_control_gd32_on,
	.off = clock_control_gd32_off,
	.get_rate = clock_control_gd32_get_rate,
	.get_status = clock_control_gd32_get_status,
};
/* Register base is taken from the parent RCU node in devicetree. */
static const struct clock_control_gd32_config config = {
	.base = DT_REG_ADDR(DT_INST_PARENT(0)),
};
DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, &config, PRE_KERNEL_1,
		      CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		      &clock_control_gd32_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_gd32.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,852 |
```c
/*
*
*/
#define DT_DRV_COMPAT openisa_rv32m1_pcc
#include <errno.h>
#include <soc.h>
#include <zephyr/drivers/clock_control.h>
#include <fsl_clock.h>
#define LOG_LEVEL CONFIG_CLOCK_CONTROL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control);
struct rv32m1_pcc_config {
uint32_t base_address;
};
#define DEV_BASE(dev) \
(((struct rv32m1_pcc_config *)(dev->config))->base_address)
/* Translate a subsystem cell (PCC register offset) into an SDK clock id
 * anchored at this controller's base address.
 */
static inline clock_ip_name_t clock_ip(const struct device *dev,
				       clock_control_subsys_t sub_system)
{
	return MAKE_PCC_REGADDR(DEV_BASE(dev), POINTER_TO_UINT(sub_system));
}
/* Ungate the peripheral clock at its PCC slot. */
static int rv32m1_pcc_on(const struct device *dev,
			 clock_control_subsys_t sub_system)
{
	CLOCK_EnableClock(clock_ip(dev, sub_system));

	return 0;
}
/* Gate the peripheral clock at its PCC slot. */
static int rv32m1_pcc_off(const struct device *dev,
			  clock_control_subsys_t sub_system)
{
	CLOCK_DisableClock(clock_ip(dev, sub_system));

	return 0;
}
/* Query the SDK for the functional clock frequency of a PCC slot. */
static int rv32m1_pcc_get_rate(const struct device *dev,
			       clock_control_subsys_t sub_system,
			       uint32_t *rate)
{
	*rate = CLOCK_GetIpFreq(clock_ip(dev, sub_system));

	return 0;
}
/* clock_control API implementation for the RV32M1 PCC. */
static const struct clock_control_driver_api rv32m1_pcc_api = {
	.on = rv32m1_pcc_on,
	.off = rv32m1_pcc_off,
	.get_rate = rv32m1_pcc_get_rate,
};
#define RV32M1_PCC_INIT(inst) \
static const struct rv32m1_pcc_config rv32m1_pcc##inst##_config = { \
.base_address = DT_INST_REG_ADDR(inst) \
}; \
\
DEVICE_DT_INST_DEFINE(inst, \
NULL, \
NULL, \
NULL, &rv32m1_pcc##inst##_config, \
PRE_KERNEL_1, \
CONFIG_CLOCK_CONTROL_INIT_PRIORITY, \
&rv32m1_pcc_api);
DT_INST_FOREACH_STATUS_OKAY(RV32M1_PCC_INIT)
``` | /content/code_sandbox/drivers/clock_control/clock_control_rv32m1_pcc.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 490 |
```c
/*
*
*
*/
#include <soc.h>
#include <stm32_ll_bus.h>
#include <stm32_ll_pwr.h>
#include <stm32_ll_rcc.h>
#include <stm32_ll_utils.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include <zephyr/sys/time_units.h>
#include "clock_stm32_ll_common.h"
#if defined(STM32_PLL_ENABLED)
#if defined(LL_RCC_MSIRANGESEL_RUN)
#define CALC_RUN_MSI_FREQ(range) __LL_RCC_CALC_MSI_FREQ(LL_RCC_MSIRANGESEL_RUN, \
range << RCC_CR_MSIRANGE_Pos);
#else
#define CALC_RUN_MSI_FREQ(range) __LL_RCC_CALC_MSI_FREQ(range << RCC_CR_MSIRANGE_Pos);
#endif
/**
 * @brief Return the LL constant for the configured PLL input source.
 */
__unused
static uint32_t get_pll_source(void)
{
	if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		return LL_RCC_PLLSOURCE_HSI;
	}
	if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		return LL_RCC_PLLSOURCE_HSE;
	}
	if (IS_ENABLED(STM32_PLL_SRC_MSI)) {
		return LL_RCC_PLLSOURCE_MSI;
	}

	__ASSERT(0, "Invalid source");
	return 0;
}
/**
 * @brief Get the PLL input source frequency in Hz.
 *
 * The MSI frequency is derived from the configured MSI range
 * (run-mode range on series that distinguish run/standby ranges).
 */
__unused
uint32_t get_pllsrc_frequency(void)
{
	if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		return STM32_HSI_FREQ;
	} else if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		return STM32_HSE_FREQ;
#if defined(STM32_MSI_ENABLED)
	} else if (IS_ENABLED(STM32_PLL_SRC_MSI)) {
		return CALC_RUN_MSI_FREQ(STM32_MSI_RANGE);
#endif
	}
	__ASSERT(0, "Invalid source");
	return 0;
}
/**
 * @brief Set up the system PLL from the devicetree-derived parameters.
 */
void config_pll_sysclock(void)
{
#ifdef PWR_CR5_R1MODE
	/* set power boost mode for sys clock greater than 80MHz */
	if (sys_clock_hw_cycles_per_sec() >= MHZ(80)) {
		LL_PWR_EnableRange1BoostMode();
	}
#endif /* PWR_CR5_R1MODE */
	/* Program M/N/R and enable the PLL R output toward SYSCLK. */
	LL_RCC_PLL_ConfigDomain_SYS(get_pll_source(),
				    pllm(STM32_PLL_M_DIVISOR),
				    STM32_PLL_N_MULTIPLIER,
				    pllr(STM32_PLL_R_DIVISOR));
	LL_RCC_PLL_EnableDomain_SYS();
}
#endif /* defined(STM32_PLL_ENABLED) */
/**
 * @brief Activate clocks that must always be running.
 */
void config_enable_default_clocks(void)
{
#ifdef LL_APB1_GRP1_PERIPH_PWR
	/* Enable the power interface clock */
	LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_PWR);
#endif
#if defined(CONFIG_SOC_SERIES_STM32WBX)
	/* HW semaphore clock enable (used for CPU1/CPU2 arbitration) */
	LL_AHB3_GRP1_EnableClock(LL_AHB3_GRP1_PERIPH_HSEM);
#endif
}
``` | /content/code_sandbox/drivers/clock_control/clock_stm32l4_l5_wb_wl.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 656 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_RENESAS_RENESAS_CPG_MSSR_H_
#define ZEPHYR_DRIVERS_RENESAS_RENESAS_CPG_MSSR_H_
#include <zephyr/spinlock.h>
#include <zephyr/sys/sys_io.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/device_mmio.h>
#define CPG_NUM_DOMAINS 2
/* One node of the CPG clock tree (core or module domain). */
struct cpg_clk_info_table {
	uint32_t domain;	/* CPG_CORE or CPG_MOD */
	uint32_t module;	/* clock/module id within the domain */
	mem_addr_t offset;	/* control register offset; RCAR_CPG_NONE if none */
	uint32_t parent_id;	/* parent clock id; RCAR_CPG_NONE for roots */
	int64_t in_freq;	/* input (parent) frequency; RCAR_CPG_NONE if unknown */
	int64_t out_freq;	/* output frequency; RCAR_CPG_NONE if unknown */
	/* TODO: add setting of this field and add function for getting status */
	enum clock_control_status status;
	struct cpg_clk_info_table *parent;	/* resolved from parent_id */
	struct cpg_clk_info_table *children_list;	/* first child */
	struct cpg_clk_info_table *next_sibling;	/* next child of parent */
};
/* Per-instance driver data shared by the R-Car CPG/MSSR drivers. */
struct rcar_cpg_mssr_data {
	DEVICE_MMIO_RAM; /* Must be first */
	struct cpg_clk_info_table *clk_info_table[CPG_NUM_DOMAINS];	/* per-domain node arrays */
	const uint32_t clk_info_table_size[CPG_NUM_DOMAINS];	/* entries per domain */
	struct k_spinlock lock;	/* guards register and tree accesses */
	uint32_t (*get_div_helper)(uint32_t reg, uint32_t module);	/* SoC-specific divider decode */
	int (*set_rate_helper)(uint32_t module, uint32_t *div, uint32_t *div_mask);	/* SoC-specific divider encode */
};
#define RCAR_CPG_NONE -1
#define RCAR_CPG_KHZ(khz) ((khz) * 1000U)
#define RCAR_CPG_MHZ(mhz) (RCAR_CPG_KHZ(mhz) * 1000U)
#define RCAR_CORE_CLK_INFO_ITEM(id, off, par_id, in_frq) \
{ \
.domain = CPG_CORE, \
.module = id, \
.offset = off, \
.parent_id = par_id, \
.in_freq = in_frq, \
.out_freq = RCAR_CPG_NONE, \
.status = CLOCK_CONTROL_STATUS_UNKNOWN, \
.parent = NULL, \
.children_list = NULL, \
.next_sibling = NULL, \
}
#define RCAR_MOD_CLK_INFO_ITEM(id, par_id) \
{ \
.domain = CPG_MOD, \
.module = id, \
.offset = RCAR_CPG_NONE, \
.parent_id = par_id, \
.in_freq = RCAR_CPG_NONE, \
.out_freq = RCAR_CPG_NONE, \
.status = CLOCK_CONTROL_STATUS_UNKNOWN, \
.parent = NULL, \
.children_list = NULL, \
.next_sibling = NULL, \
}
#ifdef CONFIG_SOC_SERIES_RCAR_GEN3
/* Software Reset Clearing Register offsets */
#define SRSTCLR(i) (0x940 + (i) * 4)
/* CPG write protect offset */
#define CPGWPR 0x900
/* Realtime Module Stop Control Register offsets */
static const uint16_t mstpcr[] = {
0x110, 0x114, 0x118, 0x11c,
0x120, 0x124, 0x128, 0x12c,
0x980, 0x984, 0x988, 0x98c,
};
/* Software Reset Register offsets */
static const uint16_t srcr[] = {
0x0A0, 0x0A8, 0x0B0, 0x0B8,
0x0BC, 0x0C4, 0x1C8, 0x1CC,
0x920, 0x924, 0x928, 0x92C,
};
#elif defined(CONFIG_SOC_SERIES_RCAR_GEN4)
/* Software Reset Clearing Register offsets */
#define SRSTCLR(i) (0x2C80 + (i) * 4)
/* CPG write protect offset */
#define CPGWPR 0x0
/* Realtime Module Stop Control Register offsets */
static const uint16_t mstpcr[] = {
0x2D00, 0x2D04, 0x2D08, 0x2D0C,
0x2D10, 0x2D14, 0x2D18, 0x2D1C,
0x2D20, 0x2D24, 0x2D28, 0x2D2C,
0x2D30, 0x2D34, 0x2D38, 0x2D3C,
0x2D40, 0x2D44, 0x2D48, 0x2D4C,
0x2D50, 0x2D54, 0x2D58, 0x2D5C,
0x2D60, 0x2D64, 0x2D68, 0x2D6C,
};
/* Software Reset Register offsets */
static const uint16_t srcr[] = {
0x2C00, 0x2C04, 0x2C08, 0x2C0C,
0x2C10, 0x2C14, 0x2C18, 0x2C1C,
0x2C20, 0x2C24, 0x2C28, 0x2C2C,
0x2C30, 0x2C34, 0x2C38, 0x2C3C,
0x2C40, 0x2C44, 0x2C48, 0x2C4C,
0x2C50, 0x2C54, 0x2C58, 0x2C5C,
0x2C60, 0x2C64, 0x2C68, 0x2C6C,
};
#endif /* CONFIG_SOC_SERIES_RCAR_GEN3 */
void rcar_cpg_write(uint32_t base_address, uint32_t reg, uint32_t val);
int rcar_cpg_mstp_clock_endisable(uint32_t base_address, uint32_t module, bool enable);
struct cpg_clk_info_table *rcar_cpg_find_clk_info_by_module_id(const struct device *dev,
uint32_t domain,
uint32_t id);
void rcar_cpg_build_clock_relationship(const struct device *dev);
void rcar_cpg_update_all_in_out_freq(const struct device *dev);
int rcar_cpg_get_rate(const struct device *dev, clock_control_subsys_t sys, uint32_t *rate);
int rcar_cpg_set_rate(const struct device *dev, clock_control_subsys_t sys,
clock_control_subsys_rate_t rate);
#endif /* ZEPHYR_DRIVERS_RENESAS_RENESAS_CPG_MSSR_H_ */
``` | /content/code_sandbox/drivers/clock_control/clock_control_renesas_cpg_mssr.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,576 |
```c
/*
*
*/
#include <soc.h>
#include <stm32_ll_bus.h>
#include <stm32_ll_pwr.h>
#include <stm32_ll_rcc.h>
#include <stm32_ll_system.h>
#include <stm32_ll_utils.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include "stm32_hsem.h"
/* Macros to fill up prescaler values */
#define fn_ahb_prescaler(v) LL_RCC_SYSCLK_DIV_ ## v
#define ahb_prescaler(v) fn_ahb_prescaler(v)
#define fn_ahb5_prescaler(v) LL_RCC_AHB5_DIV_ ## v
#define ahb5_prescaler(v) fn_ahb5_prescaler(v)
#define fn_apb1_prescaler(v) LL_RCC_APB1_DIV_ ## v
#define apb1_prescaler(v) fn_apb1_prescaler(v)
#define fn_apb2_prescaler(v) LL_RCC_APB2_DIV_ ## v
#define apb2_prescaler(v) fn_apb2_prescaler(v)
#define fn_apb7_prescaler(v) LL_RCC_APB7_DIV_ ## v
#define apb7_prescaler(v) fn_apb7_prescaler(v)
#define RCC_CALC_FLASH_FREQ __LL_RCC_CALC_HCLK_FREQ
#define GET_CURRENT_FLASH_PRESCALER LL_RCC_GetAHBPrescaler
/* Derive a bus clock by applying an integer prescaler to its parent. */
static uint32_t get_bus_clock(uint32_t clock, uint32_t prescaler)
{
	const uint32_t bus_freq = clock / prescaler;

	return bus_freq;
}
/** @brief Verifies clock is part of active clock configuration
 *
 * Returns 0 when @p src_clk is SYSCLK or an oscillator/PLL output that
 * the devicetree configuration enables; -ENOTSUP otherwise.
 */
__unused
static int enabled_clock(uint32_t src_clk)
{
	if ((src_clk == STM32_SRC_SYSCLK) ||
	    ((src_clk == STM32_SRC_HSE) && IS_ENABLED(STM32_HSE_ENABLED)) ||
	    ((src_clk == STM32_SRC_HSI16) && IS_ENABLED(STM32_HSI_ENABLED)) ||
	    ((src_clk == STM32_SRC_LSE) && IS_ENABLED(STM32_LSE_ENABLED)) ||
	    ((src_clk == STM32_SRC_LSI) && IS_ENABLED(STM32_LSI_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL1_P) && IS_ENABLED(STM32_PLL_P_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL1_Q) && IS_ENABLED(STM32_PLL_Q_ENABLED)) ||
	    ((src_clk == STM32_SRC_PLL1_R) && IS_ENABLED(STM32_PLL_R_ENABLED))) {
		return 0;
	}
	return -ENOTSUP;
}
/* Enable the peripheral gate encoded in @p sub_system (bus + bit mask). */
static inline int stm32_clock_control_on(const struct device *dev,
					 clock_control_subsys_t sub_system)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);
	volatile int temp;
	ARG_UNUSED(dev);
	if (IN_RANGE(pclken->bus, STM32_PERIPH_BUS_MIN, STM32_PERIPH_BUS_MAX) == 0) {
		/* Attempt to toggle a wrong periph clock bit */
		return -ENOTSUP;
	}
	sys_set_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + pclken->bus,
		     pclken->enr);
	/* Delay after enabling the clock, to allow it to become active:
	 * the dummy read forces the bus write to complete.
	 */
	temp = sys_read32(DT_REG_ADDR(DT_NODELABEL(rcc)) + pclken->bus);
	UNUSED(temp);
	return 0;
}
/* Disable the peripheral gate encoded in @p sub_system. */
static inline int stm32_clock_control_off(const struct device *dev,
					  clock_control_subsys_t sub_system)
{
	struct stm32_pclken *clk = (struct stm32_pclken *)sub_system;

	ARG_UNUSED(dev);

	if (!IN_RANGE(clk->bus, STM32_PERIPH_BUS_MIN, STM32_PERIPH_BUS_MAX)) {
		/* Not a gated peripheral clock cell: nothing to clear. */
		return -ENOTSUP;
	}

	sys_clear_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + clk->bus, clk->enr);

	return 0;
}
/* Route a domain clock: select the source encoded in the enr cell for
 * the peripheral's kernel-clock mux (clear-then-set of the mux field).
 */
static inline int stm32_clock_control_configure(const struct device *dev,
						clock_control_subsys_t sub_system,
						void *data)
{
#if defined(STM32_SRC_CLOCK_MIN)
	/* At least one alt src clock available */
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);
	int err;
	ARG_UNUSED(dev);
	ARG_UNUSED(data);
	err = enabled_clock(pclken->bus);
	if (err < 0) {
		/* Attempt to configure a src clock not available or not valid */
		return err;
	}
	/* Clear the mux field, then program the requested source value. */
	sys_clear_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + STM32_CLOCK_REG_GET(pclken->enr),
		       STM32_CLOCK_MASK_GET(pclken->enr) << STM32_CLOCK_SHIFT_GET(pclken->enr));
	sys_set_bits(DT_REG_ADDR(DT_NODELABEL(rcc)) + STM32_CLOCK_REG_GET(pclken->enr),
		     STM32_CLOCK_VAL_GET(pclken->enr) << STM32_CLOCK_SHIFT_GET(pclken->enr));
	return 0;
#else
	/* No src clock available: Not supported */
	return -ENOTSUP;
#endif
}
/* Return the frequency (Hz) of the oscillator feeding PLL1. */
__unused
static uint32_t get_pllsrc_frequency(void)
{
	if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		return STM32_HSI_FREQ;
	}
	if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		return STM32_HSE_FREQ;
	}

	__ASSERT(0, "No PLL Source configured");
	return 0;
}
/* Return the LL constant selecting the configured PLL1 input source. */
__unused
static uint32_t get_pllsrc(void)
{
	if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		return LL_RCC_PLL1SOURCE_HSI;
	}
	if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		return LL_RCC_PLL1SOURCE_HSE;
	}

	__ASSERT(0, "No PLL Source configured");
	return 0;
}
/*
 * Report the frequency (Hz) of a bus or source clock.
 *
 * Bus clocks are derived from SystemCoreClock plus the devicetree
 * prescalers; source clocks are computed from their configured
 * oscillator/PLL parameters.
 */
static int stm32_clock_control_get_subsys_rate(const struct device *dev,
					       clock_control_subsys_t sub_system,
					       uint32_t *rate)
{
	struct stm32_pclken *pclken = (struct stm32_pclken *)(sub_system);
	/*
	 * Get AHB Clock (= SystemCoreClock = SYSCLK/prescaler)
	 * SystemCoreClock is preferred to CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC
	 * since it will be updated after clock configuration and hence
	 * more likely to contain actual clock speed
	 */
	uint32_t ahb_clock = SystemCoreClock;
	uint32_t apb1_clock = get_bus_clock(ahb_clock, STM32_APB1_PRESCALER);
	uint32_t apb2_clock = get_bus_clock(ahb_clock, STM32_APB2_PRESCALER);
	uint32_t apb7_clock = get_bus_clock(ahb_clock, STM32_APB7_PRESCALER);
	uint32_t ahb5_clock;
	ARG_UNUSED(dev);
	if (IS_ENABLED(STM32_SYSCLK_SRC_PLL)) {
		/* PLL is the SYSCLK source, use 'ahb5-prescaler' */
		ahb5_clock = get_bus_clock(ahb_clock * STM32_AHB_PRESCALER,
					   STM32_AHB5_PRESCALER);
	} else {
		/* PLL is not the SYSCLK source, use 'ahb5-div'(if set) */
		if (IS_ENABLED(STM32_AHB5_DIV)) {
			ahb5_clock = ahb_clock * STM32_AHB_PRESCALER / 2;
		} else {
			ahb5_clock = ahb_clock * STM32_AHB_PRESCALER;
		}
	}
	__ASSERT(ahb5_clock <= MHZ(32), "AHB5 clock frequency exceeds 32 MHz");
	switch (pclken->bus) {
	case STM32_CLOCK_BUS_AHB1:
	case STM32_CLOCK_BUS_AHB2:
	case STM32_CLOCK_BUS_AHB4:
		*rate = ahb_clock;
		break;
	case STM32_CLOCK_BUS_AHB5:
		*rate = ahb5_clock;
		break;
	case STM32_CLOCK_BUS_APB1:
	case STM32_CLOCK_BUS_APB1_2:
		*rate = apb1_clock;
		break;
	case STM32_CLOCK_BUS_APB2:
		*rate = apb2_clock;
		break;
	case STM32_CLOCK_BUS_APB7:
		*rate = apb7_clock;
		break;
	case STM32_SRC_SYSCLK:
		*rate = SystemCoreClock * STM32_CORE_PRESCALER;
		break;
#if defined(STM32_PLL_ENABLED)
	case STM32_SRC_PLL1_P:
		*rate = __LL_RCC_CALC_PLL1PCLK_FREQ(get_pllsrc_frequency(),
						    STM32_PLL_M_DIVISOR,
						    STM32_PLL_N_MULTIPLIER,
						    STM32_PLL_P_DIVISOR);
		break;
	case STM32_SRC_PLL1_Q:
		*rate = __LL_RCC_CALC_PLL1QCLK_FREQ(get_pllsrc_frequency(),
						    STM32_PLL_M_DIVISOR,
						    STM32_PLL_N_MULTIPLIER,
						    STM32_PLL_Q_DIVISOR);
		break;
	case STM32_SRC_PLL1_R:
		*rate = __LL_RCC_CALC_PLL1RCLK_FREQ(get_pllsrc_frequency(),
						    STM32_PLL_M_DIVISOR,
						    STM32_PLL_N_MULTIPLIER,
						    STM32_PLL_R_DIVISOR);
		break;
#endif /* STM32_PLL_ENABLED */
#if defined(STM32_LSE_ENABLED)
	case STM32_SRC_LSE:
		*rate = STM32_LSE_FREQ;
		break;
#endif
#if defined(STM32_LSI_ENABLED)
	case STM32_SRC_LSI:
		*rate = STM32_LSI_FREQ;
		break;
#endif
#if defined(STM32_HSI_ENABLED)
	case STM32_SRC_HSI16:
		*rate = STM32_HSI_FREQ;
		break;
#endif
#if defined(STM32_HSE_ENABLED)
	case STM32_SRC_HSE:
		if (IS_ENABLED(STM32_HSE_DIV2)) {
			*rate = STM32_HSE_FREQ / 2;
		} else {
			*rate = STM32_HSE_FREQ;
		}
		break;
#endif
	default:
		return -ENOTSUP;
	}
	return 0;
}
/* Report whether a gated peripheral clock or a domain source is active. */
static enum clock_control_status stm32_clock_control_get_status(const struct device *dev,
								clock_control_subsys_t sub_system)
{
	struct stm32_pclken *clk = (struct stm32_pclken *)sub_system;

	ARG_UNUSED(dev);

	if (IN_RANGE(clk->bus, STM32_PERIPH_BUS_MIN, STM32_PERIPH_BUS_MAX)) {
		/* Gated clock: on iff every enable bit of the cell is set. */
		uint32_t reg = sys_read32(DT_REG_ADDR(DT_NODELABEL(rcc)) + clk->bus);

		return ((reg & clk->enr) == clk->enr) ? CLOCK_CONTROL_STATUS_ON
						      : CLOCK_CONTROL_STATUS_OFF;
	}

	/* Domain clock source: "on" when part of the active configuration. */
	return (enabled_clock(clk->bus) == 0) ? CLOCK_CONTROL_STATUS_ON
					      : CLOCK_CONTROL_STATUS_OFF;
}
/* Zephyr clock_control driver API vtable for this RCC driver */
static const struct clock_control_driver_api stm32_clock_control_api = {
	.on = stm32_clock_control_on,
	.off = stm32_clock_control_off,
	.get_rate = stm32_clock_control_get_subsys_rate,
	.get_status = stm32_clock_control_get_status,
	.configure = stm32_clock_control_configure,
};
/*
 * Classify the PLL VCO input frequency (source clock / M divider) into
 * one of the two LL input-range constants.
 *
 * Returns 0 on success, -ERANGE if the frequency falls outside 4-16 MHz.
 */
__unused
static int get_vco_input_range(uint32_t m_div, uint32_t *range)
{
	const uint32_t vco_in = get_pllsrc_frequency() / m_div;

	if (vco_in < MHZ(4) || vco_in > MHZ(16)) {
		return -ERANGE;
	}

	*range = (vco_in <= MHZ(8)) ? LL_RCC_PLLINPUTRANGE_4_8
				    : LL_RCC_PLLINPUTRANGE_8_16;

	return 0;
}
/*
 * Select the voltage regulator scale matching the target HCLK frequency
 * (scale 2 up to 16 MHz, scale 1 above), then wait for the regulator
 * output to settle.
 */
static void set_regu_voltage(uint32_t hclk_freq)
{
	const uint32_t scale = (hclk_freq <= MHZ(16)) ? LL_PWR_REGU_VOLTAGE_SCALE2
						      : LL_PWR_REGU_VOLTAGE_SCALE1;

	LL_PWR_SetRegulVoltageScaling(scale);

	/* Busy-wait until the regulator reports the new voltage is stable */
	while (LL_PWR_IsActiveFlag_VOS() == 0) {
	}
}
/*
 * Unconditionally switch the system clock source to HSI.
 *
 * Also re-enables HSE afterwards when it is configured, working around a
 * silicon erratum (see comment below). Note: statement order matters here.
 */
__unused
static void stm32_clock_switch_to_hsi(void)
{
	/* Enable HSI if not enabled */
	if (LL_RCC_HSI_IsReady() != 1) {
		/* Enable HSI */
		LL_RCC_HSI_Enable();
		while (LL_RCC_HSI_IsReady() != 1) {
			/* Wait for HSI ready */
		}
	}
	/* Set HSI as SYSCLCK source */
	LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_HSI);
	while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_HSI) {
		/* Busy-wait until the switch takes effect */
	}
	/* Erratum 2.2.4: Spurious deactivation of HSE when HSI is selected as
	 * system clock source
	 * Re-enable HSE clock if required after switch source to HSI
	 */
	if (IS_ENABLED(STM32_HSE_ENABLED)) {
		if (IS_ENABLED(STM32_HSE_DIV2)) {
			/* Restore the HSE/2 prescaler before re-enabling HSE */
			LL_RCC_HSE_EnablePrescaler();
		}
		/* Enable HSE */
		LL_RCC_HSE_Enable();
		while (LL_RCC_HSE_IsReady() != 1) {
			/* Wait for HSE ready */
		}
	}
}
/*
 * Configure and start PLL1 from the devicetree-selected source (HSE or HSI).
 *
 * The PLL is disabled before reprogramming, each enabled output (P/Q/R)
 * gets its divider and domain enable, and the function blocks until the
 * PLL reports lock. When the PLL is not enabled in devicetree, the PLL
 * source is explicitly set to NONE.
 *
 * @return 0 on success, -ENOTSUP for an unsupported PLL source,
 *         or a negative value from get_vco_input_range().
 */
__unused
static int set_up_plls(void)
{
#if defined(STM32_PLL_ENABLED)
	int r;
	uint32_t vco_input_range;

	/* PLL must be off while its configuration registers are written */
	LL_RCC_PLL1_Disable();
	/* Configure PLL source */
	/* Can be HSE, HSI */
	if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		/* Main PLL configuration and activation */
		LL_RCC_PLL1_SetMainSource(LL_RCC_PLL1SOURCE_HSE);
	} else if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		/* Main PLL configuration and activation */
		LL_RCC_PLL1_SetMainSource(LL_RCC_PLL1SOURCE_HSI);
	} else {
		return -ENOTSUP;
	}
	r = get_vco_input_range(STM32_PLL_M_DIVISOR, &vco_input_range);
	if (r < 0) {
		return r;
	}
	LL_RCC_PLL1_SetDivider(STM32_PLL_M_DIVISOR);
	LL_RCC_PLL1_SetVCOInputRange(vco_input_range);
	LL_RCC_PLL1_SetN(STM32_PLL_N_MULTIPLIER);
	/* Integer mode only: fractional part of N is not used */
	LL_RCC_PLL1FRACN_Disable();
	if (IS_ENABLED(STM32_PLL_P_ENABLED)) {
		LL_RCC_PLL1_SetP(STM32_PLL_P_DIVISOR);
		LL_RCC_PLL1_EnableDomain_PLL1P();
	}
	if (IS_ENABLED(STM32_PLL_Q_ENABLED)) {
		LL_RCC_PLL1_SetQ(STM32_PLL_Q_DIVISOR);
		LL_RCC_PLL1_EnableDomain_PLL1Q();
	}
	if (IS_ENABLED(STM32_PLL_R_ENABLED)) {
		LL_RCC_PLL1_SetR(STM32_PLL_R_DIVISOR);
		LL_RCC_PLL1_EnableDomain_PLL1R();
	}
	/* Enable PLL */
	LL_RCC_PLL1_Enable();
	while (LL_RCC_PLL1_IsReady() != 1U) {
	/* Wait for PLL ready */
	}
#else
	/* Init PLL source to None */
	LL_RCC_PLL1_SetMainSource(LL_RCC_PLL1SOURCE_NONE);
#endif /* STM32_PLL_ENABLED */
	return 0;
}
/*
 * Start every fixed-rate oscillator enabled in devicetree (HSE, HSI,
 * LSI, LSE), blocking until each reports ready.
 *
 * LSI and LSE live in the backup domain, so backup-domain access is
 * enabled around their configuration (and, for LSI, disabled again).
 */
static void set_up_fixed_clock_sources(void)
{
	if (IS_ENABLED(STM32_HSE_ENABLED)) {
		if (IS_ENABLED(STM32_HSE_DIV2)) {
			/* Apply the HSE/2 prescaler before starting HSE */
			LL_RCC_HSE_EnablePrescaler();
		}
		/* Enable HSE */
		LL_RCC_HSE_Enable();
		while (LL_RCC_HSE_IsReady() != 1) {
		/* Wait for HSE ready */
		}
	}
	if (IS_ENABLED(STM32_HSI_ENABLED)) {
		/* Enable HSI if not enabled */
		if (LL_RCC_HSI_IsReady() != 1) {
			/* Enable HSI */
			LL_RCC_HSI_Enable();
			while (LL_RCC_HSI_IsReady() != 1) {
			/* Wait for HSI ready */
			}
		}
	}
	if (IS_ENABLED(STM32_LSI_ENABLED)) {
		/* LSI belongs to the back-up domain, enable access.*/
		/* Set the DBP bit in the Power control register 1 (PWR_CR1) */
		LL_PWR_EnableBkUpAccess();
		while (!LL_PWR_IsEnabledBkUpAccess()) {
			/* Wait for Backup domain access */
		}
		LL_RCC_LSI1_Enable();
		while (LL_RCC_LSI1_IsReady() != 1) {
		}
		LL_PWR_DisableBkUpAccess();
	}
	if (IS_ENABLED(STM32_LSE_ENABLED)) {
		/* LSE belongs to the back-up domain, enable access.*/
		/* Set the DBP bit in the Power control register 1 (PWR_CR1) */
		LL_PWR_EnableBkUpAccess();
		while (!LL_PWR_IsEnabledBkUpAccess()) {
			/* Wait for Backup domain access */
		}
		/* Configure driving capability */
		LL_RCC_LSE_SetDriveCapability(STM32_LSE_DRIVING << RCC_BDCR1_LSEDRV_Pos);
		/* Enable LSE Oscillator (32.768 kHz) */
		LL_RCC_LSE_Enable();
		while (!LL_RCC_LSE_IsReady()) {
			/* Wait for LSE ready */
		}
		/* Enable LSESYS additionally */
		LL_RCC_LSE_EnablePropagation();
		/* Wait till LSESYS is ready */
		while (!LL_RCC_LSE_IsPropagationReady()) {
		}
	}
}
/**
 * @brief Initialize clocks for the stm32
 *
 * This routine is called to enable and configure the clocks and PLL
 * of the soc on the board. It depends on the board definition.
 * This function is called on the startup and also to restore the config
 * when exiting for low power mode.
 *
 * Flash latency is raised before the frequency increase and lowered
 * after a decrease, so the flash wait states are always sufficient for
 * the current clock.
 *
 * @param dev clock device struct
 *
 * @return 0 on success, negative errno from set_up_plls() otherwise
 */
int stm32_clock_control_init(const struct device *dev)
{
	uint32_t old_flash_freq;
	int r;

	ARG_UNUSED(dev);

	if (IS_ENABLED(STM32_SYSCLK_SRC_PLL) &&
	    (LL_RCC_GetSysClkSource() == LL_RCC_SYS_CLKSOURCE_STATUS_PLL1R)) {
		/* In case of chainloaded application, it may happen that PLL
		 * was already configured as sysclk src by bootloader.
		 * Don't test other cases as there are multiple options but
		 * they will be handled smoothly by the function.
		 */
		SystemCoreClock = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
		return 0;
	}

	/* Current flash frequency, used to decide when to raise the latency */
	old_flash_freq = RCC_CALC_FLASH_FREQ(HAL_RCC_GetSysClockFreq(),
					       GET_CURRENT_FLASH_PRESCALER());

	/* Set up individual enabled clocks */
	set_up_fixed_clock_sources();

	/* Set voltage regulator to comply with targeted system frequency */
	set_regu_voltage(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);

	/* If required, apply max step freq for Sysclock w/ PLL input */
	if (IS_ENABLED(STM32_SYSCLK_SRC_PLL)) {
		LL_RCC_PLL1_SetPLL1RCLKDivisionStep(LL_RCC_PLL1RCLK_2_STEP_DIV);
		/* Send 2 pulses on CLKPRE like it is done in STM32Cube HAL */
		LL_RCC_PLL1_DisablePLL1RCLKDivision();
		LL_RCC_PLL1_EnablePLL1RCLKDivision();
		LL_RCC_PLL1_DisablePLL1RCLKDivision();
		LL_RCC_PLL1_EnablePLL1RCLKDivision();
	}

	/* Set up PLLs */
	r = set_up_plls();
	if (r < 0) {
		return r;
	}

	/* If freq increases, set flash latency before any clock setting */
	if (old_flash_freq < CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) {
		LL_SetFlashLatency(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);
	}

	LL_RCC_SetAHBPrescaler(ahb_prescaler(STM32_CORE_PRESCALER));

	if (IS_ENABLED(STM32_SYSCLK_SRC_PLL)) {
		/* PLL is the SYSCLK source, use 'ahb5-prescaler' */
		LL_RCC_SetAHB5Prescaler(ahb5_prescaler(STM32_AHB5_PRESCALER));
	} else {
		/* PLL is not the SYSCLK source, use 'ahb5-div'(if set) */
		if (IS_ENABLED(STM32_AHB5_DIV)) {
			LL_RCC_SetAHB5Divider(LL_RCC_AHB5_DIVIDER_2);
		} else {
			LL_RCC_SetAHB5Divider(LL_RCC_AHB5_DIVIDER_1);
		}
	}

	if (IS_ENABLED(STM32_SYSCLK_SRC_PLL)) {
		/* Set PLL as System Clock Source */
		LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_PLL1R);
		while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_PLL1R) {
		}
		/* Remove the temporary max-step division applied above */
		LL_RCC_PLL1_DisablePLL1RCLKDivision();
		while (LL_RCC_PLL1_IsPLL1RCLKDivisionReady() == 0) {
		}
	} else if (IS_ENABLED(STM32_SYSCLK_SRC_HSE)) {
		/* Set HSE as SYSCLCK source */
		LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_HSE);
		while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_HSE) {
		}
	} else if (IS_ENABLED(STM32_SYSCLK_SRC_HSI)) {
		stm32_clock_switch_to_hsi();
	}

	/* If freq not increased, set flash latency after all clock setting */
	if (old_flash_freq >= CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) {
		LL_SetFlashLatency(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);
	}

	/* Set voltage regulator to comply with targeted system frequency */
	set_regu_voltage(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);

	SystemCoreClock = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

	/* Set bus prescalers prescaler */
	LL_RCC_SetAPB1Prescaler(apb1_prescaler(STM32_APB1_PRESCALER));
	LL_RCC_SetAPB2Prescaler(apb2_prescaler(STM32_APB2_PRESCALER));
	LL_RCC_SetAPB7Prescaler(apb7_prescaler(STM32_APB7_PRESCALER));

	return 0;
}
/**
 * @brief RCC device registration.
 *
 * Initialized at PRE_KERNEL_1 with CONFIG_CLOCK_CONTROL_INIT_PRIORITY so
 * the clock tree is configured just after SoC init and before any
 * peripheral driver that depends on it.
 */
DEVICE_DT_DEFINE(DT_NODELABEL(rcc),
		    stm32_clock_control_init,
		    NULL,
		    NULL, NULL,
		    PRE_KERNEL_1,
		    CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		    &stm32_clock_control_api);
``` | /content/code_sandbox/drivers/clock_control/clock_stm32_ll_wba.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,982 |
```unknown
# MCUXpresso SDK CCM
config CLOCK_CONTROL_MCUX_CCM
bool "MCUX CCM driver"
default y
depends on DT_HAS_NXP_IMX_CCM_ENABLED
help
	  Enable support for the MCUX CCM driver.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.mcux_ccm | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 53 |
```c
/*
*
*/
#define DT_DRV_COMPAT nxp_lpc11u6x_syscon
#include <zephyr/devicetree.h>
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/clock_control/lpc11u6x_clock_control.h>
#include "clock_control_lpc11u6x.h"
/*
 * Power an analog block up (clear its PDRUNCFG bit) or down (set it).
 *
 * LPC11U6X_PDRUNCFG_MASK bits are always ORed into every write
 * (presumably bits the hardware requires to be written as one —
 * verify against the user manual).
 */
static void syscon_power_up(struct lpc11u6x_syscon_regs *syscon,
			    uint32_t bit, bool enable)
{
	uint32_t val = syscon->pd_run_cfg;

	if (enable) {
		val &= ~bit;
	} else {
		val |= bit;
	}

	syscon->pd_run_cfg = val | LPC11U6X_PDRUNCFG_MASK;
}
/*
 * Select the system PLL input clock. The 0 -> 1 toggle of the update
 * enable register latches the new selection into the hardware.
 */
static void syscon_set_pll_src(struct lpc11u6x_syscon_regs *syscon,
			       uint32_t src)
{
	syscon->sys_pll_clk_sel = src;
	syscon->sys_pll_clk_uen = 0;
	syscon->sys_pll_clk_uen = 1;
}
/*
 * Program the flash access-time (wait-state) field.
 *
 * Fix: the register is memory-mapped hardware, so it must be accessed
 * through a volatile pointer — without volatile the compiler is free to
 * reorder or even elide the read-modify-write.
 *
 * @param nr_cycles Raw field value, e.g. LPC11U6X_FLASH_TIMING_3CYCLES.
 */
static void set_flash_access_time(uint32_t nr_cycles)
{
	volatile uint32_t *reg = (volatile uint32_t *) LPC11U6X_FLASH_TIMING_REG;

	*reg = (*reg & (~LPC11U6X_FLASH_TIMING_MASK)) | nr_cycles;
}
/*
 * Write the PLL feedback divider (MSEL) and post divider (PSEL) into
 * the system PLL control register in a single store.
 */
static void syscon_setup_pll(struct lpc11u6x_syscon_regs *syscon,
			     uint32_t msel, uint32_t psel)
{
	syscon->sys_pll_ctrl =
		(msel & LPC11U6X_SYS_PLL_CTRL_MSEL_MASK) |
		((psel & LPC11U6X_SYS_PLL_CTRL_PSEL_MASK) <<
		 LPC11U6X_SYS_PLL_CTRL_PSEL_SHIFT);
}
/* True when bit 0 (lock flag) of the system PLL status register is set */
static bool syscon_pll_locked(struct lpc11u6x_syscon_regs *syscon)
{
	return (syscon->sys_pll_stat & 0x1) == 0x1;
}
/*
 * Select the main (system) clock source. As with the PLL source, the
 * 0 -> 1 toggle of the update enable register commits the selection.
 */
static void syscon_set_main_clock_source(struct lpc11u6x_syscon_regs *syscon,
					 uint32_t src)
{
	syscon->main_clk_sel = src;
	syscon->main_clk_uen = 0;
	syscon->main_clk_uen = 1;
}
/* Gate (enable=false) or ungate (enable=true) the AHB clocks in @mask */
static void syscon_ahb_clock_enable(struct lpc11u6x_syscon_regs *syscon,
				    uint32_t mask, bool enable)
{
	uint32_t ctrl = syscon->sys_ahb_clk_ctrl;

	syscon->sys_ahb_clk_ctrl = enable ? (ctrl | mask) : (ctrl & ~mask);
}
/*
 * Assert (reset=true) or release (reset=false) the peripheral resets
 * in @mask. The reset control register is active-low: a cleared bit
 * holds the peripheral in reset.
 */
static void syscon_peripheral_reset(struct lpc11u6x_syscon_regs *syscon,
				    uint32_t mask, bool reset)
{
	uint32_t ctrl = syscon->p_reset_ctrl;

	syscon->p_reset_ctrl = reset ? (ctrl & ~mask) : (ctrl | mask);
}
/*
 * Configure the USART fractional rate generator (FRG) so its output is
 * LPC11U6X_USART_CLOCK_RATE: first an integer pre-divider from the main
 * clock, then the fractional divider (DIV fixed at 0xFF, MULT chosen so
 * that frg_in * 256 / (256 + MULT) hits the target rate).
 */
static void syscon_frg_init(struct lpc11u6x_syscon_regs *syscon)
{
	uint32_t div;

	div = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / LPC11U6X_USART_CLOCK_RATE;
	if (!div) {
		/* Main clock slower than the target rate: divide by 1 */
		div = 1;
	}
	syscon->frg_clk_div = div;

	/* Take the FRG block out of reset before programming it */
	syscon_peripheral_reset(syscon, LPC11U6X_PRESET_CTRL_FRG, false);
	syscon->uart_frg_div = 0xFF;
	syscon->uart_frg_mult = ((CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / div)
				 * 256) / LPC11U6X_USART_CLOCK_RATE;
}
/* Disable the fractional rate generator and put it back into reset */
static void syscon_frg_deinit(struct lpc11u6x_syscon_regs *syscon)
{
	syscon->uart_frg_div = 0x0;
	syscon_peripheral_reset(syscon, LPC11U6X_PRESET_CTRL_FRG, true);
}
/*
 * clock_control "on" handler: ungate the clock for the requested
 * peripheral and release its reset.
 *
 * The FRG feeds USART1-4, so it is reference-counted (frg_in_use) and
 * initialized only by the first user; USART3/4 additionally share one
 * AHB clock gate, counted by usart34_in_use. All state is protected by
 * data->mutex.
 *
 * Returns 0 on success, -EINVAL for an unknown subsystem.
 */
static int lpc11u6x_clock_control_on(const struct device *dev,
				     clock_control_subsys_t sub_system)
{
	const struct lpc11u6x_syscon_config *cfg = dev->config;
	struct lpc11u6x_syscon_data *data = dev->data;
	uint32_t clk_mask = 0, reset_mask = 0;
	int ret = 0, init_frg = 0;

	k_mutex_lock(&data->mutex, K_FOREVER);
	switch ((int) sub_system) {
	case LPC11U6X_CLOCK_I2C0:
		clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_I2C0;
		reset_mask = LPC11U6X_PRESET_CTRL_I2C0;
		break;
	case LPC11U6X_CLOCK_I2C1:
		clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_I2C1;
		reset_mask = LPC11U6X_PRESET_CTRL_I2C1;
		break;
	case LPC11U6X_CLOCK_GPIO:
		/* GPIO and pin-interrupt blocks are clocked together */
		clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_GPIO |
			LPC11U6X_SYS_AHB_CLK_CTRL_PINT;
		break;
	case LPC11U6X_CLOCK_USART0:
		/* USART0 has its own divider instead of the FRG */
		cfg->syscon->usart0_clk_div = 1;
		clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_USART0;
		break;
	case LPC11U6X_CLOCK_USART1:
		if (!data->frg_in_use++) {
			init_frg = 1;
		}
		clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_USART1;
		reset_mask = LPC11U6X_PRESET_CTRL_USART1;
		break;
	case LPC11U6X_CLOCK_USART2:
		if (!data->frg_in_use++) {
			init_frg = 1;
		}
		clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_USART2;
		reset_mask = LPC11U6X_PRESET_CTRL_USART2;
		break;
	case LPC11U6X_CLOCK_USART3:
		if (!data->frg_in_use++) {
			init_frg = 1;
		}
		data->usart34_in_use++;
		clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_USART3_4;
		reset_mask = LPC11U6X_PRESET_CTRL_USART3;
		break;
	case LPC11U6X_CLOCK_USART4:
		if (!data->frg_in_use++) {
			init_frg = 1;
		}
		data->usart34_in_use++;
		clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_USART3_4;
		reset_mask = LPC11U6X_PRESET_CTRL_USART4;
		break;
	default:
		k_mutex_unlock(&data->mutex);
		return -EINVAL;
	}
	/* Ungate the clock first, then configure FRG, then release reset */
	syscon_ahb_clock_enable(cfg->syscon, clk_mask, true);
	if (init_frg) {
		syscon_frg_init(cfg->syscon);
	}
	syscon_peripheral_reset(cfg->syscon, reset_mask, false);

	k_mutex_unlock(&data->mutex);
	return ret;
}
/*
 * clock_control "off" handler: mirror of lpc11u6x_clock_control_on().
 *
 * Decrements the FRG and USART3/4 reference counts; the FRG is torn
 * down and the shared USART3/4 gate closed only when the last user
 * releases them. Protected by data->mutex.
 *
 * Returns 0 on success, -EINVAL for an unknown subsystem.
 */
static int lpc11u6x_clock_control_off(const struct device *dev,
				      clock_control_subsys_t sub_system)
{
	const struct lpc11u6x_syscon_config *cfg = dev->config;
	struct lpc11u6x_syscon_data *data = dev->data;
	uint32_t clk_mask = 0, reset_mask = 0;
	int ret = 0, deinit_frg = 0;

	k_mutex_lock(&data->mutex, K_FOREVER);
	switch ((int) sub_system) {
	case LPC11U6X_CLOCK_I2C0:
		clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_I2C0;
		reset_mask = LPC11U6X_PRESET_CTRL_I2C0;
		break;
	case LPC11U6X_CLOCK_I2C1:
		clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_I2C1;
		reset_mask = LPC11U6X_PRESET_CTRL_I2C1;
		break;
	case LPC11U6X_CLOCK_GPIO:
		clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_GPIO |
			LPC11U6X_SYS_AHB_CLK_CTRL_PINT;
		break;
	case LPC11U6X_CLOCK_USART0:
		/* Stop USART0's dedicated divider */
		cfg->syscon->usart0_clk_div = 0;
		clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_USART0;
		break;
	case LPC11U6X_CLOCK_USART1:
		if (!(--data->frg_in_use)) {
			deinit_frg = 1;
		}
		clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_USART1;
		reset_mask = LPC11U6X_PRESET_CTRL_USART1;
		break;
	case LPC11U6X_CLOCK_USART2:
		if (!(--data->frg_in_use)) {
			deinit_frg = 1;
		}
		clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_USART2;
		reset_mask = LPC11U6X_PRESET_CTRL_USART2;
		break;
	case LPC11U6X_CLOCK_USART3:
		if (!(--data->frg_in_use)) {
			deinit_frg = 1;
		}
		/* Shared gate: only close it when both USART3 and 4 are off */
		if (!(--data->usart34_in_use)) {
			clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_USART3_4;
		}
		reset_mask = LPC11U6X_PRESET_CTRL_USART3;
		break;
	case LPC11U6X_CLOCK_USART4:
		if (!(--data->frg_in_use)) {
			deinit_frg = 1;
		}
		if (!(--data->usart34_in_use)) {
			clk_mask = LPC11U6X_SYS_AHB_CLK_CTRL_USART3_4;
		}
		reset_mask = LPC11U6X_PRESET_CTRL_USART4;
		break;
	default:
		k_mutex_unlock(&data->mutex);
		return -EINVAL;
	}
	syscon_ahb_clock_enable(cfg->syscon, clk_mask, false);
	if (deinit_frg) {
		syscon_frg_deinit(cfg->syscon);
	}
	/* Put the peripheral back into reset */
	syscon_peripheral_reset(cfg->syscon, reset_mask, true);

	k_mutex_unlock(&data->mutex);
	return ret;
}
/*
 * clock_control "get_rate" handler.
 *
 * I2C, GPIO and USART0 run at the main clock frequency; USART1-4 are
 * fed by the fractional rate generator at LPC11U6X_USART_CLOCK_RATE.
 *
 * Returns 0 on success, -EINVAL for an unknown subsystem.
 */
static int lpc11u6x_clock_control_get_rate(const struct device *dev,
					   clock_control_subsys_t sub_system,
					   uint32_t *rate)
{
	switch ((int) sub_system) {
	case LPC11U6X_CLOCK_I2C0:
	case LPC11U6X_CLOCK_I2C1:
	case LPC11U6X_CLOCK_GPIO:
	case LPC11U6X_CLOCK_USART0:
		*rate = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
		return 0;
	case LPC11U6X_CLOCK_USART1:
	case LPC11U6X_CLOCK_USART2:
	case LPC11U6X_CLOCK_USART3:
	case LPC11U6X_CLOCK_USART4:
		*rate = LPC11U6X_USART_CLOCK_RATE;
		return 0;
	default:
		return -EINVAL;
	}
}
/*
 * Driver init: power up the selected PLL source, program the PLL for a
 * 48 MHz main clock, set flash wait states, and switch the main clock
 * to the PLL output.
 *
 * Fix: the ~500 us oscillator settle delay was an empty for-loop with a
 * non-volatile counter, which the optimizer is entitled to remove
 * entirely. The counter is now volatile so the delay actually executes.
 *
 * @return 0 (always succeeds).
 */
static int lpc11u6x_syscon_init(const struct device *dev)
{
	const struct lpc11u6x_syscon_config *cfg = dev->config;
	struct lpc11u6x_syscon_data *data = dev->data;
	uint32_t val;

	k_mutex_init(&data->mutex);
	data->frg_in_use = 0;
	data->usart34_in_use = 0;

	/* Enable SRAM1 and USB ram if needed */
	val = 0;
#ifdef CONFIG_CLOCK_CONTROL_LPC11U6X_ENABLE_SRAM1
	val |= LPC11U6X_SYS_AHB_CLK_CTRL_SRAM1;
#endif /* CONFIG_CLOCK_CONTROL_LPC11U6X_ENABLE_SRAM1 */
#ifdef CONFIG_CLOCK_CONTROL_LPC11U6X_ENABLE_USB_RAM
	val |= LPC11U6X_SYS_AHB_CLK_CTRL_USB_SRAM;
#endif /* CONFIG_CLOCK_CONTROL_LPC11U6X_ENABLE_USB_RAM */
	/* Enable IOCON (I/O Control) clock. */
	val |= LPC11U6X_SYS_AHB_CLK_CTRL_IOCON;
	syscon_ahb_clock_enable(cfg->syscon, val, true);

	/* Configure PLL output as the main clock source, with a frequency of
	 * 48MHz
	 */
#ifdef CONFIG_CLOCK_CONTROL_LPC11U6X_PLL_SRC_SYSOSC
	syscon_power_up(cfg->syscon, LPC11U6X_PDRUNCFG_SYSOSC_PD, true);

	/* Wait ~500us for the system oscillator to stabilize. The loop
	 * counter must be volatile so the empty delay loop is not
	 * optimized away.
	 */
	for (volatile int i = 0; i < 2500; i++) {
	}

	/* Configure PLL input */
	syscon_set_pll_src(cfg->syscon, LPC11U6X_SYS_PLL_CLK_SEL_SYSOSC);

	pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_DEFAULT);
#elif defined(CONFIG_CLOCK_CONTROL_LPC11U6X_PLL_SRC_IRC)
	syscon_power_up(cfg->syscon, LPC11U6X_PDRUNCFG_IRC_PD, true);
	syscon_set_pll_src(cfg->syscon, LPC11U6X_SYS_PLL_CLK_SEL_IRC);
#endif
	/* Flash access takes 3 clock cycles for main clock frequencies
	 * between 40MHz and 50MHz
	 */
	set_flash_access_time(LPC11U6X_FLASH_TIMING_3CYCLES);

	/* Shutdown PLL to change divider/mult ratios */
	syscon_power_up(cfg->syscon, LPC11U6X_PDRUNCFG_PLL_PD, false);

	/* Setup PLL to have 48MHz output */
	syscon_setup_pll(cfg->syscon, 3, 1);

	/* Power up pll and wait */
	syscon_power_up(cfg->syscon, LPC11U6X_PDRUNCFG_PLL_PD, true);
	while (!syscon_pll_locked(cfg->syscon)) {
	}

	cfg->syscon->sys_ahb_clk_div = 1;
	syscon_set_main_clock_source(cfg->syscon, LPC11U6X_MAIN_CLK_SRC_PLLOUT);

	return 0;
}
/* Zephyr clock_control driver API vtable (no get_status/configure) */
static const struct clock_control_driver_api lpc11u6x_clock_control_api = {
	.on = lpc11u6x_clock_control_on,
	.off = lpc11u6x_clock_control_off,
	.get_rate = lpc11u6x_clock_control_get_rate,
};
/* Pin control state and device registration for the single syscon instance */
PINCTRL_DT_INST_DEFINE(0);

static const struct lpc11u6x_syscon_config syscon_config = {
	.syscon = (struct lpc11u6x_syscon_regs *) DT_INST_REG_ADDR(0),
	.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
};

static struct lpc11u6x_syscon_data syscon_data;

DEVICE_DT_INST_DEFINE(0,
		    lpc11u6x_syscon_init,
		    NULL,
		    &syscon_data, &syscon_config,
		    PRE_KERNEL_1, CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		    &lpc11u6x_clock_control_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_lpc11u6x.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,200 |
```c
/*
*
*/
#include <zephyr/types.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/clock_control_litex.h>
#include "clock_control_litex.h"
#include <zephyr/logging/log.h>
#include <zephyr/logging/log_ctrl.h>
#include <zephyr/sys/util.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <zephyr/kernel.h>
#include <soc.h>
LOG_MODULE_REGISTER(CLK_CTRL_LITEX, CONFIG_CLOCK_CONTROL_LOG_LEVEL);

static struct litex_clk_device *ldev;	/* global struct for whole driver */
static struct litex_clk_clkout *clkouts;/* clkout array for whole driver */

/* All DRP regs addresses and sizes (in bytes); indexed by the DRP_* ids */
static const struct litex_drp_reg drp[] = {
	{DRP_ADDR_RESET,  1},
	{DRP_ADDR_LOCKED, 1},
	{DRP_ADDR_READ,   1},
	{DRP_ADDR_WRITE,  1},
	{DRP_ADDR_DRDY,   1},
	{DRP_ADDR_ADR,    1},
	{DRP_ADDR_DAT_W,  2},
	{DRP_ADDR_DAT_R,  2},
};
/*
 * Build the table of per-CLKOUT DRP register address pairs. CLKOUTs
 * occupy consecutive addresses starting at CLKOUT0_REG1, except for
 * CLKOUT5 whose registers live at lower, dedicated addresses.
 */
struct litex_clk_regs_addr litex_clk_regs_addr_init(void)
{
	struct litex_clk_regs_addr m;
	uint32_t addr = CLKOUT0_REG1;

	for (uint32_t i = 0; i <= CLKOUT_MAX; i++) {
		if (i == 5) {
			/* CLKOUT5 does not consume consecutive slots */
			m.clkout[5].reg1 = CLKOUT5_REG1;
			m.clkout[5].reg2 = CLKOUT5_REG2;
			continue;
		}
		m.clkout[i].reg1 = addr;
		m.clkout[i].reg2 = addr + 1;
		addr += 2;
	}

	return m;
}
/*
* These lookup tables are taken from:
* path_to_url
*
* Author: Sam Bobrowicz
*
*/
/* MMCM loop filter lookup table, indexed by (global multiplier - 1);
 * see litex_clk_lookup_filter(). Values are raw 10-bit filter settings.
 */
static const uint32_t litex_clk_filter_table[] = {
	0b0001011111,
	0b0001010111,
	0b0001111011,
	0b0001011011,
	0b0001101011,
	0b0001110011,
	0b0001110011,
	0b0001110011,
	0b0001110011,
	0b0001001011,
	0b0001001011,
	0b0001001011,
	0b0010110011,
	0b0001010011,
	0b0001010011,
	0b0001010011,
	0b0001010011,
	0b0001010011,
	0b0001010011,
	0b0001010011,
	0b0001010011,
	0b0001010011,
	0b0001010011,
	0b0001100011,
	0b0001100011,
	0b0001100011,
	0b0001100011,
	0b0001100011,
	0b0001100011,
	0b0001100011,
	0b0001100011,
	0b0001100011,
	0b0001100011,
	0b0001100011,
	0b0001100011,
	0b0001100011,
	0b0001100011,
	0b0010010011,
	0b0010010011,
	0b0010010011,
	0b0010010011,
	0b0010010011,
	0b0010010011,
	0b0010010011,
	0b0010010011,
	0b0010010011,
	0b0010010011,
	0b0010100011,
	0b0010100011,
	0b0010100011,
	0b0010100011,
	0b0010100011,
	0b0010100011,
	0b0010100011,
	0b0010100011,
	0b0010100011,
	0b0010100011,
	0b0010100011,
	0b0010100011,
	0b0010100011,
	0b0010100011,
	0b0010100011,
	0b0010100011,
	0b0010100011
};
/* MMCM lock detection lookup table, indexed by (global multiplier - 1);
 * see litex_clk_lookup_lock(). Values are raw 40-bit lock settings.
 */
static const uint64_t litex_clk_lock_table[] = {
	0b0011000110111110100011111010010000000001,
	0b0011000110111110100011111010010000000001,
	0b0100001000111110100011111010010000000001,
	0b0101101011111110100011111010010000000001,
	0b0111001110111110100011111010010000000001,
	0b1000110001111110100011111010010000000001,
	0b1001110011111110100011111010010000000001,
	0b1011010110111110100011111010010000000001,
	0b1100111001111110100011111010010000000001,
	0b1110011100111110100011111010010000000001,
	0b1111111111111000010011111010010000000001,
	0b1111111111110011100111111010010000000001,
	0b1111111111101110111011111010010000000001,
	0b1111111111101011110011111010010000000001,
	0b1111111111101000101011111010010000000001,
	0b1111111111100111000111111010010000000001,
	0b1111111111100011111111111010010000000001,
	0b1111111111100010011011111010010000000001,
	0b1111111111100000110111111010010000000001,
	0b1111111111011111010011111010010000000001,
	0b1111111111011101101111111010010000000001,
	0b1111111111011100001011111010010000000001,
	0b1111111111011010100111111010010000000001,
	0b1111111111011001000011111010010000000001,
	0b1111111111011001000011111010010000000001,
	0b1111111111010111011111111010010000000001,
	0b1111111111010101111011111010010000000001,
	0b1111111111010101111011111010010000000001,
	0b1111111111010100010111111010010000000001,
	0b1111111111010100010111111010010000000001,
	0b1111111111010010110011111010010000000001,
	0b1111111111010010110011111010010000000001,
	0b1111111111010010110011111010010000000001,
	0b1111111111010001001111111010010000000001,
	0b1111111111010001001111111010010000000001,
	0b1111111111010001001111111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001,
	0b1111111111001111101011111010010000000001
};
/* End of copied code */
/* Helper function for filter lookup table; glob_mul is 1-based, the
 * table is 0-based — caller must keep glob_mul within table bounds.
 */
static inline uint32_t litex_clk_lookup_filter(uint32_t glob_mul)
{
	return litex_clk_filter_table[glob_mul - 1];
}
/* Helper function for lock lookup table; glob_mul is 1-based, the
 * table is 0-based — caller must keep glob_mul within table bounds.
 */
static inline uint64_t litex_clk_lookup_lock(uint32_t glob_mul)
{
	return litex_clk_lock_table[glob_mul - 1];
}
/* Write @val to the DRP register identified by the DRP_* id @reg */
static inline void litex_clk_set_reg(uint32_t reg, uint32_t val)
{
	litex_write(drp[reg].addr, drp[reg].size, val);
}
/* Read the DRP register identified by the DRP_* id @reg */
static inline uint32_t litex_clk_get_reg(uint32_t reg)
{
	return litex_read(drp[reg].addr, drp[reg].size);
}
/*
 * Drive a DRP control register to all-ones (its full byte width).
 *
 * Fix: the local was named `assert`, which collides with the <assert.h>
 * macro if that header is ever included in this translation unit;
 * renamed to a neutral identifier.
 */
static inline void litex_clk_assert_reg(uint32_t reg)
{
	int ones = (1 << (drp[reg].size * BITS_PER_BYTE)) - 1;

	litex_clk_set_reg(reg, ones);
}
/* Drive a DRP control register back to zero */
static inline void litex_clk_deassert_reg(uint32_t reg)
{
	litex_clk_set_reg(reg, ZERO_REG);
}
/*
 * Poll a status register (DRP_LOCKED or DRP_DRDY) until it asserts or
 * the configured timeout (in ms, polled at 1 ms intervals) expires.
 *
 * Fix: corrected the spelling of "occurred" in the timeout warning.
 *
 * @return 0 when the register asserted, -ETIME on timeout.
 */
static int litex_clk_wait(uint32_t reg)
{
	uint32_t timeout;

	__ASSERT(reg == DRP_LOCKED || reg == DRP_DRDY, "Unsupported register! Please provide DRP_LOCKED or DRP_DRDY");

	if (reg == DRP_LOCKED) {
		timeout = ldev->timeout.lock;
	} else {
		timeout = ldev->timeout.drdy;
	}
	/*Waiting for signal to assert in reg*/
	while (!litex_clk_get_reg(reg) && timeout) {
		timeout--;
		k_sleep(K_MSEC(1));
	}
	if (timeout == 0) {
		LOG_WRN("Timeout occurred when waiting for the register: 0x%x", reg);
		return -ETIME;
	}
	return 0;
}
/* Read value written in given internal MMCM register*/
/*
 * Sequence: latch the MMCM-internal address, pulse the READ strobe,
 * wait for data-ready, then fetch the result from DAT_R.
 *
 * @return 0 on success, -ETIME if DRDY never asserted.
 */
static int litex_clk_get_DO(uint8_t clk_reg_addr, uint16_t *res)
{
	int ret;

	litex_clk_set_reg(DRP_ADR, clk_reg_addr);
	litex_clk_assert_reg(DRP_READ);

	litex_clk_deassert_reg(DRP_READ);
	ret = litex_clk_wait(DRP_DRDY);
	if (ret != 0) {
		return ret;
	}
	*res = litex_clk_get_reg(DRP_DAT_R);

	return 0;
}
/* Get global divider and multiplier values and update global config */
/*
 * Reads the MMCM feedback (CLKFBOUT) and input-divider (DIV) registers.
 * When a register's NO_COUNT bit is set the corresponding factor is 1
 * (counter bypassed); otherwise the factor is the sum of the encoded
 * high-time and low-time cycle counts.
 *
 * @return 0 on success, negative error from litex_clk_get_DO().
 */
static int litex_clk_update_global_config(void)
{
	int ret;
	uint16_t divreg, mult2;
	uint8_t low_time, high_time;

	ret = litex_clk_get_DO(CLKFBOUT_REG2, &mult2);
	if (ret != 0) {
		return ret;
	}
	ret = litex_clk_get_DO(DIV_REG, &divreg);
	if (ret != 0) {
		return ret;
	}

	if (mult2 & (NO_CNT_MASK << NO_CNT_POS)) {
		/* Feedback counter bypassed: effective multiplier is 1 */
		ldev->g_config.mul = 1;
	} else {
		uint16_t mult1;

		ret = litex_clk_get_DO(CLKFBOUT_REG1, &mult1);
		if (ret != 0) {
			return ret;
		}
		low_time = mult1 & HL_TIME_MASK;
		high_time = (mult1 >> HIGH_TIME_POS) & HL_TIME_MASK;
		ldev->g_config.mul = low_time + high_time;
	}

	if (divreg & (NO_CNT_MASK << NO_CNT_DIVREG_POS)) {
		/* Input divider bypassed: effective divider is 1 */
		ldev->g_config.div = 1;
	} else {
		low_time = divreg & HL_TIME_MASK;
		high_time = (divreg >> HIGH_TIME_POS) & HL_TIME_MASK;
		ldev->g_config.div = low_time + high_time;
	}

	return 0;
}
/*
 * Compute the MMCM global (VCO) frequency: input clock * mul / div.
 * Performed in 64-bit arithmetic to avoid intermediate overflow.
 */
static uint64_t litex_clk_calc_global_frequency(uint32_t mul, uint32_t div)
{
	return (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC * (uint64_t)mul) / div;
}
/* Calculate frequency with real global params and update global config */
/*
 * Refreshes g_config from hardware, computes the resulting global
 * frequency, and mirrors the values into the to-set (ts_g_config) copy.
 *
 * NOTE(review): the error return of litex_clk_update_global_config() is
 * ignored here; on a DRP read timeout the stale cached mul/div values
 * are used — confirm this best-effort behavior is intended.
 */
static uint64_t litex_clk_get_real_global_frequency(void)
{
	uint64_t f;

	litex_clk_update_global_config();

	f = litex_clk_calc_global_frequency(ldev->g_config.mul,
					    ldev->g_config.div);
	ldev->g_config.freq = f;
	ldev->ts_g_config.div = ldev->g_config.div;
	ldev->ts_g_config.mul = ldev->g_config.mul;
	ldev->ts_g_config.freq = ldev->g_config.freq;

	return f;
}
/* Return dividers of given CLKOUT */
/*
 * Reads both CLKOUT registers: the integer divider is the sum of the
 * encoded high-time and low-time counts from reg1; the fractional
 * counter is extracted from reg2.
 *
 * @return 0 on success, negative error from litex_clk_get_DO().
 */
static int litex_clk_get_clkout_divider(struct litex_clk_clkout *lcko,
					uint32_t *divider, uint32_t *fract_cnt)
{
	struct litex_clk_regs_addr drp_addr = litex_clk_regs_addr_init();
	int ret;
	uint16_t div, frac;
	uint8_t clkout_nr = lcko->id;
	uint8_t low_time, high_time;

	ret = litex_clk_get_DO(drp_addr.clkout[clkout_nr].reg1, &div);
	if (ret != 0) {
		return ret;
	}
	ret = litex_clk_get_DO(drp_addr.clkout[clkout_nr].reg2, &frac);
	if (ret != 0) {
		return ret;
	}

	low_time = div & HL_TIME_MASK;
	high_time = (div >> HIGH_TIME_POS) & HL_TIME_MASK;
	*divider = low_time + high_time;
	*fract_cnt = (frac >> FRAC_POS) & FRAC_MASK;

	return 0;
}
/* Debug functions */
#ifdef CONFIG_CLOCK_CONTROL_LOG_LEVEL_DBG
/*
 * Debug helper: read an MMCM register and log its value, or log the
 * read error.
 *
 * Fix: the if/else bodies were unbraced; braces added per Zephyr
 * coding style (all control bodies braced, even single statements).
 */
static void litex_clk_check_DO(char *reg_name, uint8_t clk_reg_addr,
			       uint16_t *res)
{
	int ret;

	ret = litex_clk_get_DO(clk_reg_addr, res);
	if (ret != 0) {
		LOG_ERR("%s: read error: %d", reg_name, ret);
	} else {
		LOG_DBG("%s: 0x%x", reg_name, *res);
	}
}
/* Debug helper: dump every non-CLKOUT MMCM register to the log */
static void litex_clk_print_general_regs(void)
{
	uint16_t power_reg, div_reg, clkfbout_reg1, clkfbout_reg2,
		 lock_reg1, lock_reg2, lock_reg3, filt_reg1, filt_reg2;

	litex_clk_check_DO("POWER_REG", POWER_REG, &power_reg);
	litex_clk_check_DO("DIV_REG", DIV_REG, &div_reg);
	litex_clk_check_DO("MUL_REG1", CLKFBOUT_REG1, &clkfbout_reg1);
	litex_clk_check_DO("MUL_REG2", CLKFBOUT_REG2, &clkfbout_reg2);
	litex_clk_check_DO("LOCK_REG1", LOCK_REG1, &lock_reg1);
	litex_clk_check_DO("LOCK_REG2", LOCK_REG2, &lock_reg2);
	litex_clk_check_DO("LOCK_REG3", LOCK_REG3, &lock_reg3);
	litex_clk_check_DO("FILT_REG1", FILT_REG1, &filt_reg1);
	litex_clk_check_DO("FILT_REG2", FILT_REG2, &filt_reg2);
}
/*
 * Debug helper: dump both DRP registers of one CLKOUT.
 *
 * Fix: replaced unbounded sprintf() with snprintf(). The 16-byte buffer
 * happens to fit the worst case today, but a bounded write makes that
 * guarantee explicit instead of implicit.
 */
static void litex_clk_print_clkout_regs(uint8_t clkout, uint8_t reg1,
					uint8_t reg2)
{
	uint16_t clkout_reg1, clkout_reg2;
	char reg_name[16];

	snprintf(reg_name, sizeof(reg_name), "CLKOUT%u REG1", clkout);
	litex_clk_check_DO(reg_name, reg1, &clkout_reg1);

	snprintf(reg_name, sizeof(reg_name), "CLKOUT%u REG2", clkout);
	litex_clk_check_DO(reg_name, reg2, &clkout_reg2);
}
/* Debug helper: dump the general registers plus every CLKOUT's pair */
static void litex_clk_print_all_regs(void)
{
	struct litex_clk_regs_addr drp_addr = litex_clk_regs_addr_init();
	uint32_t i;

	litex_clk_print_general_regs();
	for (i = 0; i < ldev->nclkout; i++) {
		litex_clk_print_clkout_regs(i, drp_addr.clkout[i].reg1,
					    drp_addr.clkout[i].reg2);
	}
}
/* Debug helper: dump one CLKOUT's defaults, pending and applied config,
 * and its divide/frac/phase hardware parameter groups.
 */
static void litex_clk_print_params(struct litex_clk_clkout *lcko)
{
	LOG_DBG("CLKOUT%d DUMP:", lcko->id);
	LOG_DBG("Defaults:");
	LOG_DBG("f: %u d: %u/%u p: %u",
		lcko->def.freq, lcko->def.duty.num,
		lcko->def.duty.den, lcko->def.phase);
	LOG_DBG("Config to set:");
	LOG_DBG("div: %u freq: %u duty: %u/%u phase: %d per_off: %u",
		lcko->ts_config.div, lcko->ts_config.freq,
		lcko->ts_config.duty.num, lcko->ts_config.duty.den,
		lcko->ts_config.phase, lcko->config.period_off);
	LOG_DBG("Config:");
	LOG_DBG("div: %u freq: %u duty: %u/%u phase: %d per_off: %u",
		lcko->config.div, lcko->config.freq,
		lcko->config.duty.num, lcko->config.duty.den,
		lcko->config.phase, lcko->config.period_off);
	LOG_DBG("Divide group:");
	LOG_DBG("e: %u ht: %u lt: %u nc: %u",
		lcko->div.edge, lcko->div.high_time,
		lcko->div.low_time, lcko->div.no_cnt);
	LOG_DBG("Frac group:");
	LOG_DBG("f: %u fen: %u fwff: %u fwfr: %u pmf: %u",
		lcko->frac.frac, lcko->frac.frac_en, lcko->frac.frac_wf_f,
		lcko->frac.frac_wf_r, lcko->frac.phase_mux_f);
	LOG_DBG("Phase group:");
	LOG_DBG("dt: %u pm: %u mx: %u",
		lcko->phase.delay_time, lcko->phase.phase_mux, lcko->phase.mx);
}
/* Debug helper: dump the global config and every CLKOUT's parameters */
static void litex_clk_print_all_params(void)
{
	uint32_t c;

	LOG_DBG("Global Config to set:");
	LOG_DBG("freq: %llu mul: %u div: %u",
		ldev->ts_g_config.freq, ldev->ts_g_config.mul,
		ldev->ts_g_config.div);
	LOG_DBG("Global Config:");
	LOG_DBG("freq: %llu mul: %u div: %u",
		ldev->g_config.freq, ldev->g_config.mul, ldev->g_config.div);
	for (c = 0; c < ldev->nclkout; c++) {
		litex_clk_print_params(&ldev->clkouts[c]);
	}
}
#endif /* CONFIG_CLOCK_CONTROL_LOG_LEVEL_DBG */
/*
 * Build the raw value to write back into an MMCM register: keep only
 * the bits of the read-back value selected by @mask, then OR in the
 * new @bitset bits.
 */
static inline uint16_t litex_clk_calc_DI(uint16_t DO_val, uint16_t mask,
					 uint16_t bitset)
{
	return (uint16_t)((DO_val & mask) | bitset);
}
/* Sets calculated DI value into DI DRP register */
/*
 * Latches @DI_val into DAT_W, pulses the WRITE strobe, then waits for
 * the data-ready flag.
 *
 * @return 0 on success, -ETIME if DRDY never asserted.
 */
static int litex_clk_set_DI(uint16_t DI_val)
{
	int ret;

	litex_clk_set_reg(DRP_DAT_W, DI_val);
	litex_clk_assert_reg(DRP_WRITE);
	litex_clk_deassert_reg(DRP_WRITE);
	ret = litex_clk_wait(DRP_DRDY);
	return ret;
}
/*
 * Change register value as specified in arguments
 *
 * mask: preserve or zero MMCM register bits
 *       by selecting 1 or 0 on desired specific mask positions
 * bitset: set those bits in MMCM register which are 1 in bitset
 * clk_reg_addr: internal MMCM address of control register
 *
 * The MMCM is held in reset for the whole read-modify-write and the
 * function waits for it to re-lock afterwards.
 *
 * Returns 0 on success, negative error from a DRP read/write/wait.
 */
static int litex_clk_change_value(uint16_t mask, uint16_t bitset,
				  uint8_t clk_reg_addr)
{
	uint16_t DO_val, DI_val;
	int ret;

	litex_clk_assert_reg(DRP_RESET);

	ret = litex_clk_get_DO(clk_reg_addr, &DO_val);
	if (ret != 0) {
		return ret;
	}
	DI_val = litex_clk_calc_DI(DO_val, mask, bitset);
	ret = litex_clk_set_DI(DI_val);
	if (ret != 0) {
		return ret;
	}
#ifdef CONFIG_CLOCK_CONTROL_LOG_LEVEL_DBG
	/* Read back what was staged, purely for the debug log */
	DI_val = litex_clk_get_reg(DRP_DAT_W);
	LOG_DBG("set 0x%x under: 0x%x", DI_val, clk_reg_addr);
#endif
	litex_clk_deassert_reg(DRP_DAT_W);
	litex_clk_deassert_reg(DRP_RESET);
	/* Wait until the MMCM re-locks after leaving reset */
	ret = litex_clk_wait(DRP_LOCKED);
	return ret;
}
/*
* Set register values for given CLKOUT
*
* clkout_nr: clock output number
* mask_regX: preserve or zero MMCM register X bits
* by selecting 1 or 0 on desired specific mask positions
* bitset_regX: set those bits in MMCM register X which are 1 in bitset
*
*/
static int litex_clk_set_clock(uint8_t clkout_nr, uint16_t mask_reg1,
			       uint16_t bitset_reg1, uint16_t mask_reg2,
			       uint16_t bitset_reg2)
{
	struct litex_clk_regs_addr drp_addr = litex_clk_regs_addr_init();
	int ret;
	/*
	 * (FULL_REG_16, ZERO_REG) means "no change requested" for a
	 * register, so the corresponding DRP write is skipped entirely.
	 * reg2 is written before reg1.
	 */
	if (!(mask_reg2 == FULL_REG_16 && bitset_reg2 == ZERO_REG)) {
		ret = litex_clk_change_value(mask_reg2, bitset_reg2,
					     drp_addr.clkout[clkout_nr].reg2);
		if (ret != 0) {
			return ret;
		}
	}
	if (!(mask_reg1 == FULL_REG_16 && bitset_reg1 == ZERO_REG)) {
		ret = litex_clk_change_value(mask_reg1, bitset_reg1,
					     drp_addr.clkout[clkout_nr].reg1);
		if (ret != 0) {
			return ret;
		}
	}
	return 0;
}
/* Set global divider for all CLKOUTs */
/* Set global divider for all CLKOUTs */
static int litex_clk_set_divreg(void)
{
	int ret;
	uint8_t no_cnt = 0, edge = 0, ht = 0, lt = 0,
		div = ldev->ts_g_config.div;
	uint16_t bitset = 0;
	/*
	 * div == 1 is encoded by bypassing the counter (no_cnt).
	 * Otherwise high/low times split the divider; an odd divider
	 * puts the extra half-cycle on the low phase via the edge bit.
	 */
	if (div == 1) {
		no_cnt = 1;
	} else {
		ht = div / 2;
		lt = ht;
		edge = div % 2;
		if (edge) {
			lt += edge;
		}
	}
	bitset = (edge << EDGE_DIVREG_POS) |
		 (no_cnt << NO_CNT_DIVREG_POS) |
		 (ht << HIGH_TIME_POS) |
		 (lt << LOW_TIME_POS);
	ret = litex_clk_change_value(KEEP_IN_DIV, bitset, DIV_REG);
	if (ret != 0) {
		return ret;
	}
	/* Commit the requested value as the current configuration */
	ldev->g_config.div = div;
	LOG_DBG("Global divider set to %u", div);
	return 0;
}
/* Set global multiplier for all CLKOUTs */
/* Set global multiplier for all CLKOUTs */
static int litex_clk_set_mulreg(void)
{
	int ret;
	uint8_t no_cnt = 0, edge = 0, ht = 0, lt = 0,
		mul = ldev->ts_g_config.mul;
	uint16_t bitset1 = 0;
	/* Same encoding scheme as the divider: see litex_clk_set_divreg() */
	if (mul == 1) {
		no_cnt = 1;
	} else {
		ht = mul / 2;
		lt = ht;
		edge = mul % 2;
		if (edge) {
			lt += edge;
		}
	}
	bitset1 = (ht << HIGH_TIME_POS) |
		  (lt << LOW_TIME_POS);
	ret = litex_clk_change_value(KEEP_IN_MUL_REG1, bitset1, CLKFBOUT_REG1);
	if (ret != 0) {
		return ret;
	}
	/* Second register only needs touching when edge/no_cnt are set */
	if (edge || no_cnt) {
		uint16_t bitset2 = (edge << EDGE_POS) |
				   (no_cnt << NO_CNT_POS);
		ret = litex_clk_change_value(KEEP_IN_MUL_REG2,
					     bitset2, CLKFBOUT_REG2);
		if (ret != 0) {
			return ret;
		}
	}
	ldev->g_config.mul = mul;
	LOG_DBG("Global multiplier set to %u", mul);
	return 0;
}
/*
 * Program the MMCM loop-filter registers for the current global
 * multiplier, using the lookup table from Xilinx XAPP888.
 */
static int litex_clk_set_filt(void)
{
	uint16_t filt_reg;
	uint32_t filt, mul;
	int ret;
	mul = ldev->g_config.mul;
	filt = litex_clk_lookup_filter(mul);
	/*
	 * Preparing and setting filter register values
	 * according to reg map form Xilinx XAPP888
	 */
	filt_reg = (((filt >> 9) & 0x1) << 15) |
		   (((filt >> 7) & 0x3) << 11) |
		   (((filt >> 6) & 0x1) << 8);
	ret = litex_clk_change_value(FILT1_MASK, filt_reg, FILT_REG1);
	if (ret != 0) {
		return ret;
	}
	filt_reg = (((filt >> 5) & 0x1) << 15) |
		   (((filt >> 3) & 0x3) << 11) |
		   (((filt >> 1) & 0x3) << 7) |
		   (((filt) & 0x1) << 4);
	ret = litex_clk_change_value(FILT2_MASK, filt_reg, FILT_REG2);
	return ret;
}
/*
 * Program the MMCM lock-detection registers for the current global
 * multiplier, spreading the looked-up 40-bit value over three registers
 * per the XAPP888 register map.
 */
static int litex_clk_set_lock(void)
{
	uint16_t lock_reg;
	uint32_t mul;
	uint64_t lock;
	int ret;
	mul = ldev->g_config.mul;
	lock = litex_clk_lookup_lock(mul);
	/*
	 * Preparing and setting lock register values
	 * according to reg map form Xilinx XAPP888
	 */
	lock_reg = (lock >> 20) & 0x3FF;
	ret = litex_clk_change_value(LOCK1_MASK, lock_reg, LOCK_REG1);
	if (ret != 0) {
		return ret;
	}
	lock_reg = (((lock >> 30) & 0x1F) << 10) |
		   (lock & 0x3FF);
	ret = litex_clk_change_value(LOCK23_MASK, lock_reg, LOCK_REG2);
	if (ret != 0) {
		return ret;
	}
	lock_reg = (((lock >> 35) & 0x1F) << 10) |
		   ((lock >> 10) & 0x3FF);
	ret = litex_clk_change_value(LOCK23_MASK, lock_reg, LOCK_REG3);
	return ret;
}
/* Set all multiplier-related regs: mul, filt and lock regs */
/*
 * Apply every multiplier-related setting: the multiplier itself plus the
 * matching loop-filter and lock registers. Stops at the first failure
 * and returns that error; 0 on full success.
 */
static int litex_clk_set_mul(void)
{
	int err = litex_clk_set_mulreg();

	if (err == 0) {
		err = litex_clk_set_filt();
	}
	if (err == 0) {
		err = litex_clk_set_lock();
	}

	return err;
}
/*
 * Update both global divider and multiplier, choosing an order that
 * keeps the intermediate VCO frequency inside its legal range. If no
 * safe order exists, a divider-write timeout is tolerated because the
 * PLL is expected to lose lock mid-transition.
 */
static int litex_clk_set_both_globs(void)
{
	/*
	 * we need to check what change first to prevent
	 * getting our VCO_FREQ out of possible range
	 */
	uint64_t vco_freq;
	int ret;
	/* div-first case */
	vco_freq = litex_clk_calc_global_frequency(
				ldev->g_config.mul,
				ldev->ts_g_config.div);
	if (vco_freq > ldev->vco.max || vco_freq < ldev->vco.min) {
		/* div-first not safe */
		vco_freq = litex_clk_calc_global_frequency(
				ldev->ts_g_config.mul,
				ldev->g_config.div);
		if (vco_freq > ldev->vco.max || vco_freq < ldev->vco.min) {
			/* mul-first not safe */
			ret = litex_clk_set_divreg();
			/* Ignore timeout because we expect that to happen */
			if (ret != -ETIME && ret != 0) {
				return ret;
			} else if (ret == -ETIME) {
				/* Record the value the hardware was given anyway */
				ldev->g_config.div = ldev->ts_g_config.div;
				LOG_DBG("Global divider set to %u",
					ldev->g_config.div);
			}
			ret = litex_clk_set_mul();
			if (ret != 0) {
				return ret;
			}
		} else {
			/* mul-first safe */
			ret = litex_clk_set_mul();
			if (ret != 0) {
				return ret;
			}
			ret = litex_clk_set_divreg();
			if (ret != 0) {
				return ret;
			}
		}
	} else {
		/* div-first safe */
		ret = litex_clk_set_divreg();
		if (ret != 0) {
			return ret;
		}
		ret = litex_clk_set_mul();
		if (ret != 0) {
			return ret;
		}
	}
	return 0;
}
/* Set global divider, multiplier, filt and lock values */
/* Set global divider, multiplier, filt and lock values */
static int litex_clk_set_globs(void)
{
	int ret;
	uint8_t set_div = 0,
		set_mul = 0;
	/* Only touch hardware for the parameters that actually changed */
	set_div = ldev->ts_g_config.div != ldev->g_config.div;
	set_mul = ldev->ts_g_config.mul != ldev->g_config.mul;
	if (set_div || set_mul) {
		if (set_div && set_mul) {
			/* Order-sensitive combined update */
			ret = litex_clk_set_both_globs();
			if (ret != 0) {
				return ret;
			}
		} else if (set_div) {
			/* set divider only */
			ret = litex_clk_set_divreg();
			if (ret != 0) {
				return ret;
			}
		} else {
			/* set multiplier only */
			ret = litex_clk_set_mul();
			if (ret != 0) {
				return ret;
			}
		}
		/* Commit the new VCO frequency */
		ldev->g_config.freq = ldev->ts_g_config.freq;
	}
	return 0;
}
/* Round scaled value*/
/*
 * Round val/mod to the nearest integer. An exact half (remainder equal
 * to mod/2) rounds down, matching integer truncation of the quotient.
 */
static inline uint32_t litex_round(uint32_t val, uint32_t mod)
{
	uint32_t quotient = val / mod;
	uint32_t remainder = val % mod;

	return (remainder > mod / 2) ? quotient + 1 : quotient;
}
/*
* Duty Cycle
*/
/* Returns accurate duty ratio of given clkout*/
/* Returns accurate duty ratio of given clkout*/
int litex_clk_get_duty_cycle(struct litex_clk_clkout *lcko,
			     struct clk_duty *duty)
{
	struct litex_clk_regs_addr drp_addr = litex_clk_regs_addr_init();
	int ret;
	uint32_t divider;
	uint16_t clkout_reg1, clkout_reg2;
	uint8_t clkout_nr, high_time, edge, no_cnt, frac_en, frac_cnt;
	clkout_nr = lcko->id;
	/* Check if divider is off */
	ret = litex_clk_get_DO(drp_addr.clkout[clkout_nr].reg2, &clkout_reg2);
	if (ret != 0) {
		return ret;
	}
	edge = (clkout_reg2 >> EDGE_POS) & EDGE_MASK;
	no_cnt = (clkout_reg2 >> NO_CNT_POS) & NO_CNT_MASK;
	frac_en = (clkout_reg2 >> FRAC_EN_POS) & FRAC_EN_MASK;
	frac_cnt = (clkout_reg2 >> FRAC_POS) & FRAC_MASK;
	/* get duty 50% when divider is off or fractional is enabled */
	if (no_cnt || (frac_en && frac_cnt)) {
		duty->num = 1;
		duty->den = 2;
		return 0;
	}
	ret = litex_clk_get_DO(drp_addr.clkout[clkout_nr].reg1, &clkout_reg1);
	if (ret != 0) {
		return ret;
	}
	/* Full divider is high time + low time */
	divider = clkout_reg1 & HL_TIME_MASK;
	high_time = (clkout_reg1 >> HIGH_TIME_POS) & HL_TIME_MASK;
	divider += high_time;
	/* Scaling to consider edge control bit */
	duty->num = high_time * 10 + edge * 5;
	duty->den = (divider + edge) * 10;
	return 0;
}
/* Calculates duty cycle for given ratio in percent, 1% accuracy */
/*
 * Convert a num/den duty ratio into a whole percentage (0-100),
 * rounded to the nearest percent.
 */
static inline uint8_t litex_clk_calc_duty_percent(struct clk_duty *duty)
{
	uint32_t scaled_ratio = (duty->num * 10000) / duty->den;

	return (uint8_t)litex_round(scaled_ratio, 100);
}
/* Calculate necessary values for setting duty cycle in normal mode */
/*
 * Calculate high/low time and edge bit that best approximate the wanted
 * duty ratio for the current divider (non-fractional mode).
 * calc_new != 0 uses the to-be-set duty from ts_config; otherwise the
 * duty currently programmed in hardware is re-fitted.
 */
static int litex_clk_calc_duty_normal(struct litex_clk_clkout *lcko,
				      int calc_new)
{
	struct clk_duty duty;
	int delta_d;
	uint32_t ht_aprox, synth_duty, min_d;
	uint8_t high_time_it, edge_it, high_duty,
		divider = lcko->config.div;
	int err;
	if (calc_new) {
		duty = lcko->ts_config.duty;
	} else {
		err = litex_clk_get_duty_cycle(lcko, &duty);
		if (err != 0) {
			return err;
		}
	}
	high_duty = litex_clk_calc_duty_percent(&duty);
	min_d = INT_MAX;
	/* check if duty is available to set */
	ht_aprox = high_duty * divider;
	if (ht_aprox > ((HIGH_LOW_TIME_REG_MAX * 100) + 50) ||
			     ((HIGH_LOW_TIME_REG_MAX * 100) + 50) <
			     (divider * 100) - ht_aprox) {
		return -EINVAL;
	}
	/* to prevent high_time == 0 or low_time == 0 */
	for (high_time_it = 1; high_time_it < divider; high_time_it++) {
		/* edge bit adds half a cycle of high time */
		for (edge_it = 0; edge_it < 2; edge_it++) {
			synth_duty = (high_time_it * 100 + 50 * edge_it) /
				     divider;
			delta_d = synth_duty - high_duty;
			delta_d = abs(delta_d);
			/* check if low_time won't be above acceptable range */
			if (delta_d < min_d && (divider - high_time_it) <=
					       HIGH_LOW_TIME_REG_MAX) {
				min_d = delta_d;
				lcko->div.high_time = high_time_it;
				lcko->div.low_time = divider - high_time_it;
				lcko->div.edge = edge_it;
				lcko->config.duty.num = high_time_it * 100 + 50
							* edge_it;
				lcko->config.duty.den = divider * 100;
			}
		}
	}
	/*
	 * Calculating values in normal mode,
	 * clear control bits of fractional part
	 */
	lcko->frac.frac_wf_f = 0;
	lcko->frac.frac_wf_r = 0;
	return 0;
}
/* Calculates duty high_time for given divider and ratio */
/*
 * Compute the high-time counter value that realises the requested duty
 * ratio for the given divider, rounded to the nearest count.
 */
static inline int litex_clk_calc_duty_high_time(struct clk_duty *duty,
						uint32_t divider)
{
	uint32_t scaled_high = litex_clk_calc_duty_percent(duty) * divider;

	return litex_round(scaled_high, 100);
}
/* Set duty cycle with given ratio */
/* Set duty cycle with given ratio */
static int litex_clk_set_duty_cycle(struct litex_clk_clkout *lcko,
				    struct clk_duty *duty)
{
	int ret;
	uint16_t bitset1, bitset2;
	uint8_t clkout_nr = lcko->id,
		*edge = &lcko->div.edge,
		*high_time = &lcko->div.high_time,
		 high_duty = litex_clk_calc_duty_percent(duty),
		*low_time = &lcko->div.low_time;
	/* Duty control is unavailable while the fractional divider is on */
	if (lcko->frac.frac == 0) {
		lcko->ts_config.duty = *duty;
		LOG_DBG("CLKOUT%d: setting duty: %u/%u",
			lcko->id, duty->num, duty->den);
		ret = litex_clk_calc_duty_normal(lcko, true);
		if (ret != 0) {
			LOG_ERR("CLKOUT%d: cannot set %d%% duty cycle",
							clkout_nr, high_duty);
			return ret;
		}
	} else {
		LOG_ERR("CLKOUT%d: cannot set duty cycle when fractional divider enabled",
									clkout_nr);
		return -EACCES;
	}
	bitset1 = (*high_time << HIGH_TIME_POS) |
		  (*low_time << LOW_TIME_POS);
	bitset2 = (*edge << EDGE_POS);
	LOG_DBG("SET DUTY CYCLE: e:%u ht:%u lt:%u\nbitset1: 0x%x bitset2: 0x%x",
		*edge, *high_time, *low_time, bitset1, bitset2);
	ret = litex_clk_set_clock(clkout_nr, REG1_DUTY_MASK, bitset1,
					     REG2_DUTY_MASK, bitset2);
	if (ret != 0) {
		return ret;
	}
	LOG_INF("CLKOUT%d: set duty: %d%%", lcko->id,
		litex_clk_calc_duty_percent(&lcko->config.duty));
	return 0;
}
/*
* Phase
*/
/* Calculate necessary values for setting phase in normal mode */
/*
 * Fit phase_mux (1/8-period steps) and delay_time (whole-period steps)
 * to the requested period offset in non-fractional mode. Returns
 * -EINVAL when the offset exceeds the maximum programmable delay.
 */
static int litex_clk_calc_phase_normal(struct litex_clk_clkout *lcko)
{
	uint64_t period_buff;
	uint32_t post_glob_div_f, global_period, clkout_period,
		*period_off = &lcko->ts_config.period_off;
	uint8_t divider = lcko->config.div;
	/* ps unit */
	post_glob_div_f = (uint32_t)litex_clk_get_real_global_frequency();
	period_buff = PICOS_IN_SEC;
	period_buff /= post_glob_div_f;
	global_period = (uint32_t)period_buff;
	clkout_period = global_period * divider;
	if (lcko->ts_config.phase != 0) {
		int synth_phase, delta_p, min_p, p_o;
		uint8_t delay, p_m;
		/* period_off arrives scaled by 10000 (fraction of a period) */
		*period_off = litex_round(clkout_period * (*period_off), 10000);
		if (*period_off / global_period > DELAY_TIME_MAX) {
			return -EINVAL;
		}
		min_p = INT_MAX;
		p_o = *period_off;
		/* Exhaustive search over both delay knobs for the best match */
		/* Delay_time: (0-63) */
		for (delay = 0; delay <= DELAY_TIME_MAX; delay++) {
			/* phase_mux: (0-7) */
			for (p_m = 0; p_m <= PHASE_MUX_MAX; p_m++) {
				synth_phase = (delay * global_period) +
					((p_m * ((global_period * 100) / 8) / 100));
				delta_p = synth_phase - p_o;
				delta_p = abs(delta_p);
				if (delta_p < min_p) {
					min_p = delta_p;
					lcko->phase.phase_mux = p_m;
					lcko->phase.delay_time = delay;
					lcko->config.period_off = synth_phase;
				}
			}
		}
	} else {
		/* Don't change phase offset*/
		lcko->phase.phase_mux = 0;
		lcko->phase.delay_time = 0;
	}
	/*
	 * Calculating values in normal mode,
	 * fractional control bits need to be zero
	 */
	lcko->frac.phase_mux_f = 0;
	return 0;
}
/* Convert phase offset to positive lower than 360 deg. and calculate period */
/*
 * Normalise the requested phase into [0, 360) degrees and derive the
 * period offset as a fraction of one period scaled by 10000.
 * Always succeeds and returns 0.
 */
static int litex_clk_prepare_phase(struct litex_clk_clkout *lcko)
{
	int deg = lcko->ts_config.phase % 360;

	if (deg < 0) {
		deg += 360;
	}

	lcko->ts_config.phase = deg;
	lcko->ts_config.period_off = (deg * 10000) / 360;

	return 0;
}
/* Calculate necessary values for setting phase */
/*
 * Normalise the requested phase and then fit the MMCM delay settings
 * to it (normal, non-fractional mode).
 */
static int litex_clk_calc_phase(struct litex_clk_clkout *lcko)
{
	/* prepare_phase always succeeds; its return is not interesting here */
	(void)litex_clk_prepare_phase(lcko);

	return litex_clk_calc_phase_normal(lcko);
}
/* Returns phase-specific values of given clock output */
/*
 * Read phase_mux and delay_time for one clock output from its two DRP
 * registers. Returns 0 on success, a litex_clk_get_DO() error otherwise.
 */
static int litex_clk_get_phase_data(struct litex_clk_clkout *lcko,
				    uint8_t *phase_mux, uint8_t *delay_time)
{
	struct litex_clk_regs_addr drp_addr = litex_clk_regs_addr_init();
	int ret;
	uint16_t r1, r2;
	uint8_t clkout_nr = lcko->id;
	ret = litex_clk_get_DO(drp_addr.clkout[clkout_nr].reg1, &r1);
	if (ret != 0) {
		return ret;
	}
	ret = litex_clk_get_DO(drp_addr.clkout[clkout_nr].reg2, &r2);
	if (ret != 0) {
		return ret;
	}
	*phase_mux = (r1 >> PHASE_MUX_POS) & PHASE_MUX_MASK;
	*delay_time = (r2 >> DELAY_TIME_POS) & HL_TIME_MASK;
	return 0;
}
/* Returns phase of given clock output in time offset */
/*
 * Read the currently programmed phase of a clock output and return it
 * in degrees (rounded), or a negative error code on DRP read failure.
 */
int litex_clk_get_phase(struct litex_clk_clkout *lcko)
{
	uint64_t period_buff;
	uint32_t divider = 0, fract_cnt, post_glob_div_f,
		 pm, global_period, clkout_period, period;
	uint8_t phase_mux = 0, delay_time = 0;
	int err = 0;
	/* Propagate DRP read errors instead of computing on stale zeros */
	err = litex_clk_get_phase_data(lcko, &phase_mux, &delay_time);
	if (err != 0) {
		return err;
	}
	/* was garbled as "÷r" (mis-decoded HTML entity for "&divider") */
	err = litex_clk_get_clkout_divider(lcko, &divider, &fract_cnt);
	if (err != 0) {
		return err;
	}
	post_glob_div_f = (uint32_t)litex_clk_get_real_global_frequency();
	period_buff = PICOS_IN_SEC;
	period_buff /= post_glob_div_f;
	/* ps unit */
	global_period = (uint32_t)period_buff;
	clkout_period = global_period * divider;
	/* phase_mux contributes in 1/PHASE_MUX_RES_FACTOR period steps */
	pm = (phase_mux * global_period * 1000) / PHASE_MUX_RES_FACTOR;
	pm = litex_round(pm, 1000);
	period = delay_time * global_period + pm;
	/* Convert the picosecond offset to degrees of one output period */
	period = period * 1000 / clkout_period;
	period = period * 360;
	return litex_round(period, 1000);
}
/* Returns phase of given clock output in degrees */
/* Returns phase of given clock output in degrees */
int litex_clk_get_phase_deg(struct litex_clk_clkout *lcko)
{
	uint64_t post_glob_div_f, buff, clkout_period;
	/* Global period in ps, derived from the post-divider frequency */
	post_glob_div_f = (uint32_t)litex_clk_get_real_global_frequency();
	buff = PICOS_IN_SEC;
	buff /= post_glob_div_f;
	clkout_period = (uint32_t)buff;
	clkout_period *= lcko->config.div;
	/* Scale by 1000 before dividing to keep precision, then round */
	buff = lcko->config.period_off * 1000 / clkout_period;
	buff *= 360;
	buff = litex_round(buff, 1000);
	return (int)buff;
}
/* Sets phase given in degrees on given clock output */
/*
 * Set the phase of a clock output, given in degrees (may be negative;
 * it is normalised to [0, 360) internally). Returns 0 on success,
 * negative errno on calculation or DRP failure.
 */
int litex_clk_set_phase(struct litex_clk_clkout *lcko, int degrees)
{
	int ret;
	uint16_t bitset1, bitset2, reg2_mask;
	uint8_t *phase_mux = &lcko->phase.phase_mux,
		*delay_time = &lcko->phase.delay_time,
		clkout_nr = lcko->id;
	lcko->ts_config.phase = degrees;
	reg2_mask = REG2_PHASE_MASK;
	/* "%d": degrees is signed and may legitimately be negative */
	LOG_DBG("CLKOUT%d: setting phase: %d deg", lcko->id, degrees);
	ret = litex_clk_calc_phase(lcko);
	if (ret != 0) {
		LOG_ERR("CLKOUT%d: phase offset %d deg is too high",
			clkout_nr, degrees);
		return ret;
	}
	bitset1 = (*phase_mux << PHASE_MUX_POS);
	bitset2 = (*delay_time << DELAY_TIME_POS);
	ret = litex_clk_set_clock(clkout_nr, REG1_PHASE_MASK, bitset1,
					     reg2_mask, bitset2);
	if (ret != 0) {
		return ret;
	}
	/* Store the phase that was actually achievable, not the request */
	lcko->config.phase = litex_clk_get_phase_deg(lcko);
	LOG_INF("CLKOUT%d: set phase: %d deg", lcko->id, lcko->config.phase);
	LOG_DBG("SET PHASE: pm:%u dt:%u\nbitset1: 0x%x bitset2: 0x%x",
		*phase_mux, *delay_time, bitset1, bitset2);
	return 0;
}
/*
* Frequency
*/
/* Returns rate in Hz */
/* Returns rate in Hz */
static inline uint32_t litex_clk_calc_rate(struct litex_clk_clkout *lcko)
{
	/* VCO frequency from the pending global mul/div, then per-output divide */
	uint64_t f = litex_clk_calc_global_frequency(ldev->ts_g_config.mul,
						     ldev->ts_g_config.div);
	f /= lcko->config.div;
	return (uint32_t)f;
}
/*
* Written since there is no pow() in math.h. Only for exponent
* and base above 0. Used for calculating scaling factor for
* frequency margin
*
*/
/*
 * Integer exponentiation: returns base raised to exp.
 * Written because math.h pow() is unavailable here; used to build the
 * decimal scaling factor for the frequency margin.
 *
 * Fix: the accumulator was declared `int`, mismatching the uint32_t
 * return type and risking signed-overflow UB for large results; it is
 * now unsigned so overflow wraps as defined behavior.
 */
static uint32_t litex_clk_pow(uint32_t base, uint32_t exp)
{
	uint32_t ret = 1;
	while (exp--) {
		ret *= base;
	}
	return ret;
}
/* Returns true when possible to set frequency with given global settings */
/* Returns true when possible to set frequency with given global settings */
static int litex_clk_calc_clkout_params(struct litex_clk_clkout *lcko,
					uint64_t vco_freq)
{
	int delta_f;
	uint64_t m, clk_freq = 0;
	uint32_t d, margin = 1;
	if (lcko->margin.exp) {
		margin = litex_clk_pow(10, lcko->margin.exp);
	}
	lcko->div.no_cnt = 0;
	/* Scan all legal per-output dividers for one inside the margin */
	for (d = lcko->clkout_div.min; d <= lcko->clkout_div.max; d++) {
		clk_freq = vco_freq;
		clk_freq /= d;
		m = lcko->ts_config.freq * lcko->margin.m;
		/* Scale margin according to its exponent */
		if (lcko->margin.exp) {
			m /= margin;
		}
		/* NOTE(review): int delta of 64-bit values — assumes freqs fit in int */
		delta_f = clk_freq - lcko->ts_config.freq;
		delta_f = abs(delta_f);
		if (delta_f <= m) {
			lcko->config.freq = (uint32_t)clk_freq;
			if (lcko->config.div != d) {
				/* Divider changed: flag this output for rewrite */
				ldev->update_clkout[lcko->id] = 1;
			}
			lcko->config.div = d;
			/* for sake of completeness */
			lcko->ts_config.div = d;
			/* we are not using fractional divider */
			lcko->frac.frac_en = 0;
			lcko->frac.frac = 0;
			if (d == 1) {
				lcko->div.no_cnt = 1;
			}
			LOG_DBG("CLKOUT%d: freq:%u div:%u gdiv:%u gmul:%u",
				lcko->id, lcko->config.freq, lcko->config.div,
				ldev->ts_g_config.div, ldev->ts_g_config.mul);
			return true;
		}
	}
	return false;
}
/* Compute dividers for all active clock outputs */
/*
 * Try to fit per-output dividers for every active clock output at the
 * given VCO frequency. Returns true only when all outputs fit.
 */
static int litex_clk_calc_all_clkout_params(uint64_t vco_freq)
{
	uint32_t i;

	for (i = 0; i < ldev->nclkout; i++) {
		if (!litex_clk_calc_clkout_params(&ldev->clkouts[i],
						  vco_freq)) {
			return false;
		}
	}

	return true;
}
/* Calculate parameters for whole active part of MMCM */
/* Calculate parameters for whole active part of MMCM */
static int litex_clk_calc_all_params(void)
{
	uint32_t div, mul;
	uint64_t vco_freq = 0;
	/*
	 * Search divider ascending and multiplier descending so the first
	 * hit favours a high VCO frequency within the legal window.
	 */
	for (div = ldev->divclk.min; div <= ldev->divclk.max; div++) {
		ldev->ts_g_config.div = div;
		for (mul = ldev->clkfbout.max; mul >= ldev->clkfbout.min;
								    mul--) {
			int below, above, all_valid = true;
			vco_freq = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC * (uint64_t)mul;
			vco_freq /= div;
			/* vco_margin shrinks the usable VCO window at both ends */
			below = vco_freq < (ldev->vco.min
					    * (1 + ldev->vco_margin));
			above = vco_freq > (ldev->vco.max
					    * (1 - ldev->vco_margin));
			if (!below && !above) {
				all_valid = litex_clk_calc_all_clkout_params
								    (vco_freq);
				if (all_valid) {
					ldev->ts_g_config.mul = mul;
					ldev->ts_g_config.freq = vco_freq;
					LOG_DBG("GLOBAL: freq:%llu g_div:%u g_mul:%u",
						ldev->ts_g_config.freq,
						ldev->ts_g_config.div,
						ldev->ts_g_config.mul);
					return 0;
				}
			}
		}
	}
	LOG_ERR("Cannot find correct settings for all clock outputs!");
	return -ENOTSUP;
}
/*
 * Check that the requested rate (plus/minus the per-output margin) is
 * achievable with any combination of global and per-output dividers.
 * Returns 0 when in range, -EINVAL otherwise.
 *
 * Fixes: the decimal margin exponent was computed but never applied
 * (m was left unscaled, unlike litex_clk_calc_clkout_params); the
 * rate * margin.m product was evaluated in 32 bits before widening;
 * the `min` product lacked the 64-bit cast that `max` already had.
 */
int litex_clk_check_rate_range(struct litex_clk_clkout *lcko, uint32_t rate)
{
	uint64_t max, min, m;
	uint32_t div, margin;
	/* Widen before multiplying to avoid 32-bit overflow */
	m = (uint64_t)rate * lcko->margin.m;
	if (lcko->margin.exp) {
		/* Scale margin according to its exponent */
		margin = litex_clk_pow(10, lcko->margin.exp);
		m /= margin;
	}
	max = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC * (uint64_t)ldev->clkfbout.max;
	div = ldev->divclk.min * lcko->clkout_div.min;
	max /= div;
	max += m;
	min = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC * (uint64_t)ldev->clkfbout.min;
	div = ldev->divclk.max * lcko->clkout_div.max;
	min /= div;
	if (min < m) {
		min = 0;
	} else {
		min -= m;
	}
	if ((uint64_t)rate < min || (uint64_t)rate > max) {
		return -EINVAL;
	}
	return 0;
}
/* Returns closest available clock rate in Hz */
/*
 * Compute the closest achievable rate (in Hz) to the request, updating
 * the tentative global/per-output configuration as a side effect.
 * Returns the achievable rate, or a negative errno when the request is
 * out of range or no parameter set fits.
 */
long litex_clk_round_rate(struct litex_clk_clkout *lcko, unsigned long rate)
{
	int err = litex_clk_check_rate_range(lcko, rate);

	if (err != 0) {
		return -EINVAL;
	}

	lcko->ts_config.freq = rate;

	err = litex_clk_calc_all_params();
	if (err != 0) {
		return err;
	}

	return litex_clk_calc_rate(lcko);
}
/*
 * Commit the precomputed divider/fractional settings of one clock
 * output to hardware and clear its pending-update flag.
 */
int litex_clk_write_rate(struct litex_clk_clkout *lcko)
{
	int ret;
	uint16_t bitset1, bitset2;
	uint8_t *divider = &lcko->config.div,
		*edge = &lcko->div.edge,
		*high_time = &lcko->div.high_time,
		*low_time = &lcko->div.low_time,
		*no_cnt = &lcko->div.no_cnt,
		*frac = &lcko->frac.frac,
		*frac_en = &lcko->frac.frac_en,
		*frac_wf_r = &lcko->frac.frac_wf_r;
	bitset1 = (*high_time << HIGH_TIME_POS) |
		  (*low_time << LOW_TIME_POS);
	bitset2 = (*frac << FRAC_POS) |
		  (*frac_en << FRAC_EN_POS) |
		  (*frac_wf_r << FRAC_WF_R_POS) |
		  (*edge << EDGE_POS) |
		  (*no_cnt << NO_CNT_POS);
	LOG_DBG("SET RATE: div:%u f:%u fwfr:%u fen:%u nc:%u e:%u ht:%u lt:%u\nbitset1: 0x%x bitset2: 0x%x",
		*divider, *frac, *frac_wf_r, *frac_en,
		*no_cnt, *edge, *high_time, *low_time, bitset1, bitset2);
	ret = litex_clk_set_clock(lcko->id, REG1_FREQ_MASK, bitset1,
					    REG2_FREQ_MASK, bitset2);
	if (ret != 0) {
		return ret;
	}
	/* This output is now in sync with the computed configuration */
	ldev->update_clkout[lcko->id] = 0;
	return 0;
}
/*
 * Rewrite every clock output whose divider changed as a side effect of
 * a global mul/div update: refit its duty cycle to the new divider and
 * push the new settings to hardware.
 */
int litex_clk_update_clkouts(void)
{
	struct litex_clk_clkout *lcko;
	int ret;
	uint8_t c;
	for (c = 0; c < ldev->nclkout; c++) {
		if (ldev->update_clkout[c]) {
			lcko = &ldev->clkouts[c];
			/* Re-fit duty to the divider currently in hardware */
			ret = litex_clk_calc_duty_normal(lcko, false);
			if (ret != 0) {
				return ret;
			}
			ret = litex_clk_write_rate(lcko);
			if (ret != 0) {
				return ret;
			}
			LOG_INF("CLKOUT%d: updated rate: %u to %u HZ",
				lcko->id, lcko->ts_config.freq,
				lcko->config.freq);
		}
	}
	return 0;
}
/* Set closest available clock rate in Hz, parent_rate ignored */
/* Set closest available clock rate in Hz, parent_rate ignored */
int litex_clk_set_rate(struct litex_clk_clkout *lcko, unsigned long rate)
{
	int ret;
	LOG_DBG("CLKOUT%d: setting rate: %lu", lcko->id, rate);
	/* Compute a full parameter set; negative means unachievable rate */
	ret = litex_clk_round_rate(lcko, rate);
	if (ret < 0) {
		return ret;
	}
	/* Program global mul/div first, then this output, then siblings */
	ret = litex_clk_set_globs();
	if (ret != 0) {
		return ret;
	}
	ret = litex_clk_calc_duty_normal(lcko, false);
	if (ret != 0) {
		return ret;
	}
	ret = litex_clk_write_rate(lcko);
	if (ret != 0) {
		return ret;
	}
	LOG_INF("CLKOUT%d: set rate: %u HZ", lcko->id, lcko->config.freq);
	/* Other outputs may need rewriting after the global change */
	ret = litex_clk_update_clkouts();
	if (ret != 0) {
		return ret;
	}
#ifdef CONFIG_CLOCK_CONTROL_LOG_LEVEL_DBG
	litex_clk_print_all_params();
	litex_clk_print_all_regs();
#endif /* CONFIG_CLOCK_CONTROL_LOG_LEVEL_DBG */
	return 0;
}
/* Set default clock value from device tree for given clkout*/
/*
 * Apply the devicetree defaults (rate, duty, phase — in that order)
 * to one clock output. Returns the first failure, or 0.
 */
static int litex_clk_set_def_clkout(int clkout_nr)
{
	struct litex_clk_clkout *lcko = &ldev->clkouts[clkout_nr];
	int err;

	err = litex_clk_set_rate(lcko, lcko->def.freq);
	if (err != 0) {
		return err;
	}

	err = litex_clk_set_duty_cycle(lcko, &lcko->def.duty);
	if (err != 0) {
		return err;
	}

	return litex_clk_set_phase(lcko, lcko->def.phase);
}
/*
 * Apply devicetree defaults to every clock output, stopping at the
 * first output that fails. Returns 0 when all succeed.
 */
static int litex_clk_set_all_def_clkouts(void)
{
	int i, err;

	for (i = 0; i < ldev->nclkout; i++) {
		err = litex_clk_set_def_clkout(i);
		if (err != 0) {
			return err;
		}
	}

	return 0;
}
/*
* Returns parameters of given clock output
*
* clock: device structure for driver
* sub_system: pointer to struct litex_clk_clkout
* casted to clock_control_subsys with
* all clkout parameters
*/
static int litex_clk_get_subsys_rate(const struct device *clock,
				     clock_control_subsys_t sys, uint32_t *rate)
{
	/* sys carries a struct litex_clk_setup identifying the output */
	struct litex_clk_setup *setup = sys;
	struct litex_clk_clkout *lcko;
	lcko = &ldev->clkouts[setup->clkout_nr];
	/* NOTE(review): setup->clkout_nr is not bounds-checked here */
	*rate = litex_clk_calc_rate(lcko);
	return 0;
}
/*
 * Report the status of one clock output and fill the caller's setup
 * struct with the current rate, duty (percent) and phase (degrees).
 *
 * Fix: a failed duty read previously returned a negative errno through
 * the enum clock_control_status return type; it now maps to
 * CLOCK_CONTROL_STATUS_UNKNOWN so callers get a valid enumerator.
 */
static enum clock_control_status litex_clk_get_status(const struct device *dev,
						      clock_control_subsys_t sys)
{
	struct litex_clk_setup *setup = sys;
	struct clk_duty duty;
	struct litex_clk_clkout *lcko;
	int ret;
	lcko = &ldev->clkouts[setup->clkout_nr];
	setup->rate = litex_clk_calc_rate(lcko);
	ret = litex_clk_get_duty_cycle(lcko, &duty);
	if (ret != 0) {
		/* Do not leak a negative errno through the status enum */
		return CLOCK_CONTROL_STATUS_UNKNOWN;
	}
	setup->duty = litex_clk_calc_duty_percent(&duty);
	setup->phase = litex_clk_get_phase(lcko);
	return CLOCK_CONTROL_STATUS_ON;
}
/*
 * clock_control "on" handler: reconcile one clock output with the
 * rate/phase/duty requested in the setup struct, only touching hardware
 * for parameters that differ from the current configuration.
 */
static inline int litex_clk_on(const struct device *dev, clock_control_subsys_t sys)
{
	struct litex_clk_setup *setup = sys;
	struct clk_duty duty;
	struct litex_clk_clkout *lcko;
	uint8_t duty_perc;
	int ret;
	lcko = &ldev->clkouts[setup->clkout_nr];
	if (lcko->config.freq != setup->rate) {
		ret = litex_clk_set_rate(lcko, setup->rate);
		if (ret != 0) {
			return ret;
		}
	}
	if (lcko->config.phase != setup->phase) {
		ret = litex_clk_set_phase(lcko, setup->phase);
		if (ret != 0) {
			return ret;
		}
	}
	/* Duty is compared in whole percent, as that is the setup unit */
	duty_perc = litex_clk_calc_duty_percent(&lcko->config.duty);
	if (duty_perc != setup->duty) {
		duty.num = setup->duty;
		duty.den = 100;
		ret = litex_clk_set_duty_cycle(lcko, &duty);
		if (ret != 0) {
			return ret;
		}
	}
	return 0;
}
/*
 * clock_control "off" handler: powers down the whole MMCM by clearing
 * the power register, which affects every clock output at once.
 */
static inline int litex_clk_off(const struct device *dev,
				clock_control_subsys_t sub_system)
{
	return litex_clk_change_value(ZERO_REG, ZERO_REG, POWER_REG);
}
/* Zephyr clock_control driver API vtable for the LiteX MMCM */
static const struct clock_control_driver_api litex_clk_api = {
	.on = litex_clk_on,
	.off = litex_clk_off,
	.get_rate = litex_clk_get_subsys_rate,
	.get_status = litex_clk_get_status
};
static void litex_clk_dts_clkout_ranges_read(struct litex_clk_range *clkout_div)
{
clkout_div->min = CLKOUT_DIVIDE_MIN;
clkout_div->max = CLKOUT_DIVIDE_MAX;
}
/*
 * Load lock/drdy wait timeouts (milliseconds) from the devicetree-derived
 * constants and reject values below the ~1 ms polling resolution.
 */
static int litex_clk_dts_timeout_read(struct litex_clk_timeout *timeout)
{
	/* Read wait_lock timeout from device property*/
	timeout->lock = LOCK_TIMEOUT;
	if (timeout->lock < 1) {
		LOG_ERR("LiteX CLK driver cannot wait shorter than ca. 1ms\n");
		return -EINVAL;
	}
	/* Read wait_drdy timeout from device property*/
	timeout->drdy = DRDY_TIMEOUT;
	if (timeout->drdy < 1) {
		LOG_ERR("LiteX CLK driver cannot wait shorter than ca. 1ms\n");
		return -EINVAL;
	}
	return 0;
}
/*
 * Populate each devicetree-declared clock output. lcko and clkout_div
 * look unused here but are consumed inside the CLKOUT_INIT() macro
 * expansions below.
 */
static int litex_clk_dts_clkouts_read(void)
{
	struct litex_clk_range clkout_div;
	struct litex_clk_clkout *lcko;
	litex_clk_dts_clkout_ranges_read(&clkout_div);
#if CLKOUT_EXIST(0) == 1
	CLKOUT_INIT(0)
#endif
#if CLKOUT_EXIST(1) == 1
	CLKOUT_INIT(1)
#endif
#if CLKOUT_EXIST(2) == 1
	CLKOUT_INIT(2)
#endif
#if CLKOUT_EXIST(3) == 1
	CLKOUT_INIT(3)
#endif
#if CLKOUT_EXIST(4) == 1
	CLKOUT_INIT(4)
#endif
#if CLKOUT_EXIST(5) == 1
	CLKOUT_INIT(5)
#endif
#if CLKOUT_EXIST(6) == 1
	CLKOUT_INIT(6)
#endif
	return 0;
}
/*
 * Seed every clock output with the MMIO base and mark its devicetree
 * defaults as the pending (to-set) configuration.
 */
static void litex_clk_init_clkouts(void)
{
	int idx;

	for (idx = 0; idx < ldev->nclkout; idx++) {
		struct litex_clk_clkout *out = &ldev->clkouts[idx];

		out->base = ldev->base;
		/* mark defaults to set */
		out->ts_config.freq = out->def.freq;
		out->ts_config.duty = out->def.duty;
		out->ts_config.phase = out->def.phase;
	}
}
/* Number of clock outputs declared in the devicetree (compile-time). */
static int litex_clk_dts_cnt_clocks(void)
{
	return NCLKOUT;
}
/* Load the global divider/multiplier/VCO limits into the device state */
static void litex_clk_dts_global_ranges_read(void)
{
	ldev->divclk.min = DIVCLK_DIVIDE_MIN;
	ldev->divclk.max = DIVCLK_DIVIDE_MAX;
	ldev->clkfbout.min = CLKFBOUT_MULT_MIN;
	ldev->clkfbout.max = CLKFBOUT_MULT_MAX;
	ldev->vco.min = VCO_FREQ_MIN;
	ldev->vco.max = VCO_FREQ_MAX;
	ldev->vco_margin = VCO_MARGIN;
}
static int litex_clk_dts_global_read(void)
{
int ret;
ldev->nclkout = litex_clk_dts_cnt_clocks();
clkouts = k_malloc(sizeof(struct litex_clk_clkout) * ldev->nclkout);
ldev->update_clkout = k_malloc(sizeof(uint8_t) * ldev->nclkout);
if (!clkouts || !ldev->update_clkout) {
LOG_ERR("CLKOUT memory allocation failure!");
return -ENOMEM;
}
ldev->clkouts = clkouts;
ret = litex_clk_dts_timeout_read(&ldev->timeout);
if (ret != 0) {
return ret;
}
litex_clk_dts_global_ranges_read();
return 0;
}
/*
 * Bring the MMCM out of power-down by setting every bit of the power
 * register. Returns 0 on success, the DRP error code otherwise.
 */
static int litex_clk_init_glob_clk(void)
{
	/* Power on MMCM module */
	int err = litex_clk_change_value(FULL_REG_16, FULL_REG_16, POWER_REG);

	if (err != 0) {
		LOG_ERR("MMCM initialization failure, ret: %d", err);
	}

	return err;
}
/* Enable module, set global divider, multiplier, default clkout parameters */
/* Enable module, set global divider, multiplier, default clkout parameters */
static int litex_clk_init(const struct device *dev)
{
	int ret;
	/* Driver state lives on the heap; ldev is a file-scope pointer */
	ldev = k_malloc(sizeof(struct litex_clk_device));
	if (ldev == NULL) {
		return -ENOMEM;
	}
	/* NOTE(review): ldev (and later allocations) are not freed on the
	 * error paths below — acceptable only if init failure is fatal.
	 */
	ldev->base = (uint32_t *)DRP_BASE;
	if (ldev->base == NULL) {
		return -EIO;
	}
	ret = litex_clk_dts_global_read();
	if (ret != 0) {
		return ret;
	}
	ret = litex_clk_dts_clkouts_read();
	if (ret != 0) {
		return ret;
	}
	litex_clk_init_clkouts();
	ret = litex_clk_init_glob_clk();
	if (ret != 0) {
		return ret;
	}
	/* Program every output with its devicetree defaults */
	ret = litex_clk_set_all_def_clkouts();
	if (ret != 0) {
		return ret;
	}
#ifdef CONFIG_CLOCK_CONTROL_LOG_LEVEL_DBG
	litex_clk_print_all_params();
	litex_clk_print_all_regs();
#endif /* CONFIG_CLOCK_CONTROL_LOG_LEVEL_DBG */
	LOG_INF("LiteX Clock Control driver initialized");
	return 0;
}
/*
 * Static config snapshot passed to DEVICE_DT_DEFINE.
 * NOTE(review): litex_clk_init() also k_mallocs a mutable litex_clk_device
 * and stores it in the file-scope ldev pointer; this const copy only
 * serves as the device's config field.
 */
static const struct litex_clk_device ldev_init = {
	.base = (uint32_t *)DRP_BASE,
	.timeout = {LOCK_TIMEOUT, DRDY_TIMEOUT},
	.divclk = {DIVCLK_DIVIDE_MIN, DIVCLK_DIVIDE_MAX},
	.clkfbout = {CLKFBOUT_MULT_MIN, CLKFBOUT_MULT_MAX},
	.vco = {VCO_FREQ_MIN, VCO_FREQ_MAX},
	.vco_margin = VCO_MARGIN,
	.nclkout = NCLKOUT
};
DEVICE_DT_DEFINE(DT_NODELABEL(clock0), litex_clk_init, NULL,
		 NULL, &ldev_init, POST_KERNEL,
		 CONFIG_CLOCK_CONTROL_INIT_PRIORITY, &litex_clk_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_litex.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 14,495 |
```unknown
# Infineon CAT1 clock control driver configuration options
config CLOCK_CONTROL_INFINEON_CAT1
bool "Infineon CAT1 clock control driver"
default y
depends on SOC_FAMILY_INFINEON_CAT1
help
This option enables the clock control driver for Infineon CAT1 family.
``` | /content/code_sandbox/drivers/clock_control/Kconfig.ifx_cat1 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 71 |
```c
/*
*
*
*/
#include <soc.h>
#include <stm32_ll_bus.h>
#include <stm32_ll_crs.h>
#include <stm32_ll_rcc.h>
#include <stm32_ll_utils.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include "clock_stm32_ll_common.h"
#if defined(STM32_PLL_ENABLED)
/**
* @brief Return PLL source
*/
__unused
static uint32_t get_pll_source(void)
{
	/* Map the devicetree-selected PLL source to its LL constant. */
	if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
		return LL_RCC_PLLSOURCE_HSI;
	}
	if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
		return LL_RCC_PLLSOURCE_HSE;
	}

	/* Exactly one source must be enabled by the devicetree */
	__ASSERT(0, "Invalid source");
	return 0;
}
/**
* @brief get the pll source frequency
*/
__unused
uint32_t get_pllsrc_frequency(void)
{
if (IS_ENABLED(STM32_PLL_SRC_HSI)) {
return STM32_HSI_FREQ;
} else if (IS_ENABLED(STM32_PLL_SRC_HSE)) {
return STM32_HSE_FREQ;
}
__ASSERT(0, "Invalid source");
return 0;
}
/**
* @brief Set up pll configuration
*/
__unused
void config_pll_sysclock(void)
{
LL_RCC_PLL_ConfigDomain_SYS(get_pll_source(),
pllm(STM32_PLL_M_DIVISOR),
STM32_PLL_N_MULTIPLIER,
pllr(STM32_PLL_R_DIVISOR));
LL_RCC_PLL_EnableDomain_SYS();
}
#endif /* defined(STM32_PLL_ENABLED) */
/**
* @brief Activate default clocks
*/
void config_enable_default_clocks(void)
{
	/* Enable the power interface clock */
	LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_PWR);
#if defined(CRS)
	/* Optional HSI48 trimming from USB SOF pulses via the CRS block */
	if (IS_ENABLED(STM32_HSI48_CRS_USB_SOF)) {
		LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_CRS);
		/*
		 * After reset the CRS configuration register
		 * (CRS_CFGR) value corresponds to an USB SOF
		 * synchronization. FIXME: write it anyway.
		 */
		LL_CRS_EnableAutoTrimming();
		LL_CRS_EnableFreqErrorCounter();
	}
#endif /* defined(CRS) */
}
``` | /content/code_sandbox/drivers/clock_control/clock_stm32g0.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 508 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_CLOCK_AGILEX5_LL_H_
#define ZEPHYR_INCLUDE_DRIVERS_CLOCK_AGILEX5_LL_H_
#include <stdint.h>
#include <zephyr/sys/sys_io.h>
/* Clock manager register offsets */
#define CLKMGR_CTRL 0x00
#define CLKMGR_STAT 0x04
#define CLKMGR_INTRCLR 0x14
/* Clock manager main PLL group register offsets */
#define CLKMGR_MAINPLL_OFFSET 0x24
#define CLKMGR_MAINPLL_EN 0x00
#define CLKMGR_MAINPLL_BYPASS 0x0C
#define CLKMGR_MAINPLL_MPUCLK 0x18
#define CLKMGR_MAINPLL_BYPASSS 0x10
#define CLKMGR_MAINPLL_NOCCLK 0x1C
#define CLKMGR_MAINPLL_NOCDIV 0x20
#define CLKMGR_MAINPLL_PLLGLOB 0x24
#define CLKMGR_MAINPLL_FDBCK 0x28
#define CLKMGR_MAINPLL_MEM 0x2C
#define CLKMGR_MAINPLL_MEMSTAT 0x30
#define CLKMGR_MAINPLL_VCOCALIB 0x34
#define CLKMGR_MAINPLL_PLLC0 0x38
#define CLKMGR_MAINPLL_PLLC1 0x3C
#define CLKMGR_MAINPLL_PLLC2 0x40
#define CLKMGR_MAINPLL_PLLC3 0x44
#define CLKMGR_MAINPLL_PLLM 0x48
#define CLKMGR_MAINPLL_LOSTLOCK 0x54
/* Clock manager peripheral PLL group register offsets */
#define CLKMGR_PERPLL_OFFSET 0x7C
#define CLKMGR_PERPLL_EN 0x00
#define CLKMGR_PERPLL_BYPASS 0x0C
#define CLKMGR_PERPLL_BYPASSS 0x10
#define CLKMGR_PERPLL_EMACCTL 0x18
#define CLKMGR_PERPLL_GPIODIV 0x1C
#define CLKMGR_PERPLL_PLLGLOB 0x20
#define CLKMGR_PERPLL_FDBCK 0x24
#define CLKMGR_PERPLL_MEM 0x28
#define CLKMGR_PERPLL_MEMSTAT 0x2C
#define CLKMGR_PERPLL_VCOCALIB 0x30
#define CLKMGR_PERPLL_PLLC0 0x34
#define CLKMGR_PERPLL_PLLC1 0x38
#define CLKMGR_PERPLL_PLLC2 0x3C
#define CLKMGR_PERPLL_PLLC3 0x40
#define CLKMGR_PERPLL_PLLM 0x44
#define CLKMGR_PERPLL_LOSTLOCK 0x50
/* Clock manager control/intel group register offsets */
#define CLKMGR_INTEL_OFFSET 0xD0
#define CLKMGR_INTEL_JTAG 0x00
#define CLKMGR_INTEL_EMACACTR 0x4
#define CLKMGR_INTEL_EMACBCTR 0x8
#define CLKMGR_INTEL_EMACPTPCTR 0x0C
#define CLKMGR_INTEL_GPIODBCTR 0x10
#define CLKMGR_INTEL_SDMMCCTR 0x14
#define CLKMGR_INTEL_S2FUSER0CTR 0x18
#define CLKMGR_INTEL_S2FUSER1CTR 0x1C
#define CLKMGR_INTEL_PSIREFCTR 0x20
#define CLKMGR_INTEL_EXTCNTRST 0x24
/* Clock manager macros */
#define CLKMGR_CTRL_BOOTMODE_SET_MSK 0x00000001U
#define CLKMGR_STAT_BUSY_E_BUSY 0x1
#define CLKMGR_STAT_BUSY(x) (((x) & 0x00000001U) >> 0)
#define CLKMGR_STAT_MAINPLLLOCKED(x) (((x) & 0x00000100U) >> 8)
#define CLKMGR_STAT_PERPLLLOCKED(x) (((x) & 0x00010000U) >> 16)
#define CLKMGR_INTRCLR_MAINLOCKLOST_SET_MSK 0x00000004U
#define CLKMGR_INTRCLR_PERLOCKLOST_SET_MSK 0x00000008U
#define CLKMGR_MAINPLL_L4SPDIV(x) (((x) >> 16) & 0x3)
#define CLKMGR_INTOSC_HZ 460000000U
/* Shared Macros */
#define CLKMGR_PSRC(x) (((x) & 0x00030000U) >> 16)
#define CLKMGR_PSRC_MAIN 0
#define CLKMGR_PSRC_PER 1
#define CLKMGR_PLLGLOB_PSRC_EOSC1 0x0
#define CLKMGR_PLLGLOB_PSRC_INTOSC 0x1
#define CLKMGR_PLLGLOB_PSRC_F2S 0x2
#define CLKMGR_PLLM_MDIV(x) ((x) & 0x000003FFU)
#define CLKMGR_PLLGLOB_PD_SET_MSK 0x00000001U
#define CLKMGR_PLLGLOB_RST_SET_MSK 0x00000002U
#define CLKMGR_PLLGLOB_REFCLKDIV(x) (((x) & 0x00003F00) >> 8)
#define CLKMGR_PLLGLOB_AREFCLKDIV(x) (((x) & 0x00000F00) >> 8)
#define CLKMGR_PLLGLOB_DREFCLKDIV(x) (((x) & 0x00003000) >> 12)
#define CLKMGR_VCOCALIB_HSCNT_SET(x) (((x) << 0) & 0x000003FF)
#define CLKMGR_VCOCALIB_MSCNT_SET(x) (((x) << 16) & 0x00FF0000)
#define CLKMGR_CLR_LOSTLOCK_BYPASS 0x20000000U
#define CLKMGR_PLLC_DIV(x) ((x) & 0x7FF)
#define CLKMGR_INTEL_SDMMC_CNT(x) (((x) & 0x7FF) + 1)
/**
* @brief Initialize the low layer clock control driver
*
* @param base_addr : Clock control device MMIO base address
*
* @return void
*/
void clock_agilex5_ll_init(mm_reg_t base_addr);
/**
* @brief Get MPU(Micro Processor Unit) clock value
*
* @param void
*
* @return returns MPU clock value
*/
uint32_t get_mpu_clk(void);
/**
* @brief Get Watchdog peripheral clock value
*
* @param void
*
* @return returns Watchdog clock value
*/
uint32_t get_wdt_clk(void);
/**
* @brief Get UART peripheral clock value
*
* @param void
*
* @return returns UART clock value
*/
uint32_t get_uart_clk(void);
/**
* @brief Get MMC peripheral clock value
*
* @param void
*
* @return returns MMC clock value
*/
uint32_t get_mmc_clk(void);
/**
* @brief Get Timer peripheral clock value
*
* @param void
*
* @return returns Timer clock value
*/
uint32_t get_timer_clk(void);
#endif /* ZEPHYR_INCLUDE_DRIVERS_CLOCK_AGILEX5_LL_H_ */
``` | /content/code_sandbox/drivers/clock_control/clock_control_agilex5_ll.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,688 |
```c
/*
*
*/
#define DT_DRV_COMPAT nxp_imx_ccm_rev2
#include <errno.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/dt-bindings/clock/imx_ccm_rev2.h>
#include <fsl_clock.h>
#define LOG_LEVEL CONFIG_CLOCK_CONTROL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(clock_control);
/**
 * @brief Turn on a peripheral clock.
 *
 * Only the ENET clock gates are switched explicitly (and only when the
 * Ethernet driver is enabled); every other clock ID is accepted and
 * treated as already-on, returning success.
 *
 * @param dev CCM device (unused).
 * @param sub_system Encoded clock ID (peripheral type | instance index).
 * @return 0 always.
 */
static int mcux_ccm_on(const struct device *dev,
		       clock_control_subsys_t sub_system)
{
	uint32_t clock_name = (uintptr_t)sub_system;
	uint32_t peripheral, instance;
	/* Decode the packed clock ID into peripheral type and instance. */
	peripheral = (clock_name & IMX_CCM_PERIPHERAL_MASK);
	instance = (clock_name & IMX_CCM_INSTANCE_MASK);
	switch (peripheral) {
#ifdef CONFIG_ETH_NXP_ENET
	/* SoC-specific SDK clock gate names for the ENET instances. On the
	 * i.MX93 A55 core there is no plain ENET gate, only ENET1G.
	 */
#ifdef CONFIG_SOC_MIMX9352_A55
#define ENET1G_CLOCK kCLOCK_Enet1
#else
#define ENET_CLOCK kCLOCK_Enet
#define ENET1G_CLOCK kCLOCK_Enet_1g
#endif
#ifdef ENET_CLOCK
	case IMX_CCM_ENET_CLK:
		CLOCK_EnableClock(ENET_CLOCK);
		return 0;
#endif
	case IMX_CCM_ENET1G_CLK:
		CLOCK_EnableClock(ENET1G_CLOCK);
		return 0;
#endif
	default:
		/* instance is only consumed by the cases above; silence
		 * the unused-variable warning when none are compiled in.
		 */
		(void)instance;
		return 0;
	}
}
/**
 * @brief Turn off a peripheral clock.
 *
 * Intentionally a no-op: clocks are left running. Returns success so
 * callers using the generic clock_control API do not fail.
 */
static int mcux_ccm_off(const struct device *dev,
			clock_control_subsys_t sub_system)
{
	return 0;
}
/**
 * @brief Get the frequency of a peripheral clock.
 *
 * Maps the IMX_CCM_* clock ID in @p sub_system to the CCM root clock that
 * feeds the peripheral, then queries the SDK for that root's frequency.
 * Cases are compiled in per-driver via Kconfig.
 *
 * @param dev CCM device (unused).
 * @param sub_system Encoded clock ID (peripheral type | instance index).
 * @param rate Output: frequency in Hz.
 * @return 0 on success, -EINVAL for unsupported clock IDs (or an
 *         unexpected SAI mux setting on i.MX93).
 */
static int mcux_ccm_get_subsys_rate(const struct device *dev,
				    clock_control_subsys_t sub_system,
				    uint32_t *rate)
{
	uint32_t clock_name = (size_t) sub_system;
	uint32_t clock_root, peripheral, instance;
	/* Decode the packed clock ID into peripheral type and instance. */
	peripheral = (clock_name & IMX_CCM_PERIPHERAL_MASK);
	instance = (clock_name & IMX_CCM_INSTANCE_MASK);
	switch (peripheral) {
#ifdef CONFIG_I2C_MCUX_LPI2C
	/* Root clock enums are consecutive, so instance offsets work. */
	case IMX_CCM_LPI2C1_CLK:
		clock_root = kCLOCK_Root_Lpi2c1 + instance;
		break;
#endif
#ifdef CONFIG_SPI_MCUX_LPSPI
	case IMX_CCM_LPSPI1_CLK:
		clock_root = kCLOCK_Root_Lpspi1 + instance;
		break;
#endif
#ifdef CONFIG_UART_MCUX_LPUART
#if defined(CONFIG_SOC_SERIES_IMXRT118X)
	/* RT118x pairs LPUART instances on shared root clocks. */
	case IMX_CCM_LPUART0102_CLK:
	case IMX_CCM_LPUART0304_CLK:
		clock_root = kCLOCK_Root_Lpuart0102 + instance;
		break;
#else
	case IMX_CCM_LPUART1_CLK:
	case IMX_CCM_LPUART2_CLK:
		clock_root = kCLOCK_Root_Lpuart1 + instance;
		break;
#endif
#endif
#if CONFIG_IMX_USDHC
	case IMX_CCM_USDHC1_CLK:
	case IMX_CCM_USDHC2_CLK:
		clock_root = kCLOCK_Root_Usdhc1 + instance;
		break;
#endif
#ifdef CONFIG_DMA_MCUX_EDMA
	case IMX_CCM_EDMA_CLK:
		clock_root = kCLOCK_Root_Bus;
		break;
	case IMX_CCM_EDMA_LPSR_CLK:
		clock_root = kCLOCK_Root_Bus_Lpsr;
		break;
#endif
#ifdef CONFIG_PWM_MCUX
	case IMX_CCM_PWM_CLK:
		clock_root = kCLOCK_Root_Bus;
		break;
#endif
#ifdef CONFIG_CAN_MCUX_FLEXCAN
	case IMX_CCM_CAN1_CLK:
		clock_root = kCLOCK_Root_Can1 + instance;
		break;
#endif
#ifdef CONFIG_COUNTER_MCUX_GPT
	case IMX_CCM_GPT_CLK:
		clock_root = kCLOCK_Root_Gpt1 + instance;
		break;
#endif
#ifdef CONFIG_I2S_MCUX_SAI
	case IMX_CCM_SAI1_CLK:
		clock_root = kCLOCK_Root_Sai1;
		break;
	case IMX_CCM_SAI2_CLK:
		clock_root = kCLOCK_Root_Sai2;
		break;
	case IMX_CCM_SAI3_CLK:
		clock_root = kCLOCK_Root_Sai3;
		break;
	case IMX_CCM_SAI4_CLK:
		clock_root = kCLOCK_Root_Sai4;
		break;
#endif
#ifdef CONFIG_ETH_NXP_ENET
	case IMX_CCM_ENET_CLK:
	case IMX_CCM_ENET1G_CLK:
#ifdef CONFIG_SOC_MIMX9352_A55
		clock_root = kCLOCK_Root_WakeupAxi;
#else
		clock_root = kCLOCK_Root_Bus;
#endif
		break;
#endif
	/* NOTE(review): the SAI case labels below duplicate those under
	 * CONFIG_I2S_MCUX_SAI above. The two Kconfig branches must be
	 * mutually exclusive, otherwise this switch fails to compile with
	 * duplicate case labels — verify the Kconfig dependencies.
	 */
#if defined(CONFIG_SOC_MIMX9352_A55) && defined(CONFIG_DAI_NXP_SAI)
	case IMX_CCM_SAI1_CLK:
	case IMX_CCM_SAI2_CLK:
	case IMX_CCM_SAI3_CLK:
		clock_root = kCLOCK_Root_Sai1 + instance;
		uint32_t mux = CLOCK_GetRootClockMux(clock_root);
		uint32_t divider = CLOCK_GetRootClockDiv(clock_root);
		/* assumption: SAI's SRC is AUDIO_PLL */
		if (mux != 1) {
			return -EINVAL;
		}
		/* assumption: AUDIO_PLL's frequency is 393216000 Hz */
		*rate = 393216000 / divider;
		return 0;
#endif
#ifdef CONFIG_COUNTER_MCUX_TPM
	case IMX_CCM_TPM_CLK:
		clock_root = kCLOCK_Root_Tpm1 + instance;
		break;
#endif
#ifdef CONFIG_PWM_MCUX_QTMR
	case IMX_CCM_QTMR1_CLK:
	case IMX_CCM_QTMR2_CLK:
	case IMX_CCM_QTMR3_CLK:
	case IMX_CCM_QTMR4_CLK:
		clock_root = kCLOCK_Root_Bus;
		break;
#endif
#ifdef CONFIG_MEMC_MCUX_FLEXSPI
	case IMX_CCM_FLEXSPI_CLK:
		clock_root = kCLOCK_Root_Flexspi1;
		break;
	case IMX_CCM_FLEXSPI2_CLK:
		clock_root = kCLOCK_Root_Flexspi2;
		break;
#endif
#ifdef CONFIG_COUNTER_NXP_PIT
	case IMX_CCM_PIT_CLK:
		clock_root = kCLOCK_Root_Bus + instance;
		break;
#endif
#ifdef CONFIG_ADC_MCUX_LPADC
	case IMX_CCM_LPADC1_CLK:
		clock_root = kCLOCK_Root_Adc1 + instance;
		break;
#endif
	default:
		return -EINVAL;
	}
	/* i.MX93 SDK exposes a different frequency query API. */
#ifdef CONFIG_SOC_MIMX9352_A55
	*rate = CLOCK_GetIpFreq(clock_root);
#else
	*rate = CLOCK_GetRootClockFreq(clock_root);
#endif
	return 0;
}
/*
* Since this function is used to reclock the FlexSPI when running in
* XIP, it must be located in RAM when MEMC driver is enabled.
*/
#ifdef CONFIG_MEMC_MCUX_FLEXSPI
#define CCM_SET_FUNC_ATTR __ramfunc
#else
#define CCM_SET_FUNC_ATTR
#endif
/**
 * @brief Set the frequency of a peripheral clock.
 *
 * Only the FlexSPI clocks can be re-rated, and only on RT11xx when the
 * MEMC driver manages the (XIP) FlexSPI; everything else is -ENOTSUP.
 * Placed in RAM (CCM_SET_FUNC_ATTR) because it may reclock the flash
 * the code executes from.
 *
 * @param dev CCM device (unused).
 * @param subsys Encoded clock ID.
 * @param rate Requested frequency in Hz.
 * @return 0 on success, -ENOTSUP for unsupported clocks.
 */
static int CCM_SET_FUNC_ATTR mcux_ccm_set_subsys_rate(const struct device *dev,
			clock_control_subsys_t subsys,
			clock_control_subsys_rate_t rate)
{
	uint32_t clock_name = (uintptr_t)subsys;
	uint32_t clock_rate = (uintptr_t)rate;
	switch (clock_name) {
	case IMX_CCM_FLEXSPI_CLK:
		__fallthrough;
	case IMX_CCM_FLEXSPI2_CLK:
#if defined(CONFIG_SOC_SERIES_IMXRT11XX) && defined(CONFIG_MEMC_MCUX_FLEXSPI)
		/* The SOC is using the FlexSPI for XIP. Therefore,
		 * the FlexSPI itself must be managed within the function,
		 * which is SOC specific.
		 */
		return flexspi_clock_set_freq(clock_name, clock_rate);
#endif
	/* When the #if above is not compiled in, the FlexSPI cases fall
	 * through to default and report -ENOTSUP.
	 */
	default:
		/* Silence unused variable warning */
		ARG_UNUSED(clock_rate);
		return -ENOTSUP;
	}
}
/* clock_control API vtable for the rev2 CCM driver. */
static const struct clock_control_driver_api mcux_ccm_driver_api = {
	.on = mcux_ccm_on,
	.off = mcux_ccm_off,
	.get_rate = mcux_ccm_get_subsys_rate,
	.set_rate = mcux_ccm_set_subsys_rate,
};
/* Single CCM instance; no init function or per-instance state needed. */
DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, NULL, PRE_KERNEL_1,
		      CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		      &mcux_ccm_driver_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_mcux_ccm_rev2.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,862 |
```c
/*
*
*
*/
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include <zephyr/logging/log.h>
#include <soc.h>
#define DT_DRV_COMPAT st_stm32_clock_mux
LOG_MODULE_REGISTER(clock_mux, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
struct stm32_clk_mux_config {
const struct stm32_pclken pclken;
};
/* Apply the devicetree-described mux configuration to the STM32 RCC at
 * boot. Returns 0 on success, -EIO if the RCC rejects the configuration.
 */
static int stm32_clk_mux_init(const struct device *dev)
{
	const struct stm32_clk_mux_config *cfg = dev->config;
	const struct device *rcc = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);
	int ret;

	ret = clock_control_configure(rcc, (clock_control_subsys_t)&cfg->pclken, NULL);
	if (ret != 0) {
		LOG_ERR("Could not enable clock mux");
		return -EIO;
	}

	return 0;
}
/* Instantiate one init-only device per st,stm32-clock-mux DT node: the
 * device has no runtime API, it only runs stm32_clk_mux_init at
 * PRE_KERNEL_1 to program the mux.
 */
#define STM32_MUX_CLK_INIT(id) \
 \
static const struct stm32_clk_mux_config stm32_clk_mux_cfg_##id = { \
	.pclken = STM32_CLOCK_INFO(0, DT_DRV_INST(id)) \
}; \
 \
DEVICE_DT_INST_DEFINE(id, stm32_clk_mux_init, NULL, \
		      NULL, &stm32_clk_mux_cfg_##id, \
		      PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS,\
		      NULL);

/* One instance per enabled DT node. */
DT_INST_FOREACH_STATUS_OKAY(STM32_MUX_CLK_INIT)
``` | /content/code_sandbox/drivers/clock_control/clock_stm32_mux.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 305 |
```unknown
# Raspberry Pi Pico Clock Controller Driver configuration options
config CLOCK_CONTROL_RPI_PICO
bool "Raspberry Pi Pico Clock Controller Driver"
default y
depends on DT_HAS_RASPBERRYPI_PICO_CLOCK_CONTROLLER_ENABLED
if CLOCK_CONTROL_RPI_PICO
config RPI_PICO_ROSC_USE_MEASURED_FREQ
bool "Use measured frequency for ring oscillator"
help
Instead of the dts value, use the value measured by
the frequency counter as the rosc frequency.
endif # CLOCK_CONTROL_RPI_PICO
``` | /content/code_sandbox/drivers/clock_control/Kconfig.rpi_pico | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 115 |
```c
/*
*
*/
#define DT_DRV_COMPAT microchip_xec_pcr
#include <soc.h>
#include <zephyr/arch/cpu.h>
#include <cmsis_core.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/mchp_xec_clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/dt-bindings/clock/mchp_xec_pcr.h>
#include <zephyr/irq.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/barrier.h>
LOG_MODULE_REGISTER(clock_control_xec, LOG_LEVEL_ERR);
#define CLK32K_SIL_OSC_DELAY 256
#define CLK32K_PLL_LOCK_WAIT (16 * 1024)
#define CLK32K_PIN_WAIT 4096
#define CLK32K_XTAL_WAIT (16 * 1024)
#define CLK32K_XTAL_MON_WAIT (64 * 1024)
#define XEC_CC_DFLT_PLL_LOCK_WAIT_MS 30
/*
* Counter checks:
* 32KHz period counter minimum for pass/fail: 16-bit
* 32KHz period counter maximum for pass/fail: 16-bit
* 32KHz duty cycle variation max for pass/fail: 16-bit
* 32KHz valid count minimum: 8-bit
*
* 32768 Hz period is 30.518 us
* HW count resolution is 48 MHz.
* One 32KHz clock pulse = 1464.84 48 MHz counts.
*/
#define CNT32K_TMIN 1435
#define CNT32K_TMAX 1495
#define CNT32K_DUTY_MAX 132
#define CNT32K_VAL_MIN 4
#define DEST_PLL 0
#define DEST_PERIPH 1
#define CLK32K_FLAG_CRYSTAL_SE BIT(0)
#define CLK32K_FLAG_PIN_FB_CRYSTAL BIT(1)
#define PCR_PERIPH_RESET_SPIN 8u
#define XEC_CC_XTAL_EN_DELAY_MS_DFLT 300u
#define HIBTIMER_MS_TO_CNT(x) ((uint32_t)(x) * 33U)
#define HIBTIMER_10_MS 328u
#define HIBTIMER_300_MS 9830u
enum pll_clk32k_src {
PLL_CLK32K_SRC_SO = MCHP_XEC_PLL_CLK32K_SRC_SIL_OSC,
PLL_CLK32K_SRC_XTAL = MCHP_XEC_PLL_CLK32K_SRC_XTAL,
PLL_CLK32K_SRC_PIN = MCHP_XEC_PLL_CLK32K_SRC_PIN,
PLL_CLK32K_SRC_MAX,
};
enum periph_clk32k_src {
PERIPH_CLK32K_SRC_SO_SO = MCHP_XEC_PERIPH_CLK32K_SRC_SO_SO,
PERIPH_CLK32K_SRC_XTAL_XTAL = MCHP_XEC_PERIPH_CLK32K_SRC_XTAL_XTAL,
PERIPH_CLK32K_SRC_PIN_SO = MCHP_XEC_PERIPH_CLK32K_SRC_PIN_SO,
PERIPH_CLK32K_SRC_PIN_XTAL = MCHP_XEC_PERIPH_CLK32K_SRC_PIN_XTAL,
PERIPH_CLK32K_SRC_MAX
};
enum clk32k_dest { CLK32K_DEST_PLL = 0, CLK32K_DEST_PERIPH, CLK32K_DEST_MAX };
/* PCR hardware registers for MEC15xx and MEC172x */
#define XEC_CC_PCR_MAX_SCR 5
struct pcr_hw_regs {
volatile uint32_t SYS_SLP_CTRL;
volatile uint32_t PROC_CLK_CTRL;
volatile uint32_t SLOW_CLK_CTRL;
volatile uint32_t OSC_ID;
volatile uint32_t PWR_RST_STS;
volatile uint32_t PWR_RST_CTRL;
volatile uint32_t SYS_RST;
volatile uint32_t TURBO_CLK; /* MEC172x only */
volatile uint32_t TEST20;
uint32_t RSVD1[3];
volatile uint32_t SLP_EN[XEC_CC_PCR_MAX_SCR];
uint32_t RSVD2[3];
volatile uint32_t CLK_REQ[XEC_CC_PCR_MAX_SCR];
uint32_t RSVD3[3];
volatile uint32_t RST_EN[5];
volatile uint32_t RST_EN_LOCK;
/* all registers below are MEC172x only */
volatile uint32_t VBAT_SRST;
volatile uint32_t CLK32K_SRC_VTR;
volatile uint32_t TEST90;
uint32_t RSVD4[(0x00c0 - 0x0094) / 4];
volatile uint32_t CNT32K_PER;
volatile uint32_t CNT32K_PULSE_HI;
volatile uint32_t CNT32K_PER_MIN;
volatile uint32_t CNT32K_PER_MAX;
volatile uint32_t CNT32K_DV;
volatile uint32_t CNT32K_DV_MAX;
volatile uint32_t CNT32K_VALID;
volatile uint32_t CNT32K_VALID_MIN;
volatile uint32_t CNT32K_CTRL;
volatile uint32_t CLK32K_MON_ISTS;
volatile uint32_t CLK32K_MON_IEN;
};
#define XEC_CC_PCR_RST_EN_UNLOCK 0xa6382d4cu
#define XEC_CC_PCR_RST_EN_LOCK 0xa6382d4du
#define XEC_CC_PCR_OSC_ID_PLL_LOCK BIT(8)
#define XEC_CC_PCR_TURBO_CLK_96M BIT(2)
#define XEC_CC_PCR_CLK32K_SRC_MSK 0x3u
#define XEC_CC_PCR_CLK32K_SRC_SIL 0u
#define XEC_CC_PCR_CLK32K_SRC_XTAL 1
#define XEC_CC_PCR_CLK32K_SRC_PIN 2
#define XEC_CC_PCR_CLK32K_SRC_OFF 3
#ifdef CONFIG_SOC_SERIES_MEC15XX
#define XEC_CC_PCR3_CRYPTO_MASK (BIT(26) | BIT(27) | BIT(28))
#else
#define XEC_CC_PCR3_CRYPTO_MASK BIT(26)
#endif
/* VBAT powered hardware registers related to clock configuration */
struct vbatr_hw_regs {
volatile uint32_t PFRS;
uint32_t RSVD1[1];
volatile uint32_t CLK32_SRC;
uint32_t RSVD2[2];
volatile uint32_t CLK32_TRIM;
uint32_t RSVD3[1];
volatile uint32_t CLK32_TRIM_CTRL;
};
/* MEC152x VBAT CLK32_SRC register defines */
#define XEC_CC15_VBATR_USE_SIL_OSC 0u
#define XEC_CC15_VBATR_USE_32KIN_PIN BIT(1)
#define XEC_CC15_VBATR_USE_PAR_CRYSTAL BIT(2)
#define XEC_CC15_VBATR_USE_SE_CRYSTAL (BIT(2) | BIT(3))
/* MEC150x special requirements */
#define XEC_CC15_GCFG_DID_DEV_ID_MEC150x 0x0020U
#define XEC_CC15_TRIM_ENABLE_INT_OSCILLATOR 0x06U
/* MEC172x VBAT CLK32_SRC register defines */
#define XEC_CC_VBATR_CS_SO_EN BIT(0) /* enable and start silicon OSC */
#define XEC_CC_VBATR_CS_XTAL_EN BIT(8) /* enable & start external crystal */
#define XEC_CC_VBATR_CS_XTAL_SE BIT(9) /* crystal XTAL2 used as 32KHz input */
#define XEC_CC_VBATR_CS_XTAL_DHC BIT(10) /* disable high XTAL startup current */
#define XEC_CC_VBATR_CS_XTAL_CNTR_MSK 0x1800u /* XTAL amplifier gain control */
#define XEC_CC_VBATR_CS_XTAL_CNTR_DG 0x0800u
#define XEC_CC_VBATR_CS_XTAL_CNTR_RG 0x1000u
#define XEC_CC_VBATR_CS_XTAL_CNTR_MG 0x1800u
/* MEC172x Select source of peripheral 32KHz clock */
#define XEC_CC_VBATR_CS_PCS_POS 16
#define XEC_CC_VBATR_CS_PCS_MSK0 0x3u
#define XEC_CC_VBATR_CS_PCS_MSK 0x30000u
#define XEC_CC_VBATR_CS_PCS_VTR_VBAT_SO 0u /* VTR & VBAT use silicon OSC */
#define XEC_CC_VBATR_CS_PCS_VTR_VBAT_XTAL 0x10000u /* VTR & VBAT use crystal */
#define XEC_CC_VBATR_CS_PCS_VTR_PIN_SO 0x20000u /* VTR 32KHZ_IN, VBAT silicon OSC */
#define XEC_CC_VBATR_CS_PCS_VTR_PIN_XTAL 0x30000u /* VTR 32KHZ_IN, VBAT XTAL */
#define XEC_CC_VBATR_CS_DI32_VTR_OFF BIT(18) /* disable silicon OSC when VTR off */
enum vbr_clk32k_src {
VBR_CLK32K_SRC_SO_SO = 0,
VBR_CLK32K_SRC_XTAL_XTAL,
VBR_CLK32K_SRC_PIN_SO,
VBR_CLK32K_SRC_PIN_XTAL,
VBR_CLK32K_SRC_MAX,
};
/* GIRQ23 hardware registers */
#define XEC_CC_HTMR_0_GIRQ23_POS 16
/* Driver config */
struct xec_pcr_config {
uintptr_t pcr_base;
uintptr_t vbr_base;
const struct pinctrl_dev_config *pcfg;
uint16_t xtal_enable_delay_ms;
uint16_t pll_lock_timeout_ms;
uint16_t period_min; /* mix and max 32KHz period range */
uint16_t period_max; /* monitor values in units of 48MHz (20.8 ns) */
uint8_t core_clk_div; /* Cortex-M4 clock divider (CPU and NVIC) */
uint8_t xtal_se; /* External 32KHz square wave on XTAL2 pin */
uint8_t max_dc_va; /* 32KHz monitor maximum duty cycle variation */
uint8_t min_valid; /* minimum number of valid consecutive 32KHz pulses */
enum pll_clk32k_src pll_src;
enum periph_clk32k_src periph_src;
uint8_t clkmon_bypass;
uint8_t dis_internal_osc;
};
/*
* Make sure PCR sleep enables are clear except for crypto
* which do not have internal clock gating.
*/
static void pcr_slp_init(struct pcr_hw_regs *pcr)
{
	pcr->SYS_SLP_CTRL = 0U;
	/* Clear SCR bit 2 (Cortex-M SLEEPDEEP) so WFI enters light sleep. */
	SCB->SCR &= ~BIT(2);
	for (int i = 0; i < XEC_CC_PCR_MAX_SCR; i++) {
		pcr->SLP_EN[i] = 0U;
	}
	/* Crypto blocks have no internal clock gating: keep their sleep
	 * enables set so they are gated by the PCR instead.
	 */
	pcr->SLP_EN[3] = XEC_CC_PCR3_CRYPTO_MASK;
}
/* MEC172x:
* Check if PLL is locked with timeout provided by a peripheral clock domain
* timer. We assume peripheral domain is still using internal silicon OSC as
* its reference clock. Available peripheral timers using 32KHz are:
* RTOS timer, hibernation timers, RTC, and week timer. We will use hibernation
* timer 0 in 30.5 us tick mode. Maximum internal is 2 seconds.
* A timer count value of 0 is interpreted as no timeout.
* We use the hibernation timer GIRQ interrupt status bit instead of reading
* the timer's count register due to race condition of HW taking at least
* one 32KHz cycle to move pre-load into count register.
* MEC15xx:
* Hibernation timer is using the chosen 32KHz source. If the external 32KHz source
* has a ramp up time, we make not get an accurate delay. This may only occur for
* the parallel crystal.
*/
/* Wait for PLL lock with a timeout measured by hibernation timer 0 in
 * 30.5 us tick mode (see the block comment above for the rationale).
 *
 * @param pcr PCR register block.
 * @param ms Timeout in milliseconds; 0 means wait forever.
 * @return 0 when the PLL locked, -ETIMEDOUT when the timer fired first.
 *
 * Fix: the original set rc = -ETIMEDOUT on timer expiry but never left
 * the spin loop, so a PLL that failed to lock hung the function forever.
 */
static int pll_wait_lock_periph(struct pcr_hw_regs *const pcr, uint16_t ms)
{
	struct htmr_regs *htmr0 = (struct htmr_regs *)DT_REG_ADDR(DT_NODELABEL(hibtimer0));
	struct girq_regs *girq23 = (struct girq_regs *)DT_REG_ADDR(DT_NODELABEL(girq23));
	uint32_t hcount = HIBTIMER_MS_TO_CNT(ms);
	int rc = 0;

	htmr0->PRLD = 0; /* disable */
	htmr0->CTRL = 0; /* 30.5 us units */
	girq23->SRC = BIT(XEC_CC_HTMR_0_GIRQ23_POS);
	htmr0->PRLD = hcount;

	while (!(pcr->OSC_ID & MCHP_PCR_OSC_ID_PLL_LOCK)) {
		if (hcount) {
			if (girq23->SRC & BIT(XEC_CC_HTMR_0_GIRQ23_POS)) {
				rc = -ETIMEDOUT;
				break; /* stop spinning: timeout expired */
			}
		}
	}

	return rc;
}
/* Return 1 when @p src routes the external 32KHZ_IN pin to the VTR
 * peripheral clock domain, 0 otherwise.
 */
static int periph_clk_src_using_pin(enum periph_clk32k_src src)
{
	return (src == PERIPH_CLK32K_SRC_PIN_SO) ||
	       (src == PERIPH_CLK32K_SRC_PIN_XTAL);
}
#ifdef CONFIG_SOC_SERIES_MEC15XX
/* MEC15xx uses the same 32KHz source for both PLL and Peripheral 32K clock domains.
* We ignore the peripheral clock source.
* If XTAL is selected (parallel) or single-ended the external 32KHz MUST stay on
* even when VTR goes off.
* If PIN(32KHZ_IN pin) as the external source, hardware can auto-switch to internal
* silicon OSC if the signal on the 32KHZ_PIN goes away.
* We ignore th
*/
/* Select the single MEC15xx 32 KHz source (shared by PLL and peripheral
 * domains) and wait for PLL lock.
 *
 * @param dev PCR device.
 * @param pll_clk_src Requested 32 KHz source.
 * @param periph_clk_src Unused on MEC15xx (single shared source).
 * @param flags CLK32K_FLAG_* modifiers (single-ended crystal).
 * @return 0 on success, -EINVAL for an unknown source, -ETIMEDOUT if the
 *         PLL fails to lock in time.
 */
static int soc_clk32_init(const struct device *dev,
			  enum pll_clk32k_src pll_clk_src,
			  enum periph_clk32k_src periph_clk_src,
			  uint32_t flags)
{
	const struct xec_pcr_config * const devcfg = dev->config;
	struct pcr_hw_regs *const pcr = (struct pcr_hw_regs *)devcfg->pcr_base;
	struct vbatr_hw_regs *const vbr = (struct vbatr_hw_regs *)devcfg->vbr_base;
	uint32_t cken = 0U;
	int rc = 0;
	/* MEC150x B0 silicon requires a trim-control write to enable the
	 * internal oscillator.
	 */
	if (MCHP_DEVICE_ID() == XEC_CC15_GCFG_DID_DEV_ID_MEC150x) {
		if (MCHP_REVISION_ID() == MCHP_GCFG_REV_B0) {
			vbr->CLK32_TRIM_CTRL = XEC_CC15_TRIM_ENABLE_INT_OSCILLATOR;
		}
	}
	/* Map requested source to the VBAT CLK32_SRC register encoding. */
	switch (pll_clk_src) {
	case PLL_CLK32K_SRC_SO:
		cken = XEC_CC15_VBATR_USE_SIL_OSC;
		break;
	case PLL_CLK32K_SRC_XTAL:
		if (flags & CLK32K_FLAG_CRYSTAL_SE) {
			cken = XEC_CC15_VBATR_USE_SE_CRYSTAL;
		} else {
			cken = XEC_CC15_VBATR_USE_PAR_CRYSTAL;
		}
		break;
	case PLL_CLK32K_SRC_PIN: /* 32KHZ_IN pin falls back to Silicon OSC */
		cken = XEC_CC15_VBATR_USE_32KIN_PIN;
		break;
	default: /* do not touch HW */
		return -EINVAL;
	}
	/* Only write the register when the source actually changes. */
	if ((vbr->CLK32_SRC & 0xffU) != cken) {
		vbr->CLK32_SRC = cken;
	}
	rc = pll_wait_lock_periph(pcr, devcfg->xtal_enable_delay_ms);
	return rc;
}
#else
/* Return 1 when @p src keeps the internal silicon oscillator in use for
 * the VBAT peripheral clock domain, 0 otherwise.
 */
static int periph_clk_src_using_si(enum periph_clk32k_src src)
{
	return (src == PERIPH_CLK32K_SRC_SO_SO) ||
	       (src == PERIPH_CLK32K_SRC_PIN_SO);
}
/* Return 1 when @p src requires the external crystal for the VBAT
 * peripheral clock domain, 0 otherwise.
 */
static int periph_clk_src_using_xtal(enum periph_clk32k_src src)
{
	return (src == PERIPH_CLK32K_SRC_XTAL_XTAL) ||
	       (src == PERIPH_CLK32K_SRC_PIN_XTAL);
}
/* True when the internal silicon 32 KHz oscillator enable bit is set. */
static bool is_sil_osc_enabled(struct vbatr_hw_regs *vbr)
{
	return (vbr->CLK32_SRC & XEC_CC_VBATR_CS_SO_EN) != 0U;
}
/* Enable and start the internal silicon 32 KHz oscillator. */
static void enable_sil_osc(struct vbatr_hw_regs *vbr)
{
	vbr->CLK32_SRC |= XEC_CC_VBATR_CS_SO_EN;
}
/* In early Zephyr initialization we don't have timer services. Also, the SoC
* may be running on its ring oscillator (+/- 50% accuracy). Configuring the
* SoC's clock subsystem requires wait/delays. We implement a simple delay
* by writing to a read-only hardware register in the PCR block.
*/
/* Crude busy-wait usable before timer services exist: perform @p cnt
 * writes to the read-only OSC_ID register (writes are discarded).
 * Returns the number of iterations performed.
 */
static uint32_t spin_delay(struct pcr_hw_regs *pcr, uint32_t cnt)
{
	uint32_t i = 0U;

	while (i < cnt) {
		pcr->OSC_ID = i;
		i++;
	}

	return i;
}
/*
* This routine checks if the PLL is locked to its input source. Minimum lock
* time is 3.3 ms. Lock time can be larger when the source is an external
* crystal. Crystal cold start times may vary greatly based on many factors.
* Crystals do not like being power cycled.
*/
/* Spin until the PLL reports lock or the iteration budget is exhausted.
 * Returns 0 on lock, -ETIMEDOUT when @p wait_cnt reaches zero first.
 */
static int pll_wait_lock(struct pcr_hw_regs *const pcr, uint32_t wait_cnt)
{
	for (;;) {
		if (pcr->OSC_ID & MCHP_PCR_OSC_ID_PLL_LOCK) {
			return 0;
		}
		if (wait_cnt == 0U) {
			return -ETIMEDOUT;
		}
		wait_cnt--;
	}
}
/* caller has enabled internal silicon 32 KHz oscillator */
/* Busy-wait for @p hib_timer_count ticks of hibernation timer 0 in its
 * 32 KHz time base. Caller must have enabled the internal silicon 32 KHz
 * oscillator. The PRLD preload register is 16-bit, so longer delays are
 * split into UINT16_MAX-tick chunks.
 *
 * Fixes vs. original:
 * - chunk clamp used `hcnt -= UINT16_MAX` (wrong chunk size); it must be
 *   `hcnt = UINT16_MAX`.
 * - PRLD was loaded with the full 32-bit count (silently truncated by the
 *   16-bit register) instead of the clamped chunk `hcnt`.
 * - removed the dead `if (hib_timer_count == 0)` check: the loop condition
 *   guarantees it is non-zero here.
 */
static void hib_timer_delay(uint32_t hib_timer_count)
{
	struct htmr_regs *htmr0 = (struct htmr_regs *)DT_REG_ADDR(DT_NODELABEL(hibtimer0));
	struct girq_regs *girq23 = (struct girq_regs *)DT_REG_ADDR(DT_NODELABEL(girq23));
	uint32_t hcnt;

	while (hib_timer_count) {
		/* Clamp this chunk to the 16-bit preload range. */
		hcnt = hib_timer_count;
		if (hcnt > UINT16_MAX) {
			hcnt = UINT16_MAX;
		}
		htmr0->PRLD = 0; /* disable */
		while (htmr0->PRLD != 0) {
			;
		}
		htmr0->CTRL = 0; /* 32k time base */
		/* clear hibernation timer 0 status */
		girq23->SRC = BIT(XEC_CC_HTMR_0_GIRQ23_POS);
		htmr0->PRLD = hcnt; /* start the chunk */
		while ((girq23->SRC & BIT(XEC_CC_HTMR_0_GIRQ23_POS)) == 0) {
			;
		}
		htmr0->PRLD = 0; /* disable */
		while (htmr0->PRLD != 0) {
			;
		}
		girq23->SRC = BIT(XEC_CC_HTMR_0_GIRQ23_POS);
		hib_timer_count -= hcnt;
	}
}
/* Turn off crystal when we are not using it */
/* Power down the external 32 KHz crystal: clear its enable, single-ended
 * and high-startup-current-disable bits. Always returns 0.
 */
static int disable_32k_crystal(const struct device *dev)
{
	const struct xec_pcr_config *const devcfg = dev->config;
	struct vbatr_hw_regs *const vbr = (struct vbatr_hw_regs *)devcfg->vbr_base;
	uint32_t clear_bits = XEC_CC_VBATR_CS_XTAL_EN | XEC_CC_VBATR_CS_XTAL_SE |
			      XEC_CC_VBATR_CS_XTAL_DHC;

	vbr->CLK32_SRC = vbr->CLK32_SRC & ~clear_bits;

	return 0;
}
/*
* Start external 32 KHz crystal.
* Assumes peripheral clocks source is Silicon OSC.
* If current configuration matches desired crystal configuration do nothing.
* NOTE: Crystal requires ~300 ms to stabilize.
*/
/* Start the external 32 KHz crystal (or single-ended input on XTAL2 when
 * CLK32K_FLAG_CRYSTAL_SE is set). No-op if already configured as requested.
 * Blocks for devcfg->xtal_enable_delay_ms while the crystal stabilizes.
 * Always returns 0. NOTE: register writes are sequenced deliberately
 * (configure connection -> gain -> enable -> wait -> drop startup current);
 * do not reorder.
 */
static int enable_32k_crystal(const struct device *dev, uint32_t flags)
{
	const struct xec_pcr_config * const devcfg = dev->config;
	struct vbatr_hw_regs *const vbr = (struct vbatr_hw_regs *)devcfg->vbr_base;
	uint32_t vbcs = vbr->CLK32_SRC;
	uint32_t cfg = MCHP_VBATR_CS_XTAL_EN;
	if (flags & CLK32K_FLAG_CRYSTAL_SE) {
		cfg |= MCHP_VBATR_CS_XTAL_SE;
	}
	/* Already enabled with the requested connection type? */
	if ((vbcs & cfg) == cfg) {
		return 0;
	}
	/* Configure crystal connection before enabling the crystal. */
	vbr->CLK32_SRC &= ~(MCHP_VBATR_CS_XTAL_SE | MCHP_VBATR_CS_XTAL_DHC |
			    MCHP_VBATR_CS_XTAL_CNTR_MSK);
	if (flags & CLK32K_FLAG_CRYSTAL_SE) {
		vbr->CLK32_SRC |= MCHP_VBATR_CS_XTAL_SE;
	}
	/* Set crystal gain */
	vbr->CLK32_SRC |= MCHP_VBATR_CS_XTAL_CNTR_DG;
	/* enable crystal */
	vbr->CLK32_SRC |= MCHP_VBATR_CS_XTAL_EN;
	/* wait for crystal stabilization */
	hib_timer_delay(HIBTIMER_MS_TO_CNT(devcfg->xtal_enable_delay_ms));
	/* turn off crystal high startup current */
	vbr->CLK32_SRC |= MCHP_VBATR_CS_XTAL_DHC;
	return 0;
}
/*
* Use PCR clock monitor hardware to test crystal output.
* Requires crystal to have stabilized after enable.
* When enabled the clock monitor hardware measures high/low, edges, and
* duty cycle and compares to programmed limits.
*/
/* Validate the 32 KHz crystal output with the PCR clock monitor.
 * Programs the period/duty-cycle/valid-pulse limits from devcfg, runs the
 * monitor, and polls its status for up to ~10 ms (hibernation timer 0).
 *
 * @return 0 when the monitor reports pass on all checks,
 *         -EBUSY when it reports fail/stall,
 *         -ETIMEDOUT when the 10 ms window expires without a verdict.
 * Crystal must already be stable (see enable_32k_crystal).
 */
static int check_32k_crystal(const struct device *dev)
{
	const struct xec_pcr_config * const devcfg = dev->config;
	struct pcr_hw_regs *const pcr = (struct pcr_hw_regs *)devcfg->pcr_base;
	struct htmr_regs *htmr0 = (struct htmr_regs *)DT_REG_ADDR(DT_NODELABEL(hibtimer0));
	struct girq_regs *girq23 = (struct girq_regs *)DT_REG_ADDR(DT_NODELABEL(girq23));
	uint32_t status = 0;
	int rc = 0;
	/* Stop the timer, clear its status, and reset the monitor. */
	htmr0->PRLD = 0;
	htmr0->CTRL = 0;
	girq23->SRC = BIT(XEC_CC_HTMR_0_GIRQ23_POS);
	pcr->CNT32K_CTRL = 0U;
	pcr->CLK32K_MON_IEN = 0U;
	pcr->CLK32K_MON_ISTS = MCHP_PCR_CLK32M_ISTS_MASK;
	/* Program pass/fail limits from devicetree-derived configuration. */
	pcr->CNT32K_PER_MIN = devcfg->period_min;
	pcr->CNT32K_PER_MAX = devcfg->period_max;
	pcr->CNT32K_DV_MAX = devcfg->max_dc_va;
	pcr->CNT32K_VALID_MIN = devcfg->min_valid;
	/* Arm the monitor: period, duty-cycle and valid-pulse checks. */
	pcr->CNT32K_CTRL =
		MCHP_PCR_CLK32M_CTRL_PER_EN | MCHP_PCR_CLK32M_CTRL_DC_EN |
		MCHP_PCR_CLK32M_CTRL_VAL_EN | MCHP_PCR_CLK32M_CTRL_CLR_CNT;
	rc = -ETIMEDOUT;
	htmr0->PRLD = HIBTIMER_10_MS;
	status = pcr->CLK32K_MON_ISTS;
	/* Poll until all checks pass, any check fails, or the timer fires. */
	while ((girq23->SRC & BIT(XEC_CC_HTMR_0_GIRQ23_POS)) == 0) {
		if (status == (MCHP_PCR_CLK32M_ISTS_PULSE_RDY |
			       MCHP_PCR_CLK32M_ISTS_PASS_PER |
			       MCHP_PCR_CLK32M_ISTS_PASS_DC |
			       MCHP_PCR_CLK32M_ISTS_VALID)) {
			rc = 0;
			break;
		}
		if (status & (MCHP_PCR_CLK32M_ISTS_FAIL |
			      MCHP_PCR_CLK32M_ISTS_STALL)) {
			rc = -EBUSY;
			break;
		}
		status = pcr->CLK32K_MON_ISTS;
	}
	/* Disarm the monitor and the timer before returning. */
	pcr->CNT32K_CTRL = 0u;
	htmr0->PRLD = 0;
	girq23->SRC = BIT(XEC_CC_HTMR_0_GIRQ23_POS);
	return rc;
}
/*
* Set the clock source for either PLL or Peripheral-32K clock domain.
* The source must be a stable 32 KHz input: internal silicon oscillator,
* external crystal dual-ended crystal, 50% duty cycle waveform on XTAL2 only,
* or a 50% duty cycles waveform on the 32KHZ_PIN.
* NOTE: 32KHZ_PIN is an alternate function of a chip specific GPIO.
* Signal on 32KHZ_PIN may go off when VTR rail go down. MEC172x can automatically
* switch to silicon OSC or XTAL. At this time we do not support fall back to XTAL
* when using 32KHZ_PIN.
* !!! IMPORTANT !!! Fall back from 32KHZ_PIN to SO/XTAL is only for the Peripheral
* Clock domain. If the PLL is configured to use 32KHZ_PIN as its source then the
* PLL will shutdown and the PLL clock domain should switch to the ring oscillator.
* This means the PLL clock domain clock will not longer be accurate and may cause
* FW malfunction(s).
*/
static void connect_pll_32k(const struct device *dev, enum pll_clk32k_src src, uint32_t flags)
{
const struct xec_pcr_config * const devcfg = dev->config;
struct pcr_hw_regs *const pcr = (struct pcr_hw_regs *)devcfg->pcr_base;
uint32_t pcr_clk_sel;
switch (src) {
case PLL_CLK32K_SRC_XTAL:
pcr_clk_sel = MCHP_PCR_VTR_32K_SRC_XTAL;
break;
case PLL_CLK32K_SRC_PIN:
pcr_clk_sel = MCHP_PCR_VTR_32K_SRC_PIN;
break;
default: /* default to silicon OSC */
pcr_clk_sel = MCHP_PCR_VTR_32K_SRC_SILOSC;
break;
}
pcr->CLK32K_SRC_VTR = pcr_clk_sel;
}
/* Route the requested 32 KHz source(s) to the VTR/VBAT peripheral clock
 * domain by updating the PCS field of the VBAT CLK32_SRC register.
 * Unknown sources default to the silicon oscillator for both rails.
 * @p flags is currently unused for this domain.
 */
static void connect_periph_32k(const struct device *dev, enum periph_clk32k_src src, uint32_t flags)
{
	const struct xec_pcr_config *const devcfg = dev->config;
	struct vbatr_hw_regs *const vbr = (struct vbatr_hw_regs *)devcfg->vbr_base;
	uint32_t sel;

	if (src == PERIPH_CLK32K_SRC_XTAL_XTAL) {
		sel = MCHP_VBATR_CS_PCS_VTR_VBAT_XTAL;
	} else if (src == PERIPH_CLK32K_SRC_PIN_SO) {
		sel = MCHP_VBATR_CS_PCS_VTR_PIN_SO;
	} else if (src == PERIPH_CLK32K_SRC_PIN_XTAL) {
		sel = MCHP_VBATR_CS_PCS_VTR_PIN_XTAL;
	} else {
		sel = MCHP_VBATR_CS_PCS_VTR_VBAT_SO;
	}

	vbr->CLK32_SRC = (vbr->CLK32_SRC & ~MCHP_VBATR_CS_PCS_MSK) | sel;
}
/* two bit field in PCR VTR 32KHz source register */
enum pll_clk32k_src get_pll_32k_source(const struct device *dev)
{
const struct xec_pcr_config * const devcfg = dev->config;
struct pcr_hw_regs *const pcr = (struct pcr_hw_regs *)devcfg->pcr_base;
enum pll_clk32k_src src = PLL_CLK32K_SRC_MAX;
switch (pcr->CLK32K_SRC_VTR & XEC_CC_PCR_CLK32K_SRC_MSK) {
case XEC_CC_PCR_CLK32K_SRC_SIL:
src = PLL_CLK32K_SRC_SO;
break;
case XEC_CC_PCR_CLK32K_SRC_XTAL:
src = PLL_CLK32K_SRC_XTAL;
break;
case XEC_CC_PCR_CLK32K_SRC_PIN:
src = PLL_CLK32K_SRC_PIN;
break;
default:
src = PLL_CLK32K_SRC_MAX;
break;
}
return src;
}
/* two bit field in VBAT source 32KHz register */
/* Decode which 32 KHz source(s) currently feed the VTR/VBAT peripheral
 * clock domain from the 2-bit PCS field of the VBAT CLK32_SRC register.
 */
enum periph_clk32k_src get_periph_32k_source(const struct device *dev)
{
	const struct xec_pcr_config *const devcfg = dev->config;
	struct vbatr_hw_regs *const vbr = (struct vbatr_hw_regs *)devcfg->vbr_base;
	uint32_t pcs = (vbr->CLK32_SRC & XEC_CC_VBATR_CS_PCS_MSK) >> XEC_CC_VBATR_CS_PCS_POS;

	switch (pcs) {
	case VBR_CLK32K_SRC_SO_SO:
		return PERIPH_CLK32K_SRC_SO_SO;
	case VBR_CLK32K_SRC_XTAL_XTAL:
		return PERIPH_CLK32K_SRC_XTAL_XTAL;
	case VBR_CLK32K_SRC_PIN_SO:
		return PERIPH_CLK32K_SRC_PIN_SO;
	default:
		return PERIPH_CLK32K_SRC_PIN_XTAL;
	}
}
/*
* MEC172x has two 32 KHz clock domains
* PLL domain: 32 KHz clock input for PLL to produce 96 MHz and 48 MHz clocks
* Peripheral domain: 32 KHz clock for subset of peripherals.
* Each domain 32 KHz clock input can be from one of the following sources:
* Internal Silicon oscillator: +/- 2%
* External Crystal connected as parallel or single ended
* External 32KHZ_PIN 50% duty cycle waveform with fall back to either
* Silicon OSC or crystal when 32KHZ_PIN signal goes away or VTR power rail
* goes off.
* At chip reset the PLL is held in reset and the +/- 50% ring oscillator is
* the main clock.
* If no VBAT reset occurs the VBAT 32 KHz source register maintains its state.
*/
/* Configure the two MEC172x 32 KHz clock domains (PLL input and
 * peripheral clocks). Sequence:
 * 1. Ensure the internal silicon oscillator runs and feeds both domains
 *    so a known-good 32 KHz source is always present during switching.
 * 2. If a crystal (or single-ended wave on XTAL2) is needed, start it and
 *    optionally validate it with the PCR clock monitor.
 * 3. Switch the PLL domain first (a peripheral timer still on the silicon
 *    OSC provides the lock timeout), then the peripheral domain.
 * 4. Optionally disable the silicon OSC once nothing uses it.
 *
 * @return 0 on success, negative errno on lock timeout or monitor failure.
 *
 * Fix: the "disable internal silicon OSC" block was duplicated verbatim;
 * the redundant second copy has been removed.
 */
static int soc_clk32_init(const struct device *dev,
			  enum pll_clk32k_src pll_src,
			  enum periph_clk32k_src periph_src,
			  uint32_t flags)
{
	const struct xec_pcr_config * const devcfg = dev->config;
	struct pcr_hw_regs *const pcr = (struct pcr_hw_regs *)devcfg->pcr_base;
	struct vbatr_hw_regs *const vbr = (struct vbatr_hw_regs *)devcfg->vbr_base;
	int rc = 0;

	/* disable PCR 32K monitor and clear counters */
	pcr->CNT32K_CTRL = MCHP_PCR_CLK32M_CTRL_CLR_CNT;
	pcr->CLK32K_MON_ISTS = MCHP_PCR_CLK32M_ISTS_MASK;
	pcr->CLK32K_MON_IEN = 0;

	if (!is_sil_osc_enabled(vbr)) {
		enable_sil_osc(vbr);
		spin_delay(pcr, CLK32K_SIL_OSC_DELAY);
	}

	/* Default to 32KHz Silicon OSC for PLL and peripherals */
	connect_pll_32k(dev, PLL_CLK32K_SRC_SO, 0);
	connect_periph_32k(dev, PERIPH_CLK32K_SRC_SO_SO, 0);

	rc = pll_wait_lock(pcr, CLK32K_PLL_LOCK_WAIT);
	if (rc) {
		LOG_ERR("XEC clock control: MEC172x lock timeout for internal 32K OSC");
		return rc;
	}

	/* If crystal input required, enable and check. Single-ended 32KHz square wave
	 * on XTAL pin is also handled here.
	 */
	if ((pll_src == PLL_CLK32K_SRC_XTAL) || periph_clk_src_using_xtal(periph_src)) {
		enable_32k_crystal(dev, flags);
		if (!devcfg->clkmon_bypass) {
			rc = check_32k_crystal(dev);
			if (rc) {
				/* disable crystal */
				vbr->CLK32_SRC &= ~(MCHP_VBATR_CS_XTAL_EN);
				LOG_ERR("XEC clock control: MEC172x XTAL check failed: %d", rc);
				return rc;
			}
		}
	} else {
		disable_32k_crystal(dev);
	}

	/* Do PLL first so we can use a peripheral timer still on silicon OSC */
	if (pll_src != PLL_CLK32K_SRC_SO) {
		connect_pll_32k(dev, pll_src, flags);
		rc = pll_wait_lock_periph(pcr, devcfg->pll_lock_timeout_ms);
	}

	if (periph_src != PERIPH_CLK32K_SRC_SO_SO) {
		connect_periph_32k(dev, periph_src, flags);
	}

	/* Configuration requests disabling internal silicon OSC. Only safe
	 * when neither clock domain still uses it.
	 */
	if (devcfg->dis_internal_osc) {
		if ((get_pll_32k_source(dev) != PLL_CLK32K_SRC_SO)
		    && !periph_clk_src_using_si(get_periph_32k_source(dev))) {
			vbr->CLK32_SRC &= ~(XEC_CC_VBATR_CS_SO_EN);
		}
	}

	return rc;
}
#endif
/*
* MEC172x Errata document DS80000913C
* Programming the PCR clock divider that divides the clock input to the ARM
* Cortex-M4 may cause a clock glitch. The recommended work-around is to
* issue four NOP instruction before and after the write to the PCR processor
* clock control register. The final four NOP instructions are followed by
* data and instruction barriers to flush the Cortex-M4's pipeline.
* NOTE: Zephyr provides inline functions for Cortex-Mx NOP but not for
* data and instruction barrier instructions. Caller's should only invoke this
* function with interrupts locked.
*/
/* Write the Cortex-M4 processor clock divider, applying the errata
 * DS80000913C NOP/barrier workaround described in the comment above.
 * Caller must invoke this with interrupts locked; the NOP sequence and
 * barriers are deliberate and must not be removed.
 */
static void xec_clock_control_core_clock_divider_set(uint8_t clkdiv)
{
	struct pcr_hw_regs *const pcr = (struct pcr_hw_regs *)DT_INST_REG_ADDR_BY_IDX(0, 0);
	arch_nop();
	arch_nop();
	arch_nop();
	arch_nop();
	pcr->PROC_CLK_CTRL = (uint32_t)clkdiv;
	arch_nop();
	arch_nop();
	arch_nop();
	arch_nop();
	/* Flush the Cortex-M4 pipeline after the divider change. */
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
}
/*
* PCR peripheral sleep enable allows the clocks to a specific peripheral to
* be gated off if the peripheral is not requesting a clock.
* slp_idx = zero based index into 32-bit PCR sleep enable registers.
* slp_pos = bit position in the register
* slp_en if non-zero set the bit else clear the bit
*/
/* Gate or ungate the clock of a single peripheral through the PCR
 * sleep enable registers. A set sleep enable bit allows hardware to
 * gate the peripheral's clock once it stops requesting one.
 * slp_idx: zero based index of the 32-bit PCR sleep enable register.
 * slp_pos: bit position within that register.
 * slp_en: non-zero sets the sleep enable bit, zero clears it.
 * Returns 0 on success, -EINVAL for an out of range index or position.
 */
int z_mchp_xec_pcr_periph_sleep(uint8_t slp_idx, uint8_t slp_pos,
			uint8_t slp_en)
{
	struct pcr_hw_regs *const pcr = (struct pcr_hw_regs *)DT_INST_REG_ADDR_BY_IDX(0, 0);

	if (slp_idx >= MCHP_MAX_PCR_SCR_REGS || slp_pos >= 32U) {
		return -EINVAL;
	}

	const uint32_t mask = BIT(slp_pos);

	if (slp_en != 0U) {
		pcr->SLP_EN[slp_idx] |= mask;
	} else {
		pcr->SLP_EN[slp_idx] &= ~mask;
	}

	return 0;
}
/* Most peripherals have a write only reset bit in the PCR reset enable registers.
* The layout of these registers is identical to the PCR sleep enable registers.
* Reset enables are protected by a lock register.
*/
/* Pulse the write-only PCR reset enable bit of a single peripheral.
 * The reset enable registers mirror the sleep enable register layout
 * and are protected by a lock register; the unlock/write/lock sequence
 * therefore runs with interrupts disabled.
 * Returns 0 on success, -EINVAL for an out of range index or position.
 */
int z_mchp_xec_pcr_periph_reset(uint8_t slp_idx, uint8_t slp_pos)
{
	struct pcr_hw_regs *const pcr = (struct pcr_hw_regs *)DT_INST_REG_ADDR_BY_IDX(0, 0);
	uint32_t key;

	if (slp_idx >= MCHP_MAX_PCR_SCR_REGS || slp_pos >= 32U) {
		return -EINVAL;
	}

	key = irq_lock();

	pcr->RST_EN_LOCK = XEC_CC_PCR_RST_EN_UNLOCK;
	pcr->RST_EN[slp_idx] = BIT(slp_pos);
	pcr->RST_EN_LOCK = XEC_CC_PCR_RST_EN_LOCK;

	irq_unlock(key);

	return 0;
}
/* clock control driver API implementation */
/* Common implementation for clock_control on/off requests.
 * sub_system is a pointer to struct mchp_xec_pcr_clk_ctrl describing the
 * target clock. Returns 0 on success, -EINVAL for a NULL subsystem, a
 * bad CPU divider, an out of range PCR register index, or an unknown
 * clock source identifier.
 */
static int xec_cc_on(const struct device *dev,
		     clock_control_subsys_t sub_system,
		     bool turn_on)
{
	struct pcr_hw_regs *const pcr = (struct pcr_hw_regs *)DT_INST_REG_ADDR_BY_IDX(0, 0);
	struct mchp_xec_pcr_clk_ctrl *cc = (struct mchp_xec_pcr_clk_ctrl *)sub_system;
	uint16_t pcr_idx = 0;
	uint16_t bitpos = 0;
	if (!cc) {
		return -EINVAL;
	}
	switch (MCHP_XEC_CLK_SRC_GET(cc->pcr_info)) {
	case MCHP_XEC_PCR_CLK_CORE:
	case MCHP_XEC_PCR_CLK_BUS:
		/* Core and bus clocks are hardware managed; nothing to do */
		break;
	case MCHP_XEC_PCR_CLK_CPU:
		/* CPU clock: apply the divider (non-zero required) using the
		 * errata-safe write sequence with interrupts locked.
		 */
		if (cc->pcr_info & MCHP_XEC_CLK_CPU_MASK) {
			uint32_t lock = irq_lock();
			xec_clock_control_core_clock_divider_set(
				cc->pcr_info & MCHP_XEC_CLK_CPU_MASK);
			irq_unlock(lock);
		} else {
			return -EINVAL;
		}
		break;
	case MCHP_XEC_PCR_CLK_PERIPH:
	case MCHP_XEC_PCR_CLK_PERIPH_FAST:
		pcr_idx = MCHP_XEC_PCR_SCR_GET_IDX(cc->pcr_info);
		bitpos = MCHP_XEC_PCR_SCR_GET_BITPOS(cc->pcr_info);
		if (pcr_idx >= MCHP_MAX_PCR_SCR_REGS) {
			return -EINVAL;
		}
		/* Sleep enable bit is inverted logic: clearing it keeps the
		 * peripheral clock on, setting it allows gating.
		 */
		if (turn_on) {
			pcr->SLP_EN[pcr_idx] &= ~BIT(bitpos);
		} else {
			pcr->SLP_EN[pcr_idx] |= BIT(bitpos);
		}
		break;
	case MCHP_XEC_PCR_CLK_PERIPH_SLOW:
		/* Slow clock: non-zero divider turns it on, zero turns it off */
		if (turn_on) {
			pcr->SLOW_CLK_CTRL =
				cc->pcr_info & MCHP_XEC_CLK_SLOW_MASK;
		} else {
			pcr->SLOW_CLK_CTRL = 0;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Turn on requested clock source.
 * Core, CPU, and Bus clocks are always on except in deep sleep state.
 * Peripheral clocks can be gated off if the peripheral's PCR sleep enable
 * is set and the peripheral indicates it does not need a clock by clearing
 * its PCR CLOCK_REQ read-only status.
 * Peripheral slow clock may be turned on by writing a non-zero divider value
 * to its PCR control register.
 * Returns 0 on success or a negative value from xec_cc_on().
 */
static int xec_clock_control_on(const struct device *dev,
				clock_control_subsys_t sub_system)
{
	return xec_cc_on(dev, sub_system, true);
}
/*
 * Turn off clock source.
 * Core, CPU, and Bus clocks are always on except in deep sleep when PLL is
 * turned off. Exception is 32 KHz clock.
 * Peripheral clocks are gated off when the peripheral's sleep enable is set
 * and the peripheral indicates it no longer needs a clock by de-asserting
 * its read-only PCR CLOCK_REQ bit.
 * Peripheral slow clock can be turned off by writing 0 to its control register.
 * Returns 0 on success or a negative value from xec_cc_on().
 */
static inline int xec_clock_control_off(const struct device *dev,
					clock_control_subsys_t sub_system)
{
	return xec_cc_on(dev, sub_system, false);
}
/* MEC172x and future SoC's implement a turbo clock mode where
* ARM Core, QMSPI, and PK use turbo clock. All other peripherals
* use AHB clock or the slow clock.
*/
/* Return the turbo clock frequency in Hz.
 * MEC15xx parts use a fixed 48 MHz turbo clock. Later parts report
 * either 96 MHz or 48 MHz depending on the PCR TURBO_CLK register.
 */
static uint32_t get_turbo_clock(const struct device *dev)
{
#ifdef CONFIG_SOC_SERIES_MEC15XX
	ARG_UNUSED(dev);

	return MHZ(48);
#else
	const struct xec_pcr_config * const devcfg = dev->config;
	struct pcr_hw_regs *const pcr = (struct pcr_hw_regs *)devcfg->pcr_base;

	return (pcr->TURBO_CLK & XEC_CC_PCR_TURBO_CLK_96M) ? MHZ(96) : MHZ(48);
#endif
}
/*
* MEC172x clock subsystem:
* Two main clock domains: PLL and Peripheral-32K. Each domain's 32 KHz source
* can be selected from one of three inputs:
* internal silicon OSC +/- 2% accuracy
* external crystal connected parallel or single ended
* external 32 KHz 50% duty cycle waveform on 32KHZ_IN pin.
* PLL domain supplies 96 MHz, 48 MHz, and other high speed clocks to all
* peripherals except those in the Peripheral-32K clock domain. The slow clock
* is derived from the 48 MHz produced by the PLL.
* ARM Cortex-M4 core input: 96MHz
* AHB clock input: 48 MHz
* Fast AHB peripherals: 96 MHz internal and 48 MHz AHB interface.
* Slow clock peripherals: PWM, TACH, PROCHOT
* Peripheral-32K domain peripherals:
* WDT, RTC, RTOS timer, hibernation timers, week timer
*
* Peripherals using both PLL and 32K clock domains:
* BBLED, RPMFAN
*/
/* Implementation of clock_control get_rate.
 * Core and fast peripheral clocks run at the turbo frequency, bus and
 * normal peripheral clocks at 48 MHz, the CPU at turbo divided by the
 * PCR processor clock divider, and the slow clock at 48 MHz divided by
 * its programmed divider. A zero divider reports a rate of 0 (clock
 * considered off/not running).
 * Returns 0 on success, -EINVAL for an unknown bus identifier.
 */
static int xec_clock_control_get_subsys_rate(const struct device *dev,
					     clock_control_subsys_t sub_system,
					     uint32_t *rate)
{
	const struct xec_pcr_config * const devcfg = dev->config;
	struct pcr_hw_regs *const pcr = (struct pcr_hw_regs *)devcfg->pcr_base;
	uint32_t bus = (uint32_t)sub_system;
	uint32_t temp = 0;
	uint32_t ahb_clock = MHZ(48);
	uint32_t turbo_clock = get_turbo_clock(dev);
	switch (bus) {
	case MCHP_XEC_PCR_CLK_CORE:
	case MCHP_XEC_PCR_CLK_PERIPH_FAST:
		*rate = turbo_clock;
		break;
	case MCHP_XEC_PCR_CLK_CPU:
		/* If PCR PROC_CLK_CTRL is 0 the chip is not running.
		 * Guard the division instead of invoking undefined
		 * behavior (divide by zero).
		 */
		temp = pcr->PROC_CLK_CTRL;
		if (temp) {
			*rate = turbo_clock / temp;
		} else {
			*rate = 0;
		}
		break;
	case MCHP_XEC_PCR_CLK_BUS:
	case MCHP_XEC_PCR_CLK_PERIPH:
		*rate = ahb_clock;
		break;
	case MCHP_XEC_PCR_CLK_PERIPH_SLOW:
		temp = pcr->SLOW_CLK_CTRL;
		if (temp) {
			*rate = ahb_clock / temp;
		} else {
			*rate = 0; /* slow clock off */
		}
		break;
	default:
		*rate = 0;
		return -EINVAL;
	}
	return 0;
}
#if defined(CONFIG_PM)
/* Prepare the SoC for light or heavy (deep) sleep: request all clock
 * domains sleep via the PCR system sleep control register and set the
 * Cortex-M SCR SLEEPDEEP bit (bit 2).
 */
void mchp_xec_clk_ctrl_sys_sleep_enable(bool is_deep)
{
	struct pcr_hw_regs *const pcr = (struct pcr_hw_regs *)DT_INST_REG_ADDR_BY_IDX(0, 0);
	uint32_t sys_sleep_mode = MCHP_PCR_SYS_SLP_CTRL_SLP_ALL;
	if (is_deep) {
		sys_sleep_mode |= MCHP_PCR_SYS_SLP_CTRL_SLP_HEAVY;
	}
	/* BIT(2) is the Cortex-M SCB SCR SLEEPDEEP bit */
	SCB->SCR |= BIT(2);
	pcr->SYS_SLP_CTRL = sys_sleep_mode;
}
/* Undo sleep preparation: clear the PCR system sleep control register
 * and the Cortex-M SCR SLEEPDEEP bit.
 */
void mchp_xec_clk_ctrl_sys_sleep_disable(void)
{
	struct pcr_hw_regs *const pcr = (struct pcr_hw_regs *)DT_INST_REG_ADDR_BY_IDX(0, 0);
	pcr->SYS_SLP_CTRL = 0;
	SCB->SCR &= ~BIT(2);
}
#endif
/* Clock controller driver registration */
/* Clock controller driver registration: API vtable used by
 * DEVICE_DT_INST_DEFINE below.
 */
static const struct clock_control_driver_api xec_clock_control_api = {
	.on = xec_clock_control_on,
	.off = xec_clock_control_off,
	.get_rate = xec_clock_control_get_subsys_rate,
};
/* Driver init: configure PCR sleep registers, apply pinctrl, select the
 * 32 KHz sources for the PLL and peripheral clock domains, and program
 * the core clock divider.
 * If the configured 32 KHz source needs the 32KHZ_IN pin but pinctrl
 * setup failed, fall back to the internal silicon oscillator.
 * Returns 0 on success or the error from soc_clk32_init().
 */
static int xec_clock_control_init(const struct device *dev)
{
	const struct xec_pcr_config * const devcfg = dev->config;
	struct pcr_hw_regs *const pcr = (struct pcr_hw_regs *)devcfg->pcr_base;
	enum pll_clk32k_src pll_clk_src = devcfg->pll_src;
	enum periph_clk32k_src periph_clk_src = devcfg->periph_src;
	uint32_t clk_flags = 0U;
	int rc = 0;
	if (devcfg->xtal_se) {
		clk_flags |= CLK32K_FLAG_CRYSTAL_SE;
	}
	pcr_slp_init(pcr);
	/* pinctrl failure only matters when a source uses the 32KHZ_IN pin;
	 * otherwise the error is deliberately ignored here.
	 */
	rc = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_DEFAULT);
	if ((pll_clk_src == PLL_CLK32K_SRC_PIN) || periph_clk_src_using_pin(periph_clk_src)) {
		if (rc) {
			LOG_ERR("XEC clock control: PINCTRL apply error %d", rc);
			pll_clk_src = PLL_CLK32K_SRC_SO;
			periph_clk_src = PERIPH_CLK32K_SRC_SO_SO;
			clk_flags = 0U;
		}
	}
	/* sleep used as debug */
	/* NOTE(review): applying the SLEEP pinctrl state during init looks
	 * intentional per the comment above — confirm against board usage.
	 */
	rc = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_SLEEP);
	if ((rc != 0) && (rc != -ENOENT)) {
		LOG_ERR("XEC clock control: PINCTRL debug apply error %d", rc);
	}
	rc = soc_clk32_init(dev, pll_clk_src, periph_clk_src, clk_flags);
	if (rc) {
		LOG_ERR("XEC clock control: init error %d", rc);
	}
	xec_clock_control_core_clock_divider_set(devcfg->core_clk_div);
	return rc;
}
/* 32 KHz source selections from devicetree, defaulting to the internal
 * silicon oscillator when the property is absent.
 */
#define XEC_PLL_32K_SRC(i) \
	(enum pll_clk32k_src)DT_INST_PROP_OR(i, pll_32k_src, PLL_CLK32K_SRC_SO)
/* Bug fix: use the macro's instance argument instead of the previous
 * hard coded instance 0.
 */
#define XEC_PERIPH_32K_SRC(i) \
	(enum periph_clk32k_src)DT_INST_PROP_OR(i, periph_32k_src, PERIPH_CLK32K_SRC_SO_SO)
PINCTRL_DT_INST_DEFINE(0);
/* Driver configuration built from devicetree instance 0, with fallback
 * defaults for every optional property.
 */
const struct xec_pcr_config pcr_xec_config = {
	.pcr_base = DT_INST_REG_ADDR_BY_IDX(0, 0),
	.vbr_base = DT_INST_REG_ADDR_BY_IDX(0, 1),
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
	.xtal_enable_delay_ms =
		(uint16_t)DT_INST_PROP_OR(0, xtal_enable_delay_ms, XEC_CC_XTAL_EN_DELAY_MS_DFLT),
	.pll_lock_timeout_ms =
		(uint16_t)DT_INST_PROP_OR(0, pll_lock_timeout_ms, XEC_CC_DFLT_PLL_LOCK_WAIT_MS),
	.period_min = (uint16_t)DT_INST_PROP_OR(0, clk32kmon_period_min, CNT32K_TMIN),
	.period_max = (uint16_t)DT_INST_PROP_OR(0, clk32kmon_period_max, CNT32K_TMAX),
	.core_clk_div = (uint8_t)DT_INST_PROP_OR(0, core_clk_div, CONFIG_SOC_MEC_PROC_CLK_DIV),
	.xtal_se = (uint8_t)DT_INST_PROP_OR(0, xtal_single_ended, 0),
	.max_dc_va = (uint8_t)DT_INST_PROP_OR(0, clk32kmon_duty_cycle_var_max, CNT32K_DUTY_MAX),
	.min_valid = (uint8_t)DT_INST_PROP_OR(0, clk32kmon_valid_min, CNT32K_VAL_MIN),
	.pll_src = XEC_PLL_32K_SRC(0),
	.periph_src = XEC_PERIPH_32K_SRC(0),
	.clkmon_bypass = (uint8_t)DT_INST_PROP_OR(0, clkmon_bypass, 0),
	.dis_internal_osc = (uint8_t)DT_INST_PROP_OR(0, internal_osc_disable, 0),
};
/* Register the device at PRE_KERNEL_1 so dependent drivers can request
 * clocks during their own early initialization.
 */
DEVICE_DT_INST_DEFINE(0,
		    xec_clock_control_init,
		    NULL,
		    NULL, &pcr_xec_config,
		    PRE_KERNEL_1,
		    CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		    &xec_clock_control_api);
``` | /content/code_sandbox/drivers/clock_control/clock_control_mchp_xec.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 10,811 |
```unknown
# Atmel SAM ADC driver, enabled by default when a compatible
# devicetree node is enabled.
config ADC_SAM
	bool "Atmel SAM series ADC Driver"
	default y
	depends on DT_HAS_ATMEL_SAM_ADC_ENABLED
	select ADC_CONFIGURABLE_INPUTS
	help
	  Enable Atmel SAM MCU Family Analog-to-Digital Converter (ADC) driver.
``` | /content/code_sandbox/drivers/adc/Kconfig.sam | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 59 |
```unknown
# Organisation (CSIRO) ABN 41 687 119 230.
# Hidden option for turning on the dummy driver for vnd,adc devices
# used in testing. It has no prompt, so it cannot be set manually and
# simply follows the devicetree state.
config ADC_TEST
	def_bool DT_HAS_VND_ADC_ENABLED
	depends on DT_HAS_VND_ADC_ENABLED
``` | /content/code_sandbox/drivers/adc/Kconfig.test | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 62 |
```c
/*
*
*/
#define DT_DRV_COMPAT microchip_xec_adc
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(adc_mchp_xec);
#include <zephyr/drivers/adc.h>
#ifdef CONFIG_SOC_SERIES_MEC172X
#include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h>
#endif
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/policy.h>
#include <soc.h>
#include <errno.h>
#include <zephyr/irq.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#define XEC_ADC_VREF_ANALOG 3300
/* ADC Control Register */
#define XEC_ADC_CTRL_SINGLE_DONE_STATUS BIT(7)
#define XEC_ADC_CTRL_REPEAT_DONE_STATUS BIT(6)
#define XER_ADC_CTRL_SOFT_RESET BIT(4)
#define XEC_ADC_CTRL_POWER_SAVER_DIS BIT(3)
#define XEC_ADC_CTRL_START_REPEAT BIT(2)
#define XEC_ADC_CTRL_START_SINGLE BIT(1)
#define XEC_ADC_CTRL_ACTIVATE BIT(0)
/* ADC implements two interrupt signals:
* One-shot(single) conversion of a set of channels
* Repeat conversion of a set of channels
* Channel sets for single and repeat may be different.
*/
/* One PM policy lock flag per ADC interrupt source (single and repeat). */
enum adc_pm_policy_state_flag {
	ADC_PM_POLICY_STATE_SINGLE_FLAG,
	ADC_PM_POLICY_STATE_REPEAT_FLAG,
	ADC_PM_POLICY_STATE_FLAG_COUNT,
};
/* Hardware register layout of the XEC ADC block. */
struct adc_xec_regs {
	uint32_t control_reg;
	uint32_t delay_reg;
	uint32_t status_reg;
	uint32_t single_reg;          /* single conversion channel bitmap */
	uint32_t repeat_reg;          /* repeat conversion channel bitmap */
	uint32_t channel_read_reg[8]; /* one result register per channel */
	uint32_t unused[18];
	uint32_t config_reg;
	uint32_t vref_channel_reg;
	uint32_t vref_control_reg;
	uint32_t sar_control_reg;
};
/* Static per-instance configuration from devicetree. */
struct adc_xec_config {
	struct adc_xec_regs *regs;
	/* GIRQ aggregator index/position for the two interrupt sources */
	uint8_t girq_single;
	uint8_t girq_single_pos;
	uint8_t girq_repeat;
	uint8_t girq_repeat_pos;
	/* PCR sleep register index and bit position for this block */
	uint8_t pcr_regidx;
	uint8_t pcr_bitpos;
	const struct pinctrl_dev_config *pcfg;
};
/* Runtime driver state. */
struct adc_xec_data {
	struct adc_context ctx;
	const struct device *adc_dev;
	uint16_t *buffer;        /* next result write position */
	uint16_t *repeat_buffer; /* start of current sampling's results */
#ifdef CONFIG_PM_DEVICE
	ATOMIC_DEFINE(pm_policy_state_flag, ADC_PM_POLICY_STATE_FLAG_COUNT);
#endif
};
#ifdef CONFIG_PM_DEVICE
/* Take the PM state lock once per flag: the atomic test-and-set keeps
 * get/put calls balanced even if invoked repeatedly.
 */
static void adc_xec_pm_policy_state_lock_get(struct adc_xec_data *data,
					     enum adc_pm_policy_state_flag flag)
{
	if (atomic_test_and_set_bit(data->pm_policy_state_flag, flag) == 0) {
		pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	}
}
/* Release the PM state lock only when this flag actually held it. */
static void adc_xec_pm_policy_state_lock_put(struct adc_xec_data *data,
					     enum adc_pm_policy_state_flag flag)
{
	if (atomic_test_and_clear_bit(data->pm_policy_state_flag, flag) == 1) {
		pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	}
}
#endif
/* adc_context callback: kick off a one-shot conversion of the channels
 * selected in the active sequence. Results are collected in the single
 * conversion ISR.
 */
static void adc_context_start_sampling(struct adc_context *ctx)
{
	struct adc_xec_data *data = CONTAINER_OF(ctx, struct adc_xec_data, ctx);
	const struct device *adc_dev = data->adc_dev;
	const struct adc_xec_config * const devcfg = adc_dev->config;
	struct adc_xec_regs *regs = devcfg->regs;
	/* Remember the sampling start so repeats can rewind the buffer */
	data->repeat_buffer = data->buffer;
#ifdef CONFIG_PM_DEVICE
	adc_xec_pm_policy_state_lock_get(data, ADC_PM_POLICY_STATE_SINGLE_FLAG);
#endif
	regs->single_reg = ctx->sequence.channels;
	regs->control_reg |= XEC_ADC_CTRL_START_SINGLE;
}
/* adc_context callback: rewind the output pointer to the start of the
 * current sampling when it is being retaken, otherwise leave it where
 * the last conversion advanced it.
 */
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
					      bool repeat_sampling)
{
	struct adc_xec_data *drv_data = CONTAINER_OF(ctx, struct adc_xec_data, ctx);

	if (!repeat_sampling) {
		return;
	}

	drv_data->buffer = drv_data->repeat_buffer;
}
static int adc_xec_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct adc_xec_config *const cfg = dev->config;
struct adc_xec_regs * const regs = cfg->regs;
uint32_t areg;
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
return -EINVAL;
}
if (channel_cfg->channel_id >= MCHP_ADC_MAX_CHAN) {
return -EINVAL;
}
if (channel_cfg->gain != ADC_GAIN_1) {
return -EINVAL;
}
/* Setup VREF */
areg = regs->vref_channel_reg;
areg &= ~MCHP_ADC_CH_VREF_SEL_MASK(channel_cfg->channel_id);
if (channel_cfg->reference == ADC_REF_INTERNAL) {
areg |= MCHP_ADC_CH_VREF_SEL_PAD(channel_cfg->channel_id);
} else if (channel_cfg->reference == ADC_REF_EXTERNAL0) {
areg |= MCHP_ADC_CH_VREF_SEL_GPIO(channel_cfg->channel_id);
} else {
return -EINVAL;
}
regs->vref_channel_reg = areg;
/* Differential mode? */
areg = regs->sar_control_reg;
areg &= ~BIT(MCHP_ADC_SAR_CTRL_SELDIFF_POS);
if (channel_cfg->differential != 0) {
areg |= MCHP_ADC_SAR_CTRL_SELDIFF_EN;
}
regs->sar_control_reg = areg;
return 0;
}
/* Check the user supplied buffer can hold one uint16_t result per
 * selected channel (low 8 bits of the mask) for every sampling in the
 * sequence.
 */
static bool adc_xec_validate_buffer_size(const struct adc_sequence *sequence)
{
	uint32_t remaining = sequence->channels & 0xffu;
	int chan_count = 0;
	size_t buff_need;

	while (remaining != 0) {
		remaining &= remaining - 1; /* clear lowest set bit */
		chan_count++;
	}

	buff_need = chan_count * sizeof(uint16_t);

	if (sequence->options) {
		buff_need *= 1 + sequence->options->extra_samplings;
	}

	return buff_need <= sequence->buffer_size;
}
/* Validate a sequence, program the SAR resolution and start the read
 * through the adc_context helpers.
 * Supports 10 or 12 bit resolution; 10-bit results are right shifted by
 * hardware (SHIFTD).
 * Returns 0 on success, -EINVAL for a bad channel mask or resolution,
 * -ENOMEM when the output buffer is too small.
 */
static int adc_xec_start_read(const struct device *dev,
			      const struct adc_sequence *sequence)
{
	const struct adc_xec_config *const cfg = dev->config;
	struct adc_xec_regs * const regs = cfg->regs;
	struct adc_xec_data * const data = dev->data;
	uint32_t sar_ctrl;
	if (sequence->channels & ~BIT_MASK(MCHP_ADC_MAX_CHAN)) {
		LOG_ERR("Incorrect channels, bitmask 0x%x", sequence->channels);
		return -EINVAL;
	}
	if (sequence->channels == 0UL) {
		LOG_ERR("No channel selected");
		return -EINVAL;
	}
	if (!adc_xec_validate_buffer_size(sequence)) {
		LOG_ERR("Incorrect buffer size");
		return -ENOMEM;
	}
	/* Setup ADC resolution */
	sar_ctrl = regs->sar_control_reg;
	sar_ctrl &= ~(MCHP_ADC_SAR_CTRL_RES_MASK |
		      (1 << MCHP_ADC_SAR_CTRL_SHIFTD_POS));
	if (sequence->resolution == 12) {
		sar_ctrl |= MCHP_ADC_SAR_CTRL_RES_12_BITS;
	} else if (sequence->resolution == 10) {
		sar_ctrl |= MCHP_ADC_SAR_CTRL_RES_10_BITS;
		sar_ctrl |= MCHP_ADC_SAR_CTRL_SHIFTD_EN;
	} else {
		return -EINVAL;
	}
	regs->sar_control_reg = sar_ctrl;
	data->buffer = sequence->buffer;
	adc_context_start_read(&data->ctx, sequence);
	return adc_context_wait_for_completion(&data->ctx);
}
/* Implementation of the ADC API read: synchronous, serialized through
 * the adc_context lock.
 */
static int adc_xec_read(const struct device *dev,
			const struct adc_sequence *sequence)
{
	struct adc_xec_data * const data = dev->data;
	int error;
	adc_context_lock(&data->ctx, false, NULL);
	error = adc_xec_start_read(dev, sequence);
	adc_context_release(&data->ctx, error);
	return error;
}
#ifdef CONFIG_ADC_ASYNC
/* Implementation of the ADC API read_async: completion is signalled on
 * the caller supplied k_poll_signal.
 */
static int adc_xec_read_async(const struct device *dev,
			      const struct adc_sequence *sequence,
			      struct k_poll_signal *async)
{
	struct adc_xec_data * const data = dev->data;
	int error;
	adc_context_lock(&data->ctx, true, async);
	error = adc_xec_start_read(dev, sequence);
	adc_context_release(&data->ctx, error);
	return error;
}
#endif /* CONFIG_ADC_ASYNC */
/* Copy finished conversion results into the user buffer in ascending
 * channel order and clear the hardware status bits that were consumed.
 */
static void xec_adc_get_sample(const struct device *dev)
{
	const struct adc_xec_config *const cfg = dev->config;
	struct adc_xec_regs * const regs = cfg->regs;
	struct adc_xec_data * const data = dev->data;
	uint32_t idx;
	uint32_t channels = regs->status_reg;
	uint32_t ch_status = channels;
	uint32_t bit;
	/*
	 * Using the enabled channel bit set, from
	 * lowest channel number to highest, find out
	 * which channel is enabled and copy the ADC
	 * values from hardware registers to the data
	 * buffer.
	 */
	bit = find_lsb_set(channels);
	while (bit != 0) {
		idx = bit - 1;
		*data->buffer = (uint16_t)regs->channel_read_reg[idx];
		data->buffer++;
		channels &= ~BIT(idx);
		bit = find_lsb_set(channels);
	}
	/* Clear the status register (write-one-to-clear) */
	regs->status_reg = ch_status;
}
#ifdef CONFIG_SOC_SERIES_MEC172X
/* MEC172x routes GIRQ management through the ECIA interrupt controller
 * driver.
 */
static inline void adc_xec_girq_clr(uint8_t girq_idx, uint8_t girq_posn)
{
	mchp_xec_ecia_girq_src_clr(girq_idx, girq_posn);
}
static inline void adc_xec_girq_en(uint8_t girq_idx, uint8_t girq_posn)
{
	mchp_xec_ecia_girq_src_en(girq_idx, girq_posn);
}
static inline void adc_xec_girq_dis(uint8_t girq_idx, uint8_t girq_posn)
{
	mchp_xec_ecia_girq_src_dis(girq_idx, girq_posn);
}
#else
/* Earlier SoC series: access the GIRQ source/enable registers directly. */
static inline void adc_xec_girq_clr(uint8_t girq_idx, uint8_t girq_posn)
{
	MCHP_GIRQ_SRC(girq_idx) = BIT(girq_posn);
}
static inline void adc_xec_girq_en(uint8_t girq_idx, uint8_t girq_posn)
{
	MCHP_GIRQ_ENSET(girq_idx) = BIT(girq_posn);
}
static inline void adc_xec_girq_dis(uint8_t girq_idx, uint8_t girq_posn)
{
	/* Bug fix: clear the enable bit of this ADC GIRQ source. The
	 * previous code wrote MCHP_KBC_IBF_GIRQ, a keyboard controller
	 * bit apparently copy-pasted from another driver, which left the
	 * requested ADC source enabled.
	 */
	MCHP_GIRQ_ENCLR(girq_idx) = BIT(girq_posn);
}
#endif
/* Single conversion done ISR: acknowledge the interrupt at the block
 * and GIRQ level, harvest the results, release the PM lock and notify
 * the adc_context state machine.
 */
static void adc_xec_single_isr(const struct device *dev)
{
	const struct adc_xec_config *const cfg = dev->config;
	struct adc_xec_regs * const regs = cfg->regs;
	struct adc_xec_data * const data = dev->data;
	uint32_t ctrl;
	/* Clear START_SINGLE bit and clear SINGLE_DONE_STATUS */
	ctrl = regs->control_reg;
	ctrl &= ~XEC_ADC_CTRL_START_SINGLE;
	ctrl |= XEC_ADC_CTRL_SINGLE_DONE_STATUS;
	regs->control_reg = ctrl;
	/* Also clear GIRQ source status bit */
	adc_xec_girq_clr(cfg->girq_single, cfg->girq_single_pos);
	xec_adc_get_sample(dev);
#ifdef CONFIG_PM_DEVICE
	adc_xec_pm_policy_state_lock_put(data, ADC_PM_POLICY_STATE_SINGLE_FLAG);
#endif
	adc_context_on_sampling_done(&data->ctx, dev);
	LOG_DBG("ADC ISR triggered.");
}
#ifdef CONFIG_PM_DEVICE
/* Device PM hook: activate/deactivate the ADC block and switch pinctrl
 * states. A missing sleep pinctrl state is not an error.
 */
static int adc_xec_pm_action(const struct device *dev, enum pm_device_action action)
{
	const struct adc_xec_config *const devcfg = dev->config;
	struct adc_xec_regs * const adc_regs = devcfg->regs;
	int ret;
	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_DEFAULT);
		/* ADC activate */
		adc_regs->control_reg |= XEC_ADC_CTRL_ACTIVATE;
		break;
	case PM_DEVICE_ACTION_SUSPEND:
		/* ADC deactivate */
		adc_regs->control_reg &= ~(XEC_ADC_CTRL_ACTIVATE);
		/* If application does not want to turn off ADC pins it will
		 * not define pinctrl-1 for this node.
		 */
		ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_SLEEP);
		if (ret == -ENOENT) { /* pinctrl-1 does not exist. */
			ret = 0;
		}
		break;
	default:
		ret = -ENOTSUP;
	}
	return ret;
}
#endif /* CONFIG_PM_DEVICE */
struct adc_driver_api adc_xec_api = {
.channel_setup = adc_xec_channel_setup,
.read = adc_xec_read,
#if defined(CONFIG_ADC_ASYNC)
.read_async = adc_xec_read_async,
#endif
.ref_internal = XEC_ADC_VREF_ANALOG,
};
/* ADC Config Register: program the same value into the clock low and
 * high time fields.
 */
#define XEC_ADC_CFG_CLK_VAL(clk_time) (	\
	(clk_time << MCHP_ADC_CFG_CLK_LO_TIME_POS) | \
	(clk_time << MCHP_ADC_CFG_CLK_HI_TIME_POS))
/* Driver init: apply pinctrl, program the conversion clock timing,
 * activate the block, set up the single conversion GIRQ/IRQ path (the
 * repeat path is disabled) and unlock the adc_context.
 * Returns 0 on success or the pinctrl error.
 */
static int adc_xec_init(const struct device *dev)
{
	const struct adc_xec_config *const cfg = dev->config;
	struct adc_xec_regs * const regs = cfg->regs;
	struct adc_xec_data * const data = dev->data;
	int ret;
	data->adc_dev = dev;
	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret != 0) {
		LOG_ERR("XEC ADC V2 pinctrl setup failed (%d)", ret);
		return ret;
	}
	regs->config_reg = XEC_ADC_CFG_CLK_VAL(DT_INST_PROP(0, clktime));
	/* Activate the block; writing the DONE status bits clears them */
	regs->control_reg =  XEC_ADC_CTRL_ACTIVATE
			     | XEC_ADC_CTRL_POWER_SAVER_DIS
			     | XEC_ADC_CTRL_SINGLE_DONE_STATUS
			     | XEC_ADC_CTRL_REPEAT_DONE_STATUS;
	adc_xec_girq_dis(cfg->girq_repeat, cfg->girq_repeat_pos);
	adc_xec_girq_clr(cfg->girq_repeat, cfg->girq_repeat_pos);
	adc_xec_girq_dis(cfg->girq_single, cfg->girq_single_pos);
	adc_xec_girq_clr(cfg->girq_single, cfg->girq_single_pos);
	adc_xec_girq_en(cfg->girq_single, cfg->girq_single_pos);
	IRQ_CONNECT(DT_INST_IRQN(0),
		    DT_INST_IRQ(0, priority),
		    adc_xec_single_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQN(0));
	adc_context_unlock_unconditionally(&data->ctx);
	return 0;
}
PINCTRL_DT_INST_DEFINE(0);
/* Instance 0 configuration from devicetree. */
static struct adc_xec_config adc_xec_dev_cfg_0 = {
	.regs = (struct adc_xec_regs *)(DT_INST_REG_ADDR(0)),
	.girq_single = (uint8_t)(DT_INST_PROP_BY_IDX(0, girqs, 0)),
	.girq_single_pos = (uint8_t)(DT_INST_PROP_BY_IDX(0, girqs, 1)),
	.girq_repeat = (uint8_t)(DT_INST_PROP_BY_IDX(0, girqs, 2)),
	.girq_repeat_pos = (uint8_t)(DT_INST_PROP_BY_IDX(0, girqs, 3)),
	.pcr_regidx = (uint8_t)(DT_INST_PROP_BY_IDX(0, pcrs, 0)),
	.pcr_bitpos = (uint8_t)(DT_INST_PROP_BY_IDX(0, pcrs, 1)),
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
};
/* Instance 0 runtime data with the adc_context synchronization members
 * statically initialized.
 */
static struct adc_xec_data adc_xec_dev_data_0 = {
	ADC_CONTEXT_INIT_TIMER(adc_xec_dev_data_0, ctx),
	ADC_CONTEXT_INIT_LOCK(adc_xec_dev_data_0, ctx),
	ADC_CONTEXT_INIT_SYNC(adc_xec_dev_data_0, ctx),
};
PM_DEVICE_DT_INST_DEFINE(0, adc_xec_pm_action);
DEVICE_DT_INST_DEFINE(0, adc_xec_init, PM_DEVICE_DT_INST_GET(0),
		    &adc_xec_dev_data_0, &adc_xec_dev_cfg_0,
		    PRE_KERNEL_1, CONFIG_ADC_INIT_PRIORITY,
		    &adc_xec_api);
``` | /content/code_sandbox/drivers/adc/adc_mchp_xec.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,596 |
```c
/*
*
*/
#define DT_DRV_COMPAT renesas_smartbond_sdadc
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include <DA1469xAB.h>
#include <da1469x_pd.h>
#include "adc_context.h"
#include <zephyr/dt-bindings/adc/smartbond-adc.h>
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/policy.h>
#include <zephyr/pm/device_runtime.h>
LOG_MODULE_REGISTER(adc_smartbond_sdadc);
/* Static configuration from devicetree. */
struct sdadc_smartbond_cfg {
	const struct pinctrl_dev_config *pcfg;
	/** Value for SDADC_CLK_FREQ */
	uint8_t sdadc_clk_freq;
};
/* Runtime driver state. */
struct sdadc_smartbond_data {
	struct adc_context ctx;
	/* Buffer to store channel data */
	uint16_t *buffer;
	/* Copy of channel mask from sequence; bits are cleared as each
	 * channel's conversion completes in the ISR
	 */
	uint32_t channel_read_mask;
	/* Number of bits in sequence channels */
	uint8_t sequence_channel_count;
	/* Index in buffer to store current value to */
	uint8_t result_index;
};
#define SMARTBOND_SDADC_CHANNEL_COUNT	8
/* Per-channel cached SDADC_CTRL_REG configuration bits. */
struct sdadc_smartbond_channel_cfg {
	uint32_t sd_adc_ctrl_reg;
};
static struct sdadc_smartbond_channel_cfg m_sdchannels[SMARTBOND_SDADC_CHANNEL_COUNT];
/* Implementation of the ADC driver API function: adc_channel_setup.
 * Validates the channel configuration and caches the per-channel
 * SDADC_CTRL_REG input selection bits for later use when sampling.
 * Gain must be 1/4 for VBAT and 1 for all other inputs; only the
 * internal reference is supported.
 * Returns 0 on success, -EINVAL for an unsupported configuration.
 */
static int sdadc_smartbond_channel_setup(const struct device *dev,
					 const struct adc_channel_cfg *channel_cfg)
{
	uint8_t channel_id = channel_cfg->channel_id;
	struct sdadc_smartbond_channel_cfg *config;
	/* Bug fix: validate the channel id before indexing m_sdchannels.
	 * Previously the element address was formed first, which is
	 * undefined behavior for an out of range id.
	 */
	if (channel_id >= SMARTBOND_SDADC_CHANNEL_COUNT) {
		return -EINVAL;
	}
	config = &m_sdchannels[channel_id];
	if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
		LOG_ERR("Selected ADC acquisition time is not valid");
		return -EINVAL;
	}
	if (channel_cfg->input_positive > SMARTBOND_SDADC_VBAT) {
		LOG_ERR("Channels out of range");
		return -EINVAL;
	}
	if (channel_cfg->differential) {
		if (channel_cfg->input_negative >= SMARTBOND_SDADC_VBAT) {
			LOG_ERR("Differential negative channels out of range");
			return -EINVAL;
		}
	}
	config->sd_adc_ctrl_reg = 0;
	if ((channel_cfg->input_positive == SMARTBOND_SDADC_VBAT &&
	     channel_cfg->gain != ADC_GAIN_1_4) ||
	    (channel_cfg->input_positive != SMARTBOND_SDADC_VBAT &&
	     channel_cfg->gain != ADC_GAIN_1)) {
		LOG_ERR("ADC gain should be 1/4 for VBAT and 1 for all other channels");
		return -EINVAL;
	}
	switch (channel_cfg->reference) {
	case ADC_REF_INTERNAL:
		break;
	default:
		LOG_ERR("Selected ADC reference is not valid");
		return -EINVAL;
	}
	config->sd_adc_ctrl_reg =
		channel_cfg->input_positive << SDADC_SDADC_CTRL_REG_SDADC_INP_SEL_Pos;
	if (channel_cfg->differential) {
		config->sd_adc_ctrl_reg |=
			channel_cfg->input_negative << SDADC_SDADC_CTRL_REG_SDADC_INN_SEL_Pos;
	} else {
		config->sd_adc_ctrl_reg |= SDADC_SDADC_CTRL_REG_SDADC_SE_Msk;
	}
	return 0;
}
/* SDADC_CTRL_REG bits that are configured per channel and must be
 * cleared before applying the cached per-channel value.
 */
#define PER_CHANNEL_ADC_CONFIG_MASK (SDADC_SDADC_CTRL_REG_SDADC_INP_SEL_Msk | \
				     SDADC_SDADC_CTRL_REG_SDADC_INN_SEL_Msk | \
				     SDADC_SDADC_CTRL_REG_SDADC_SE_Msk \
				    )
/* Take a runtime PM reference and block the normal sleep state while a
 * conversion is in flight. No-op when CONFIG_PM_DEVICE is disabled.
 * NOTE(review): the data parameter is currently unused — confirm it is
 * kept for future per-instance lock bookkeeping.
 */
static inline void sdadc_smartbond_pm_policy_state_lock_get(const struct device *dev,
							    struct sdadc_smartbond_data *data)
{
#if defined(CONFIG_PM_DEVICE)
	pm_device_runtime_get(dev);
	/*
	 * Prevent the SoC from entering the normal sleep state.
	 */
	pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
#endif
}
/* Release the sleep-state lock and drop the runtime PM reference taken
 * by the matching _get call. No-op when CONFIG_PM_DEVICE is disabled.
 */
static inline void sdadc_smartbond_pm_policy_state_lock_put(const struct device *dev,
							    struct sdadc_smartbond_data *data)
{
#if defined(CONFIG_PM_DEVICE)
	/*
	 * Allow the SoC to enter the normal sleep state once sdadc is done.
	 */
	pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
	pm_device_runtime_put(dev);
#endif
}
/* Count the number of set bits in n (Kernighan's method: each
 * iteration clears the lowest set bit).
 */
static int pop_count(uint32_t n)
{
	int count = 0;

	while (n != 0) {
		n &= n - 1;
		count++;
	}

	return count;
}
/* adc_context callback: start a conversion of the lowest still-pending
 * channel. The ISR chains the remaining channels one at a time.
 */
static void adc_context_start_sampling(struct adc_context *ctx)
{
	uint32_t val;
	struct sdadc_smartbond_data *data =
		CONTAINER_OF(ctx, struct sdadc_smartbond_data, ctx);
	/* Extract lower channel from sequence mask */
	int current_channel = u32_count_trailing_zeros(data->channel_read_mask);
	/* Wait until the SDADC LDO stabilizes */
	while (!(SDADC->SDADC_CTRL_REG & SDADC_SDADC_CTRL_REG_SDADC_LDO_OK_Msk)) {
		__NOP();
	}
	if (ctx->sequence.calibrate) {
		/* TODO: Add calibration code */
	} else {
		/* Apply the cached per-channel input selection, enable the
		 * conversion done interrupt and start. The OSR field encodes
		 * oversampling as 2^(7 + OSR), hence the "- 7".
		 */
		val = SDADC->SDADC_CTRL_REG & ~PER_CHANNEL_ADC_CONFIG_MASK;
		val |= m_sdchannels[current_channel].sd_adc_ctrl_reg;
		val |= SDADC_SDADC_CTRL_REG_SDADC_START_Msk |
		       SDADC_SDADC_CTRL_REG_SDADC_MINT_Msk;
		val |= (ctx->sequence.oversampling - 7) << SDADC_SDADC_CTRL_REG_SDADC_OSR_Pos;
		SDADC->SDADC_CTRL_REG = val;
	}
}
/* adc_context callback: advance the output pointer past the results of
 * a completed sampling; on a repeated sampling the pointer stays put so
 * the previous values are overwritten.
 */
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
					      bool repeat)
{
	struct sdadc_smartbond_data *drv =
		CONTAINER_OF(ctx, struct sdadc_smartbond_data, ctx);

	if (repeat) {
		return;
	}

	drv->buffer += drv->sequence_channel_count;
}
/* Verify the user buffer can hold active_channels uint16_t results for
 * each sampling of the sequence (including extra samplings).
 * Returns 0 when large enough, -ENOMEM otherwise.
 */
static int check_buffer_size(const struct adc_sequence *sequence,
			     uint8_t active_channels)
{
	size_t needed_buffer_size = (size_t)active_channels * sizeof(uint16_t);

	if (sequence->options != NULL) {
		needed_buffer_size *= 1 + sequence->options->extra_samplings;
	}

	if (sequence->buffer_size < needed_buffer_size) {
		LOG_ERR("Provided buffer is too small (%u/%u)",
			sequence->buffer_size, needed_buffer_size);
		return -ENOMEM;
	}

	return 0;
}
static int start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
int error;
struct sdadc_smartbond_data *data = dev->data;
if (sequence->oversampling < 7U || sequence->oversampling > 10) {
LOG_ERR("Invalid oversampling");
return -EINVAL;
}
if ((sequence->channels == 0) ||
((sequence->channels & ~BIT_MASK(SMARTBOND_SDADC_CHANNEL_COUNT)) != 0)) {
LOG_ERR("Channel scanning is not supported");
return -EINVAL;
}
if (sequence->resolution < 8 || sequence->resolution > 15) {
LOG_ERR("ADC resolution value %d is not valid",
sequence->resolution);
return -EINVAL;
}
error = check_buffer_size(sequence, 1);
if (error) {
return error;
}
data->buffer = sequence->buffer;
data->channel_read_mask = sequence->channels;
data->sequence_channel_count = pop_count(sequence->channels);
data->result_index = 0;
adc_context_start_read(&data->ctx, sequence);
error = adc_context_wait_for_completion(&data->ctx);
return error;
}
/* Conversion done ISR: store the finished channel's result, then either
 * start the next pending channel or finish the sampling and release the
 * PM lock.
 */
static void sdadc_smartbond_isr(const struct device *dev)
{
	struct sdadc_smartbond_data *data = dev->data;
	int current_channel = u32_count_trailing_zeros(data->channel_read_mask);
	SDADC->SDADC_CLEAR_INT_REG = 0;
	/* Store current channel value, result is left justified, move bits right */
	data->buffer[data->result_index++] = ((uint16_t)SDADC->SDADC_RESULT_REG) >>
		(16 - data->ctx.sequence.resolution);
	/* Exclude channel from mask for further reading */
	data->channel_read_mask ^= 1 << current_channel;
	if (data->channel_read_mask == 0) {
		sdadc_smartbond_pm_policy_state_lock_put(dev, data);
		adc_context_on_sampling_done(&data->ctx, dev);
	} else {
		/* More channels pending: chain the next conversion */
		adc_context_start_sampling(&data->ctx);
	}
	LOG_DBG("%s ISR triggered.", dev->name);
}
/* Implementation of the ADC driver API function: adc_read.
 * Synchronous read: takes the context lock and a PM/sleep lock for the
 * duration of the conversion.
 */
static int sdadc_smartbond_read(const struct device *dev,
				const struct adc_sequence *sequence)
{
	int error;
	struct sdadc_smartbond_data *data = dev->data;
	adc_context_lock(&data->ctx, false, NULL);
	sdadc_smartbond_pm_policy_state_lock_get(dev, data);
	error = start_read(dev, sequence);
	adc_context_release(&data->ctx, error);
	return error;
}
#if defined(CONFIG_ADC_ASYNC)
/* Implementation of the ADC driver API function: adc_read_async.
 * Completion is signalled on the caller supplied k_poll_signal.
 */
static int sdadc_smartbond_read_async(const struct device *dev,
				      const struct adc_sequence *sequence,
				      struct k_poll_signal *async)
{
	struct sdadc_smartbond_data *data = dev->data;
	int error;
	adc_context_lock(&data->ctx, true, async);
	sdadc_smartbond_pm_policy_state_lock_get(dev, data);
	error = start_read(dev, sequence);
	adc_context_release(&data->ctx, error);
	return error;
}
#endif /* CONFIG_ADC_ASYNC */
/* Power the SDADC up: acquire the COM power domain, program the clock
 * frequency, enable the block and apply the default pinctrl state.
 * On pinctrl failure the block is disabled and the domain released.
 * Returns 0 on success or the pinctrl error.
 */
static int sdadc_smartbond_resume(const struct device *dev)
{
	int ret;
	const struct sdadc_smartbond_cfg *config = dev->config;
	da1469x_pd_acquire(MCU_PD_DOMAIN_COM);
	SDADC->SDADC_TEST_REG =
		(SDADC->SDADC_TEST_REG & ~SDADC_SDADC_TEST_REG_SDADC_CLK_FREQ_Msk) |
		(config->sdadc_clk_freq) << SDADC_SDADC_TEST_REG_SDADC_CLK_FREQ_Pos;
	SDADC->SDADC_CTRL_REG = SDADC_SDADC_CTRL_REG_SDADC_EN_Msk;
	/*
	 * Configure dt provided device signals when available.
	 * pinctrl is optional so ENOENT is not setup failure.
	 */
	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0 && ret != -ENOENT) {
		SDADC->SDADC_CTRL_REG = 0;
		/* Release the comms domain */
		da1469x_pd_release(MCU_PD_DOMAIN_COM);
		LOG_ERR("ADC pinctrl setup failed (%d)", ret);
		return ret;
	}
	return 0;
}
#ifdef CONFIG_PM_DEVICE
/* Power the SDADC down: disable the block (and its LDO), release the
 * COM power domain and move pins to the sleep state when one is
 * defined.
 * Returns 0 on success or the pinctrl error.
 */
static int sdadc_smartbond_suspend(const struct device *dev)
{
	int ret;
	const struct sdadc_smartbond_cfg *config = dev->config;
	/* Disable the sdadc LDO */
	SDADC->SDADC_CTRL_REG = 0;
	/* Release the comms domain */
	da1469x_pd_release(MCU_PD_DOMAIN_COM);
	/*
	 * Configure dt provided device signals for sleep.
	 * pinctrl is optional so ENOENT is not setup failure.
	 */
	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP);
	if (ret < 0 && ret != -ENOENT) {
		LOG_WRN("Failed to configure the sdadc pins to inactive state");
		return ret;
	}
	return 0;
}
/* Device PM hook: dispatch resume/suspend actions to their handlers.
 * Returns -ENOTSUP for any other action.
 */
static int sdadc_smartbond_pm_action(const struct device *dev,
				     enum pm_device_action action)
{
	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		return sdadc_smartbond_resume(dev);
	case PM_DEVICE_ACTION_SUSPEND:
		return sdadc_smartbond_suspend(dev);
	default:
		return -ENOTSUP;
	}
}
#endif /* CONFIG_PM_DEVICE */
/* Driver init: with runtime PM the device starts suspended and is
 * resumed on demand; otherwise it is resumed immediately. The IRQ is
 * connected and enabled in both cases, then the adc_context is
 * unlocked.
 * Returns 0 on success or the resume/PM-enable error.
 */
static int sdadc_smartbond_init(const struct device *dev)
{
	int ret;
	struct sdadc_smartbond_data *data = dev->data;
#ifdef CONFIG_PM_DEVICE_RUNTIME
	/* Make sure device state is marked as suspended */
	pm_device_init_suspended(dev);
	ret = pm_device_runtime_enable(dev);
#else
	ret = sdadc_smartbond_resume(dev);
#endif
	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
		    sdadc_smartbond_isr, DEVICE_DT_INST_GET(0), 0);
	NVIC_ClearPendingIRQ(DT_INST_IRQN(0));
	NVIC_EnableIRQ(DT_INST_IRQN(0));
	adc_context_unlock_unconditionally(&data->ctx);
	return ret;
}
/* ADC driver API vtable for the SDADC. */
static const struct adc_driver_api sdadc_smartbond_driver_api = {
	.channel_setup = sdadc_smartbond_channel_setup,
	.read = sdadc_smartbond_read,
#ifdef CONFIG_ADC_ASYNC
	.read_async = sdadc_smartbond_read_async,
#endif
	/* Internal reference voltage, in millivolts. */
	.ref_internal = 1200,
};
/*
* There is only one instance on supported SoCs, so inst is guaranteed
* to be 0 if any instance is okay. (We use adc_0 above, so the driver
* is relying on the numeric instance value in a way that happens to
* be safe.)
*
* Just in case that assumption becomes invalid in the future, we use
* a BUILD_ASSERT().
*/
/*
 * Per-instance boilerplate: pinctrl, config/data storage, PM hooks and the
 * device definition for one SDADC instance (only instance 0 is supported).
 */
#define SDADC_INIT(inst) \
	BUILD_ASSERT((inst) == 0, \
		     "multiple instances not supported"); \
	PINCTRL_DT_INST_DEFINE(inst); \
	static const struct sdadc_smartbond_cfg sdadc_smartbond_cfg_##inst = { \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \
		.sdadc_clk_freq = DT_INST_PROP(inst, clock_freq), \
	}; \
	static struct sdadc_smartbond_data sdadc_smartbond_data_##inst = { \
		ADC_CONTEXT_INIT_TIMER(sdadc_smartbond_data_##inst, ctx), \
		ADC_CONTEXT_INIT_LOCK(sdadc_smartbond_data_##inst, ctx), \
		ADC_CONTEXT_INIT_SYNC(sdadc_smartbond_data_##inst, ctx), \
	}; \
	PM_DEVICE_DT_INST_DEFINE(inst, sdadc_smartbond_pm_action); \
	DEVICE_DT_INST_DEFINE(inst, \
			      sdadc_smartbond_init, \
			      PM_DEVICE_DT_INST_GET(inst), \
			      &sdadc_smartbond_data_##inst, \
			      &sdadc_smartbond_cfg_##inst, \
			      POST_KERNEL, \
			      CONFIG_ADC_INIT_PRIORITY, \
			      &sdadc_smartbond_driver_api);

DT_INST_FOREACH_STATUS_OKAY(SDADC_INIT)
``` | /content/code_sandbox/drivers/adc/adc_smartbond_sdadc.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,150 |
```unknown
# ADS1112 ADC configuration options
config ADC_ADS1112
bool "Texas Instruments ADS1112 ADC driver"
depends on DT_HAS_TI_ADS1112_ENABLED
select I2C
select ADC_CONFIGURABLE_INPUTS
help
Enable the driver implementation for the ADS1112
``` | /content/code_sandbox/drivers/adc/Kconfig.ads1112 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 64 |
```c
/*
*
*/
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#include <haly/nrfy_saadc.h>
#include <zephyr/dt-bindings/adc/nrf-adc.h>
#include <zephyr/linker/devicetree_regions.h>
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(adc_nrfx_saadc);
#define DT_DRV_COMPAT nordic_nrf_saadc
#if (NRF_SAADC_HAS_AIN_AS_PIN)
#if defined(CONFIG_SOC_NRF54H20)
static const uint8_t saadc_psels[NRF_SAADC_AIN7 + 1] = {
[NRF_SAADC_AIN0] = NRF_PIN_PORT_TO_PIN_NUMBER(0U, 1),
[NRF_SAADC_AIN1] = NRF_PIN_PORT_TO_PIN_NUMBER(1U, 1),
[NRF_SAADC_AIN2] = NRF_PIN_PORT_TO_PIN_NUMBER(2U, 1),
[NRF_SAADC_AIN3] = NRF_PIN_PORT_TO_PIN_NUMBER(3U, 1),
[NRF_SAADC_AIN4] = NRF_PIN_PORT_TO_PIN_NUMBER(4U, 1),
[NRF_SAADC_AIN5] = NRF_PIN_PORT_TO_PIN_NUMBER(5U, 1),
[NRF_SAADC_AIN6] = NRF_PIN_PORT_TO_PIN_NUMBER(6U, 1),
[NRF_SAADC_AIN7] = NRF_PIN_PORT_TO_PIN_NUMBER(7U, 1),
};
#elif defined(CONFIG_SOC_NRF54L15)
static const uint8_t saadc_psels[NRF_SAADC_AIN7 + 1] = {
[NRF_SAADC_AIN0] = NRF_PIN_PORT_TO_PIN_NUMBER(4U, 1),
[NRF_SAADC_AIN1] = NRF_PIN_PORT_TO_PIN_NUMBER(5U, 1),
[NRF_SAADC_AIN2] = NRF_PIN_PORT_TO_PIN_NUMBER(6U, 1),
[NRF_SAADC_AIN3] = NRF_PIN_PORT_TO_PIN_NUMBER(7U, 1),
[NRF_SAADC_AIN4] = NRF_PIN_PORT_TO_PIN_NUMBER(11U, 1),
[NRF_SAADC_AIN5] = NRF_PIN_PORT_TO_PIN_NUMBER(12U, 1),
[NRF_SAADC_AIN6] = NRF_PIN_PORT_TO_PIN_NUMBER(13U, 1),
[NRF_SAADC_AIN7] = NRF_PIN_PORT_TO_PIN_NUMBER(14U, 1),
};
#endif
#else
BUILD_ASSERT((NRF_SAADC_AIN0 == NRF_SAADC_INPUT_AIN0) &&
(NRF_SAADC_AIN1 == NRF_SAADC_INPUT_AIN1) &&
(NRF_SAADC_AIN2 == NRF_SAADC_INPUT_AIN2) &&
(NRF_SAADC_AIN3 == NRF_SAADC_INPUT_AIN3) &&
(NRF_SAADC_AIN4 == NRF_SAADC_INPUT_AIN4) &&
(NRF_SAADC_AIN5 == NRF_SAADC_INPUT_AIN5) &&
(NRF_SAADC_AIN6 == NRF_SAADC_INPUT_AIN6) &&
(NRF_SAADC_AIN7 == NRF_SAADC_INPUT_AIN7) &&
#if defined(SAADC_CH_PSELP_PSELP_VDDHDIV5)
(NRF_SAADC_VDDHDIV5 == NRF_SAADC_INPUT_VDDHDIV5) &&
#endif
#if defined(SAADC_CH_PSELP_PSELP_VDD)
(NRF_SAADC_VDD == NRF_SAADC_INPUT_VDD) &&
#endif
1,
"Definitions from nrf-adc.h do not match those from nrf_saadc.h");
#endif
#ifdef CONFIG_SOC_NRF54H20
/* nRF54H20 always uses bounce buffers in RAM */
#define SAADC_MEMORY_SECTION \
COND_CODE_1(DT_NODE_HAS_PROP(DT_NODELABEL(adc), memory_regions), \
(__attribute__((__section__(LINKER_DT_NODE_REGION_NAME( \
DT_PHANDLE(DT_NODELABEL(adc), memory_regions)))))), \
())
static uint16_t adc_samples_buffer[SAADC_CH_NUM] SAADC_MEMORY_SECTION;
#define ADC_BUFFER_IN_RAM
#endif /* CONFIG_SOC_NRF54H20 */
/* Driver state; a single SAADC instance, so one static object suffices. */
struct driver_data {
	struct adc_context ctx;
	/* Positive input selection per channel; 0 marks "not configured"
	 * (checked in start_read() before a channel may be sampled).
	 */
	uint8_t positive_inputs[SAADC_CH_NUM];
	/* Bitmask of channels configured in single-ended mode, used to clamp
	 * negative readings to 0 after a sequence completes.
	 */
	uint8_t single_ended_channels;
#if defined(ADC_BUFFER_IN_RAM)
	/* Bounce buffer the hardware writes to, and the caller's buffer. */
	void *samples_buffer;
	void *user_buffer;
	uint8_t active_channels;
#endif
};

static struct driver_data m_data = {
	ADC_CONTEXT_INIT_TIMER(m_data, ctx),
	ADC_CONTEXT_INIT_LOCK(m_data, ctx),
	ADC_CONTEXT_INIT_SYNC(m_data, ctx),
#if defined(ADC_BUFFER_IN_RAM)
	.samples_buffer = adc_samples_buffer,
#endif
};
/*
 * Return the byte size of @p number_of_samples samples for @p sequence.
 * Samples take one byte each only when the hardware produces 8-bit samples
 * and the sequence resolution is 8; otherwise two bytes each.
 */
static uint32_t samples_to_bytes(const struct adc_sequence *sequence, uint16_t number_of_samples)
{
	bool byte_samples = (NRF_SAADC_8BIT_SAMPLE_WIDTH == 8) &&
			    (sequence->resolution == 8);

	return byte_samples ? number_of_samples : number_of_samples * 2U;
}
/*
 * Translate a Zephyr ADC acquisition-time encoding into the SAADC TACQ
 * register value.
 *
 * Two hardware generations exist: one with an enumerated set of fixed
 * acquisition times (NRF_SAADC_HAS_ACQTIME_ENUM) and one where TACQ counts
 * 125 ns units.
 *
 * @return 0 on success, -EINVAL when the requested time is unsupported.
 */
static int adc_convert_acq_time(uint16_t acquisition_time, nrf_saadc_acqtime_t *p_tacq_val)
{
	int result = 0;

#if NRF_SAADC_HAS_ACQTIME_ENUM
	/* Only the discrete times below are representable; default is 10 us. */
	switch (acquisition_time) {
	case ADC_ACQ_TIME(ADC_ACQ_TIME_MICROSECONDS, 3):
		*p_tacq_val = NRF_SAADC_ACQTIME_3US;
		break;
	case ADC_ACQ_TIME(ADC_ACQ_TIME_MICROSECONDS, 5):
		*p_tacq_val = NRF_SAADC_ACQTIME_5US;
		break;
	case ADC_ACQ_TIME_DEFAULT:
	case ADC_ACQ_TIME(ADC_ACQ_TIME_MICROSECONDS, 10):
		*p_tacq_val = NRF_SAADC_ACQTIME_10US;
		break;
	case ADC_ACQ_TIME(ADC_ACQ_TIME_MICROSECONDS, 15):
		*p_tacq_val = NRF_SAADC_ACQTIME_15US;
		break;
	case ADC_ACQ_TIME(ADC_ACQ_TIME_MICROSECONDS, 20):
		*p_tacq_val = NRF_SAADC_ACQTIME_20US;
		break;
	case ADC_ACQ_TIME(ADC_ACQ_TIME_MICROSECONDS, 40):
		*p_tacq_val = NRF_SAADC_ACQTIME_40US;
		break;
	default:
		result = -EINVAL;
	}
#else
#define MINIMUM_ACQ_TIME_IN_NS 125
#define DEFAULT_ACQ_TIME_IN_NS 10000

	nrf_saadc_acqtime_t tacq = 0;
	/* Normalize the requested time to nanoseconds. */
	uint16_t acq_time =
		(acquisition_time == ADC_ACQ_TIME_DEFAULT
			 ? DEFAULT_ACQ_TIME_IN_NS
			 : (ADC_ACQ_TIME_VALUE(acquisition_time) *
			    (ADC_ACQ_TIME_UNIT(acquisition_time) == ADC_ACQ_TIME_MICROSECONDS
				     ? 1000
				     : 1)));

	/* TACQ counts 125 ns units, register value is (units - 1). */
	tacq = (nrf_saadc_acqtime_t)(acq_time / MINIMUM_ACQ_TIME_IN_NS) - 1;
	if ((tacq > NRF_SAADC_ACQTIME_MAX) || (acq_time < MINIMUM_ACQ_TIME_IN_NS)) {
		result = -EINVAL;
	} else {
		*p_tacq_val = tacq;
	}
#endif

	return result;
}
/* Implementation of the ADC driver API function: adc_channel_setup.
 *
 * Validates gain/reference/acquisition time against what this SoC's SAADC
 * supports, records the channel mode (single-ended vs differential) and the
 * positive input selection, and writes the channel configuration with the
 * positive input left disconnected until the channel is actually sampled.
 */
static int adc_nrfx_channel_setup(const struct device *dev,
				  const struct adc_channel_cfg *channel_cfg)
{
	nrf_saadc_channel_config_t config = {
#if NRF_SAADC_HAS_CH_CONFIG_RES
		.resistor_p = NRF_SAADC_RESISTOR_DISABLED,
		.resistor_n = NRF_SAADC_RESISTOR_DISABLED,
#endif
		/* Burst is (re)configured per sequence in start_read(). */
		.burst = NRF_SAADC_BURST_DISABLED,
	};
	uint8_t channel_id = channel_cfg->channel_id;
	uint32_t input_negative = channel_cfg->input_negative;

	if (channel_id >= SAADC_CH_NUM) {
		return -EINVAL;
	}

	/* Map the generic gain to this SoC's available gain settings. */
	switch (channel_cfg->gain) {
#if defined(SAADC_CH_CONFIG_GAIN_Gain1_6)
	case ADC_GAIN_1_6:
		config.gain = NRF_SAADC_GAIN1_6;
		break;
#endif
#if defined(SAADC_CH_CONFIG_GAIN_Gain1_5)
	case ADC_GAIN_1_5:
		config.gain = NRF_SAADC_GAIN1_5;
		break;
#endif
#if defined(SAADC_CH_CONFIG_GAIN_Gain1_4) || defined(SAADC_CH_CONFIG_GAIN_Gain2_8)
	case ADC_GAIN_1_4:
		config.gain = NRF_SAADC_GAIN1_4;
		break;
#endif
#if defined(SAADC_CH_CONFIG_GAIN_Gain1_3) || defined(SAADC_CH_CONFIG_GAIN_Gain2_6)
	case ADC_GAIN_1_3:
		config.gain = NRF_SAADC_GAIN1_3;
		break;
#endif
#if defined(SAADC_CH_CONFIG_GAIN_Gain2_5)
	case ADC_GAIN_2_5:
		config.gain = NRF_SAADC_GAIN2_5;
		break;
#endif
#if defined(SAADC_CH_CONFIG_GAIN_Gain1_2) || defined(SAADC_CH_CONFIG_GAIN_Gain2_4)
	case ADC_GAIN_1_2:
		config.gain = NRF_SAADC_GAIN1_2;
		break;
#endif
#if defined(SAADC_CH_CONFIG_GAIN_Gain2_3)
	case ADC_GAIN_2_3:
		config.gain = NRF_SAADC_GAIN2_3;
		break;
#endif
	case ADC_GAIN_1:
		config.gain = NRF_SAADC_GAIN1;
		break;
	case ADC_GAIN_2:
		config.gain = NRF_SAADC_GAIN2;
		break;
#if defined(SAADC_CH_CONFIG_GAIN_Gain4)
	case ADC_GAIN_4:
		config.gain = NRF_SAADC_GAIN4;
		break;
#endif
	default:
		LOG_ERR("Selected ADC gain is not valid");
		return -EINVAL;
	}

	switch (channel_cfg->reference) {
#if defined(SAADC_CH_CONFIG_REFSEL_Internal)
	case ADC_REF_INTERNAL:
		config.reference = NRF_SAADC_REFERENCE_INTERNAL;
		break;
#endif
#if defined(SAADC_CH_CONFIG_REFSEL_VDD1_4)
	case ADC_REF_VDD_1_4:
		config.reference = NRF_SAADC_REFERENCE_VDD4;
		break;
#endif
#if defined(SAADC_CH_CONFIG_REFSEL_External)
	case ADC_REF_EXTERNAL0:
		config.reference = NRF_SAADC_REFERENCE_EXTERNAL;
		break;
#endif
	default:
		LOG_ERR("Selected ADC reference is not valid");
		return -EINVAL;
	}

	int ret = adc_convert_acq_time(channel_cfg->acquisition_time, &config.acq_time);

	if (ret) {
		LOG_ERR("Selected ADC acquisition time is not valid");
		return -EINVAL;
	}

	/* Store channel mode to allow correcting negative readings in single-ended mode
	 * after ADC sequence ends.
	 */
	if (channel_cfg->differential) {
		config.mode = NRF_SAADC_MODE_DIFFERENTIAL;
		m_data.single_ended_channels &= ~BIT(channel_cfg->channel_id);
	} else {
		config.mode = NRF_SAADC_MODE_SINGLE_ENDED;
		m_data.single_ended_channels |= BIT(channel_cfg->channel_id);
	}

	/* Keep the channel disabled in hardware (set positive input to
	 * NRF_SAADC_INPUT_DISABLED) until it is selected to be included
	 * in a sampling sequence.
	 */

#if (NRF_SAADC_HAS_AIN_AS_PIN)
	/* On these SoCs the AINx selections map to GPIO pins via saadc_psels. */
	if ((channel_cfg->input_positive > NRF_SAADC_AIN7) ||
	    (channel_cfg->input_positive < NRF_SAADC_AIN0)) {
		return -EINVAL;
	}

	if (config.mode == NRF_SAADC_MODE_DIFFERENTIAL) {
		if (input_negative > NRF_SAADC_AIN7 ||
		    input_negative < NRF_SAADC_AIN0) {
			return -EINVAL;
		}
		input_negative = saadc_psels[input_negative];
	} else {
		input_negative = NRF_SAADC_INPUT_DISABLED;
	}

	/* Store the positive input selection in a dedicated array,
	 * to get it later when the channel is selected for a sampling
	 * and to mark the channel as configured (ready to be selected).
	 */
	m_data.positive_inputs[channel_id] = saadc_psels[channel_cfg->input_positive];
#else
	m_data.positive_inputs[channel_id] = channel_cfg->input_positive;
#endif

	nrf_saadc_channel_init(NRF_SAADC, channel_id, &config);
	nrf_saadc_channel_input_set(NRF_SAADC,
				    channel_id,
				    NRF_SAADC_INPUT_DISABLED,
				    input_negative);

	return 0;
}
/*
 * adc_context callback: begin one sampling round. When the sequence asks for
 * calibration, only CALIBRATEOFFSET is triggered here; START/SAMPLE follow
 * from the ISR once the CALIBRATEDONE event fires.
 */
static void adc_context_start_sampling(struct adc_context *ctx)
{
	nrf_saadc_enable(NRF_SAADC);

	if (ctx->sequence.calibrate) {
		nrf_saadc_task_trigger(NRF_SAADC,
				       NRF_SAADC_TASK_CALIBRATEOFFSET);
	} else {
		nrf_saadc_task_trigger(NRF_SAADC, NRF_SAADC_TASK_START);
		nrf_saadc_task_trigger(NRF_SAADC, NRF_SAADC_TASK_SAMPLE);
	}
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat)
{
ARG_UNUSED(ctx);
if (!repeat) {
#if defined(ADC_BUFFER_IN_RAM)
m_data.user_buffer = (uint8_t *)m_data.user_buffer +
samples_to_bytes(&ctx->sequence, nrfy_saadc_amount_get(NRF_SAADC));
#else
nrf_saadc_value_t *buffer =
(uint8_t *)nrf_saadc_buffer_pointer_get(NRF_SAADC) +
samples_to_bytes(&ctx->sequence, nrfy_saadc_amount_get(NRF_SAADC));
nrfy_saadc_buffer_pointer_set(NRF_SAADC, buffer);
#endif
}
}
/*
 * Validate the sequence resolution and program the SAADC RESOLUTION
 * register. Only 8/10/12/14 bits are supported.
 */
static int set_resolution(const struct adc_sequence *sequence)
{
	nrf_saadc_resolution_t res;

	if (sequence->resolution == 8) {
		res = NRF_SAADC_RESOLUTION_8BIT;
	} else if (sequence->resolution == 10) {
		res = NRF_SAADC_RESOLUTION_10BIT;
	} else if (sequence->resolution == 12) {
		res = NRF_SAADC_RESOLUTION_12BIT;
	} else if (sequence->resolution == 14) {
		res = NRF_SAADC_RESOLUTION_14BIT;
	} else {
		LOG_ERR("ADC resolution value %d is not valid",
			sequence->resolution);
		return -EINVAL;
	}

	nrf_saadc_resolution_set(NRF_SAADC, res);
	return 0;
}
/*
 * Validate and program the oversampling ratio (2^N, N = 0..8). The hardware
 * only supports oversampling when a single channel is sampled.
 */
static int set_oversampling(const struct adc_sequence *sequence,
			    uint8_t active_channels)
{
	static const nrf_saadc_oversample_t ratios[] = {
		NRF_SAADC_OVERSAMPLE_DISABLED,
		NRF_SAADC_OVERSAMPLE_2X,
		NRF_SAADC_OVERSAMPLE_4X,
		NRF_SAADC_OVERSAMPLE_8X,
		NRF_SAADC_OVERSAMPLE_16X,
		NRF_SAADC_OVERSAMPLE_32X,
		NRF_SAADC_OVERSAMPLE_64X,
		NRF_SAADC_OVERSAMPLE_128X,
		NRF_SAADC_OVERSAMPLE_256X,
	};

	if ((active_channels > 1) && (sequence->oversampling > 0)) {
		LOG_ERR(
			"Oversampling is supported for single channel only");
		return -EINVAL;
	}

	if (sequence->oversampling >= ARRAY_SIZE(ratios)) {
		LOG_ERR("Oversampling value %d is not valid",
			sequence->oversampling);
		return -EINVAL;
	}

	nrf_saadc_oversample_set(NRF_SAADC, ratios[sequence->oversampling]);
	return 0;
}
/*
 * Verify the caller's buffer can hold one sample per active channel for
 * every sampling (including extra_samplings when options are given).
 */
static int check_buffer_size(const struct adc_sequence *sequence,
			     uint8_t active_channels)
{
	size_t needed = samples_to_bytes(sequence, active_channels);

	if (sequence->options != NULL) {
		needed *= (1 + sequence->options->extra_samplings);
	}

	if (sequence->buffer_size < needed) {
		LOG_ERR("Provided buffer is too small (%u/%u)",
			sequence->buffer_size, needed);
		return -ENOMEM;
	}

	return 0;
}
/* True when the sequence includes at least one single-ended channel. */
static bool has_single_ended(const struct adc_sequence *sequence)
{
	return (sequence->channels & m_data.single_ended_channels) != 0;
}
/*
 * Clamp negative samples from single-ended channels to 0.
 *
 * Walks the hardware result buffer in channel order: the buffer holds one
 * sample per SELECTED channel, so the sample pointer advances only for
 * selected channels while the bit scan covers every channel id up to the
 * highest single-ended one. Assumes 16-bit samples (the caller only invokes
 * this when resolutions producing int16_t results are in use).
 */
static void correct_single_ended(const struct adc_sequence *sequence)
{
	uint16_t channel_bit = BIT(0);
	uint8_t selected_channels = sequence->channels;
	uint8_t single_ended_channels = m_data.single_ended_channels;
	int16_t *sample = nrf_saadc_buffer_pointer_get(NRF_SAADC);

	while (channel_bit <= single_ended_channels) {
		if (channel_bit & selected_channels) {
			if ((channel_bit & single_ended_channels) && (*sample < 0)) {
				*sample = 0;
			}
			/* One buffer slot per selected channel. */
			sample++;
		}
		channel_bit <<= 1;
	}
}
/*
 * Common entry for blocking and async reads (called with the context lock
 * held): validate the channel selection, enable exactly the requested
 * channels in hardware, program resolution/oversampling, check the buffer
 * and hand off to the adc_context state machine.
 *
 * @return 0 or a sampling result on success, negative errno on invalid
 *         sequence parameters.
 */
static int start_read(const struct device *dev,
		      const struct adc_sequence *sequence)
{
	int error;
	uint32_t selected_channels = sequence->channels;
	uint8_t resolution = sequence->resolution;
	uint8_t active_channels;
	uint8_t channel_id;

	/* Signal an error if channel selection is invalid (no channels or
	 * a non-existing one is selected).
	 */
	if (!selected_channels ||
	    (selected_channels & ~BIT_MASK(SAADC_CH_NUM))) {
		LOG_ERR("Invalid selection of channels");
		return -EINVAL;
	}

	active_channels = 0U;

	/* Enable only the channels selected for the pointed sequence.
	 * Disable all the rest.
	 */
	channel_id = 0U;
	do {
		if (selected_channels & BIT(channel_id)) {
			/* Signal an error if a selected channel has not been
			 * configured yet.
			 */
			if (m_data.positive_inputs[channel_id] == 0U) {
				LOG_ERR("Channel %u not configured",
					channel_id);
				return -EINVAL;
			}
			/* Signal an error if the channel is configured as
			 * single ended with a resolution which is identical
			 * to the sample bit size. The SAADC's "single ended"
			 * mode is really differential mode with the
			 * negative input tied to ground. We can therefore
			 * observe negative values if the positive input falls
			 * below ground. If the sample bitsize is larger than
			 * the resolution, we can detect negative values and
			 * correct them to 0 after the sequencen has ended.
			 */
			if ((m_data.single_ended_channels & BIT(channel_id)) &&
			    (NRF_SAADC_8BIT_SAMPLE_WIDTH == 8 && resolution == 8)) {
				LOG_ERR("Channel %u invalid single ended resolution",
					channel_id);
				return -EINVAL;
			}
			/* When oversampling is used, the burst mode needs to
			 * be activated. Unfortunately, this mode cannot be
			 * activated permanently in the channel setup, because
			 * then the multiple channel sampling fails (the END
			 * event is not generated) after switching to a single
			 * channel sampling and back. Thus, when oversampling
			 * is not used (hence, the multiple channel sampling is
			 * possible), the burst mode have to be deactivated.
			 */
			nrf_saadc_burst_set(NRF_SAADC, channel_id,
				(sequence->oversampling != 0U ?
					NRF_SAADC_BURST_ENABLED :
					NRF_SAADC_BURST_DISABLED));
			nrf_saadc_channel_pos_input_set(
				NRF_SAADC,
				channel_id,
				m_data.positive_inputs[channel_id]);
			++active_channels;
		} else {
			nrf_saadc_burst_set(
				NRF_SAADC,
				channel_id,
				NRF_SAADC_BURST_DISABLED);
			nrf_saadc_channel_pos_input_set(
				NRF_SAADC,
				channel_id,
				NRF_SAADC_INPUT_DISABLED);
		}
	} while (++channel_id < SAADC_CH_NUM);

	error = set_resolution(sequence);
	if (error) {
		return error;
	}

	error = set_oversampling(sequence, active_channels);
	if (error) {
		return error;
	}

	error = check_buffer_size(sequence, active_channels);
	if (error) {
		return error;
	}

#if defined(ADC_BUFFER_IN_RAM)
	/* Hardware writes into the bounce buffer; results are copied to the
	 * user buffer in the ISR.
	 */
	m_data.user_buffer = sequence->buffer;
	m_data.active_channels = active_channels;

	nrf_saadc_buffer_init(NRF_SAADC,
			      (nrf_saadc_value_t *)m_data.samples_buffer,
			      active_channels);
#else
	nrf_saadc_buffer_init(NRF_SAADC,
			      (nrf_saadc_value_t *)sequence->buffer,
			      active_channels);
#endif

	adc_context_start_read(&m_data.ctx, sequence);

	return adc_context_wait_for_completion(&m_data.ctx);
}
/* Implementation of the ADC driver API function: adc_read (blocking). */
static int adc_nrfx_read(const struct device *dev,
			 const struct adc_sequence *sequence)
{
	int rc;

	adc_context_lock(&m_data.ctx, false, NULL);
	rc = start_read(dev, sequence);
	adc_context_release(&m_data.ctx, rc);

	return rc;
}
#ifdef CONFIG_ADC_ASYNC
/* Implementation of the ADC driver API function: adc_read_async. */
static int adc_nrfx_read_async(const struct device *dev,
			       const struct adc_sequence *sequence,
			       struct k_poll_signal *async)
{
	int rc;

	adc_context_lock(&m_data.ctx, true, async);
	rc = start_read(dev, sequence);
	adc_context_release(&m_data.ctx, rc);

	return rc;
}
#endif /* CONFIG_ADC_ASYNC */
/*
 * SAADC interrupt handler.
 *
 * END: the sequence finished — stop and disable the peripheral, fix up
 * single-ended negatives, copy from the bounce buffer when one is used and
 * notify the adc_context.
 * CALIBRATEDONE: calibration requested by the sequence completed — stop
 * (anomaly workaround, see below) and kick off the actual START/SAMPLE.
 */
static void saadc_irq_handler(const struct device *dev)
{
	if (nrf_saadc_event_check(NRF_SAADC, NRF_SAADC_EVENT_END)) {
		nrf_saadc_event_clear(NRF_SAADC, NRF_SAADC_EVENT_END);

		nrf_saadc_task_trigger(NRF_SAADC, NRF_SAADC_TASK_STOP);
		nrf_saadc_disable(NRF_SAADC);

		if (has_single_ended(&m_data.ctx.sequence)) {
			correct_single_ended(&m_data.ctx.sequence);
		}

#if defined(ADC_BUFFER_IN_RAM)
		memcpy(m_data.user_buffer, m_data.samples_buffer,
		       samples_to_bytes(&m_data.ctx.sequence, m_data.active_channels));
#endif

		adc_context_on_sampling_done(&m_data.ctx, dev);
	} else if (nrf_saadc_event_check(NRF_SAADC,
					 NRF_SAADC_EVENT_CALIBRATEDONE)) {
		nrf_saadc_event_clear(NRF_SAADC, NRF_SAADC_EVENT_CALIBRATEDONE);

		/*
		 * The workaround for Nordic nRF52832 anomalies 86 and
		 * 178 is an explicit STOP after CALIBRATEOFFSET
		 * before issuing START.
		 */
		nrf_saadc_task_trigger(NRF_SAADC, NRF_SAADC_TASK_STOP);
		nrf_saadc_task_trigger(NRF_SAADC, NRF_SAADC_TASK_START);
		nrf_saadc_task_trigger(NRF_SAADC, NRF_SAADC_TASK_SAMPLE);
	}
}
/*
 * Driver init: clear stale events, unmask the END/CALIBRATEDONE interrupts,
 * wire the ISR and release the ADC context lock.
 *
 * Fixes: the IRQ line was enabled with NRFX_IRQ_ENABLE() BEFORE the handler
 * was attached via IRQ_CONNECT(), so a pending or spurious SAADC interrupt
 * could be taken with no handler installed. Connect first, then enable.
 */
static int init_saadc(const struct device *dev)
{
	nrf_saadc_event_clear(NRF_SAADC, NRF_SAADC_EVENT_END);
	nrf_saadc_event_clear(NRF_SAADC, NRF_SAADC_EVENT_CALIBRATEDONE);
	nrf_saadc_int_enable(NRF_SAADC,
			     NRF_SAADC_INT_END | NRF_SAADC_INT_CALIBRATEDONE);

	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
		    saadc_irq_handler, DEVICE_DT_INST_GET(0), 0);
	NRFX_IRQ_ENABLE(DT_INST_IRQN(0));

	adc_context_unlock_unconditionally(&m_data.ctx);

	return 0;
}
/* ADC driver API vtable; internal reference voltage (mV) varies per SoC. */
static const struct adc_driver_api adc_nrfx_driver_api = {
	.channel_setup = adc_nrfx_channel_setup,
	.read = adc_nrfx_read,
#ifdef CONFIG_ADC_ASYNC
	.read_async = adc_nrfx_read_async,
#endif
#if defined(CONFIG_SOC_NRF54L15)
	.ref_internal = 900,	/* mV */
#elif defined(CONFIG_SOC_NRF54H20)
	.ref_internal = 1024,	/* mV */
#else
	.ref_internal = 600,	/* mV */
#endif
};
/*
* There is only one instance on supported SoCs, so inst is guaranteed
* to be 0 if any instance is okay. (We use adc_0 above, so the driver
* is relying on the numeric instance value in a way that happens to
* be safe.)
*
* Just in case that assumption becomes invalid in the future, we use
* a BUILD_ASSERT().
*/
/* Device definition for the single SAADC instance; no per-instance data,
 * config or PM hooks are needed (all state lives in m_data).
 */
#define SAADC_INIT(inst)					\
	BUILD_ASSERT((inst) == 0,				\
		     "multiple instances not supported");	\
	DEVICE_DT_INST_DEFINE(0,				\
			      init_saadc,			\
			      NULL,				\
			      NULL,				\
			      NULL,				\
			      POST_KERNEL,			\
			      CONFIG_ADC_INIT_PRIORITY,		\
			      &adc_nrfx_driver_api);

DT_INST_FOREACH_STATUS_OKAY(SAADC_INIT)
``` | /content/code_sandbox/drivers/adc/adc_nrfx_saadc.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,539 |
```unknown
# ADC configuration options
config ADC_MCUX_ADC12
bool "MCUX ADC12 driver"
default y
depends on DT_HAS_NXP_KINETIS_ADC12_ENABLED
select PINCTRL
help
Enable the MCUX ADC12 driver.
config ADC_MCUX_ADC16
bool "MCUX ADC16 driver"
default y
depends on DT_HAS_NXP_KINETIS_ADC16_ENABLED
select PINCTRL
help
Enable the MCUX ADC16 driver.
config ADC_MCUX_12B1MSPS_SAR
bool "MCUX 12B1MSPS SAR ADC driver"
default y
depends on DT_HAS_NXP_MCUX_12B1MSPS_SAR_ENABLED
help
Enable the MCUX 12B1MSPS SAR ADC driver.
config ADC_MCUX_LPADC
bool "MCUX LPADC driver"
default y
select ADC_CONFIGURABLE_INPUTS
select REGULATOR
depends on DT_HAS_NXP_LPC_LPADC_ENABLED
help
Enable the MCUX LPADC driver.
config ADC_MCUX_ETC
bool "MCUX ADC ETC driver"
depends on HAS_MCUX_ADC_ETC
help
Enable the MCUX ADC ETC driver.
config ADC_MCUX_GAU
bool "MCUX GAU ADC driver"
default y
depends on DT_HAS_NXP_GAU_ADC_ENABLED
select ADC_CONFIGURABLE_INPUTS
help
Enable the GAU ADC driver
if ADC_MCUX_ADC16
choice
prompt "Clock Divide Selection"
default ADC_MCUX_ADC16_CLK_DIV_RATIO_1
config ADC_MCUX_ADC16_CLK_DIV_RATIO_1
bool "Divide ratio is 1"
config ADC_MCUX_ADC16_CLK_DIV_RATIO_2
bool "Divide ratio is 2"
config ADC_MCUX_ADC16_CLK_DIV_RATIO_4
bool "Divide ratio is 4"
config ADC_MCUX_ADC16_CLK_DIV_RATIO_8
bool "Divide ratio is 8"
endchoice
choice ADC_MCUX_ADC16_VREF
prompt "Voltage Reference Selection"
default ADC_MCUX_ADC16_VREF_DEFAULT
config ADC_MCUX_ADC16_VREF_DEFAULT
bool "Default voltage reference pair V_REFH and V_REFL"
config ADC_MCUX_ADC16_VREF_ALTERNATE
bool "Alternate reference pair"
endchoice
config ADC_MCUX_ADC16_ENABLE_EDMA
bool "EDMA for adc driver"
depends on HAS_MCUX_ADC16 && HAS_MCUX_EDMA
help
	  Enable EDMA support for the MCUX ADC16 driver.
if ADC_MCUX_ADC16_ENABLE_EDMA
config ADC_MCUX_ADC16_HW_TRIGGER
bool "ADC HW TRIGGER"
default y
help
Support HW Trigger ADC
endif # ADC_MCUX_ADC16_ENABLE_EDMA
endif # ADC_MCUX_ADC16
if ADC_MCUX_LPADC
config LPADC_DO_OFFSET_CALIBRATION
bool "Do offset calibration"
help
Do offset calibration
config LPADC_CHANNEL_COUNT
int "LPADC channel count"
default 15
range 1 15
help
Amount of hardware command channels to use, reduce to save RAM.
The user can reduce this value if their application uses fewer than
15 ADC channels. This value corresponds to how many of the CMD
registers can be configured within the ADC.
endif # ADC_MCUX_LPADC
``` | /content/code_sandbox/drivers/adc/Kconfig.mcux | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 712 |
```unknown
#
config ADC_ADS7052
bool "Texas instruments ADS7052 SPI"
default y
depends on DT_HAS_TI_ADS7052_ENABLED
select SPI
select ADC_CONFIGURABLE_INPUTS
help
Enable the driver implementation for the ADS7052
if ADC_ADS7052
config ADC_ADS7052_INIT_PRIORITY
int "ADS7052 init priority"
default 80
help
ADS7052 device initialization priority must come
after SPI initialization
config ADC_ADS7052_ACQUISITION_THREAD_STACK_SIZE
int "Stack size for the ADC data acquisition thread"
default 512
help
Size of the stack used for the internal data acquisition
thread.
config ADC_ADS7052_ACQUISITION_THREAD_PRIO
int "Priority for the ADC data acquisition thread"
default 0
help
Priority level for the internal ADC data acquisition thread.
endif # ADC_ADS7052
``` | /content/code_sandbox/drivers/adc/Kconfig.ads7052 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 197 |
```unknown
#
config ADC_AMBIQ
bool "Ambiq Adc Driver"
default y
depends on DT_HAS_AMBIQ_ADC_ENABLED
select AMBIQ_HAL
select AMBIQ_HAL_USE_ADC
help
	  Enables the ADC driver for Ambiq devices.
``` | /content/code_sandbox/drivers/adc/Kconfig.ambiq | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 57 |
```c
/*
*
*/
#define DT_DRV_COMPAT ti_cc13xx_cc26xx_adc
#include <errno.h>
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(adc_cc13xx_cc26xx);
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/drivers/adc.h>
#include <soc.h>
/* Driverlib includes */
#include <inc/hw_types.h>
#include <driverlib/interrupt.h>
#include <driverlib/ioc.h>
#include <driverlib/rom.h>
#include <driverlib/prcm.h>
#include <driverlib/aux_adc.h>
#include <ti/devices/cc13x2_cc26x2/inc/hw_aux_evctl.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
/**
* Channels are based on ADC_COMPB_IN_* hal_ti definitions, max. index is 16 (included).
*/
#define MAX_CHAN_ID 0x10

/** Internal sample time unit conversion entry. */
struct adc_cc13xx_cc26xx_sample_time_entry {
	uint16_t time_us;	/* sample time, microseconds (rounded down) */
	uint8_t reg_value;	/* matching AUXADC_SAMPLE_TIME_* value */
};

/** Maps standard unit sample times (us) to internal (raw hal_ti register) values */
static const struct adc_cc13xx_cc26xx_sample_time_entry adc_cc13xx_sample_times[] = {
	{ 2, AUXADC_SAMPLE_TIME_2P7_US },
	{ 5, AUXADC_SAMPLE_TIME_5P3_US },
	{ 10, AUXADC_SAMPLE_TIME_10P6_US },
	{ 21, AUXADC_SAMPLE_TIME_21P3_US },
	{ 42, AUXADC_SAMPLE_TIME_42P6_US },
	{ 85, AUXADC_SAMPLE_TIME_85P3_US },
	{ 170, AUXADC_SAMPLE_TIME_170_US },
	{ 341, AUXADC_SAMPLE_TIME_341_US },
	{ 682, AUXADC_SAMPLE_TIME_682_US },
	{ 1370, AUXADC_SAMPLE_TIME_1P37_MS },
	{ 2730, AUXADC_SAMPLE_TIME_2P73_MS },
	{ 5460, AUXADC_SAMPLE_TIME_5P46_MS },
	{ 10900, AUXADC_SAMPLE_TIME_10P9_MS },
};

/* Per-device runtime state. */
struct adc_cc13xx_cc26xx_data {
	struct adc_context ctx;
	const struct device *dev;
	uint32_t ref_source;	/* AUXADC_REF_* reference selection */
	uint8_t sample_time;	/* AUXADC_SAMPLE_TIME_* register value */
	uint16_t *buffer;	/* next result slot in the caller's buffer */
	uint16_t *repeat_buffer;
};

/* Per-device constant configuration. */
struct adc_cc13xx_cc26xx_cfg {
	unsigned long base;
	void (*irq_cfg_func)(void);
};

static void adc_cc13xx_cc26xx_isr(const struct device *dev);
static void adc_cc13xx_cc26xx_isr(const struct device *dev);
/*
 * adc_context callback: (re)arm the AUX ADC with the configured reference
 * and sample time, then manually trigger one conversion; the result is
 * collected in the AUX_ADC_IRQ handler.
 */
static void adc_context_start_sampling(struct adc_context *ctx)
{
	struct adc_cc13xx_cc26xx_data *data =
		CONTAINER_OF(ctx, struct adc_cc13xx_cc26xx_data, ctx);

	data->repeat_buffer = data->buffer;

	AUXADCEnableSync(data->ref_source, data->sample_time, AUXADC_TRIGGER_MANUAL);
	AUXADCGenManualTrigger();
}
/*
 * adc_context callback: rewind to the start of the current sampling on
 * repeat, otherwise advance to the next result slot.
 */
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
					      bool repeat)
{
	struct adc_cc13xx_cc26xx_data *data =
		CONTAINER_OF(ctx, struct adc_cc13xx_cc26xx_data, ctx);

	data->buffer = repeat ? data->repeat_buffer : data->buffer + 1;
}
/*
 * Driver init: make sure the AUX ADC is idle, clear any stale AUX event
 * flags, hook up the interrupt and release the ADC context lock.
 */
static int adc_cc13xx_cc26xx_init(const struct device *dev)
{
	struct adc_cc13xx_cc26xx_data *data = dev->data;
	const struct adc_cc13xx_cc26xx_cfg *config = dev->config;

	data->dev = dev;

	/* clear any previous events */
	AUXADCDisable();
	HWREG(AUX_EVCTL_BASE + AUX_EVCTL_O_EVTOMCUFLAGSCLR) =
		(AUX_EVCTL_EVTOMCUFLAGS_AUX_ADC_IRQ | AUX_EVCTL_EVTOMCUFLAGS_AUX_ADC_DONE);

	config->irq_cfg_func();

	adc_context_unlock_unconditionally(&data->ctx);

	return 0;
}
/*
 * Implementation of the ADC driver API function: adc_channel_setup.
 *
 * Validates channel id, gain, reference and acquisition time, converts the
 * requested acquisition time to the AUXADC sample-time setting, and selects
 * the input.
 *
 * Fixes: the sample-time search assigned data->sample_time only on
 * NON-matching iterations, so it ended up with the largest table entry
 * strictly BELOW the requested time (e.g. a 10 us request selected the
 * 5.3 us setting) — i.e. the input could be sampled for less time than
 * asked. The assignment now happens inside the match branch, rounding the
 * request UP to the nearest supported sample time.
 */
static int adc_cc13xx_cc26xx_channel_setup(const struct device *dev,
					   const struct adc_channel_cfg *channel_cfg)
{
	struct adc_cc13xx_cc26xx_data *data = dev->data;
	const uint8_t ch = channel_cfg->channel_id;
	uint16_t sample_time_us = 0;
	uint8_t i;

	if (ch > MAX_CHAN_ID) {
		LOG_ERR("Channel 0x%X is not supported, max 0x%X", ch, MAX_CHAN_ID);
		return -EINVAL;
	}

	/* Normalize the requested acquisition time to microseconds; raw
	 * ticks are taken as the register value directly.
	 */
	switch (ADC_ACQ_TIME_UNIT(channel_cfg->acquisition_time)) {
	case ADC_ACQ_TIME_TICKS:
		data->sample_time = (uint16_t)ADC_ACQ_TIME_VALUE(channel_cfg->acquisition_time);
		break;
	case ADC_ACQ_TIME_MICROSECONDS:
		sample_time_us = (uint16_t)ADC_ACQ_TIME_VALUE(channel_cfg->acquisition_time);
		break;
	case ADC_ACQ_TIME_NANOSECONDS:
		sample_time_us = (uint16_t)(
			ADC_ACQ_TIME_VALUE(channel_cfg->acquisition_time) * 1000);
		break;
	default:
		/* ADC_ACQ_TIME_DEFAULT and anything else: 170 us */
		data->sample_time = AUXADC_SAMPLE_TIME_170_US;
		break;
	}

	if (sample_time_us) {
		/* Round up to the nearest supported sample time so the
		 * input is held at least as long as requested.
		 */
		data->sample_time = adc_cc13xx_sample_times[0].reg_value;
		for (i = 0; i < ARRAY_SIZE(adc_cc13xx_sample_times); i++) {
			if (adc_cc13xx_sample_times[i].time_us >= sample_time_us) {
				data->sample_time = adc_cc13xx_sample_times[i].reg_value;
				break;
			}
		}
		if (i >= ARRAY_SIZE(adc_cc13xx_sample_times)) {
			/* Request exceeds the longest supported time. */
			LOG_ERR("Acquisition time is not valid");
			return -EINVAL;
		}
	}

	if (channel_cfg->differential) {
		LOG_ERR("Differential channels are not supported");
		return -EINVAL;
	}

	if (channel_cfg->gain != ADC_GAIN_1) {
		LOG_ERR("Gain is not valid");
		return -EINVAL;
	}

	if (channel_cfg->reference == ADC_REF_INTERNAL) {
		data->ref_source = AUXADC_REF_FIXED;
	} else if (channel_cfg->reference == ADC_REF_VDD_1) {
		data->ref_source = AUXADC_REF_VDDS_REL;
	} else {
		LOG_ERR("Reference is not valid");
		return -EINVAL;
	}

	LOG_DBG("Setup %d acq time %d", ch, data->sample_time);

	AUXADCDisable();
	AUXADCSelectInput(ch);

	return 0;
}
/*
 * Common read path for blocking and async reads: validate the sequence,
 * then run it through the adc_context state machine.
 *
 * Fixes: data->buffer was written BEFORE adc_context_lock(), so a caller
 * could clobber the output pointer of a sequence still in flight on another
 * thread. The assignment is now made while holding the context lock.
 *
 * @return 0 on success, -EINVAL/-ENOMEM on bad parameters, or the
 *         completion status of the sequence.
 */
static int cc13xx_cc26xx_read(const struct device *dev,
			      const struct adc_sequence *sequence,
			      bool asynchronous,
			      struct k_poll_signal *sig)
{
	struct adc_cc13xx_cc26xx_data *data = dev->data;
	int rv;
	size_t exp_size;

	if (sequence->resolution != 12) {
		LOG_ERR("Only 12 Resolution is supported, but %d got",
			sequence->resolution);
		return -EINVAL;
	}

	exp_size = sizeof(uint16_t);
	if (sequence->options) {
		exp_size *= (1 + sequence->options->extra_samplings);
	}

	if (sequence->buffer_size < exp_size) {
		LOG_ERR("Required buffer size is %u, but %u got",
			exp_size, sequence->buffer_size);
		return -ENOMEM;
	}

	adc_context_lock(&data->ctx, asynchronous, sig);
	/* Set the output pointer only under the lock, so an in-flight
	 * sequence's buffer cannot be clobbered.
	 */
	data->buffer = sequence->buffer;
	adc_context_start_read(&data->ctx, sequence);
	rv = adc_context_wait_for_completion(&data->ctx);
	adc_context_release(&data->ctx, rv);
	return rv;
}
/* Blocking adc_read(): delegate to the common read path. */
static int adc_cc13xx_cc26xx_read(const struct device *dev,
				  const struct adc_sequence *sequence)
{
	return cc13xx_cc26xx_read(dev, sequence, false, NULL);
}

#ifdef CONFIG_ADC_ASYNC
/* Non-blocking adc_read_async(): completion is signalled via @p async. */
static int adc_cc13xx_cc26xx_read_async(const struct device *dev,
					const struct adc_sequence *sequence,
					struct k_poll_signal *async)
{
	return cc13xx_cc26xx_read(dev, sequence, true, async);
}
#endif
/**
* AUX_ADC_IRQ handler, called for either of these events:
* - conversion complete or DMA done (if used);
* - FIFO underflow or overflow;
*/
static void adc_cc13xx_cc26xx_isr(const struct device *dev)
{
	struct adc_cc13xx_cc26xx_data *data = dev->data;
	/* get the statuses of ADC_DONE and ADC_IRQ events in order to clear them both */
	uint32_t ev_status = (
		HWREG(AUX_EVCTL_BASE + AUX_EVCTL_O_EVTOMCUFLAGS) &
		(AUX_EVCTL_EVTOMCUFLAGS_AUX_ADC_IRQ | AUX_EVCTL_EVTOMCUFLAGS_AUX_ADC_DONE)
	);
	uint32_t fifo_status;
	uint32_t adc_value;

	/* clear the AUXADC-related event flags */
	HWREG(AUX_EVCTL_BASE + AUX_EVCTL_O_EVTOMCUFLAGSCLR) = ev_status;

	/* check the ADC FIFO's status */
	fifo_status = AUXADCGetFifoStatus();
	LOG_DBG("ISR flags 0x%08X fifo 0x%08X", ev_status, fifo_status);
	/* An over/underflowed FIFO holds unreliable data — discard it. */
	if ((fifo_status & (AUX_ANAIF_ADCFIFOSTAT_OVERFLOW | AUX_ANAIF_ADCFIFOSTAT_UNDERFLOW))) {
		AUXADCFlushFifo();
	}
	if ((fifo_status & AUX_ANAIF_ADCFIFOSTAT_EMPTY_M)) {
		/* no ADC values available */
		return;
	}

	/* Single manual-trigger conversion: exactly one result expected. */
	adc_value = AUXADCPopFifo();
	LOG_DBG("ADC buf %04X val %d", (unsigned int)data->buffer, adc_value);
	*data->buffer = adc_value;
	AUXADCDisable();

	adc_context_on_sampling_done(&data->ctx, dev);
}
/* ADC driver API vtable for the CC13xx/CC26xx AUX ADC. */
static const struct adc_driver_api cc13xx_cc26xx_driver_api = {
	.channel_setup = adc_cc13xx_cc26xx_channel_setup,
	.read = adc_cc13xx_cc26xx_read,
#ifdef CONFIG_ADC_ASYNC
	.read_async = adc_cc13xx_cc26xx_read_async,
#endif
	.ref_internal = 4300, /* fixed reference: 4.3V */
};
/*
 * Per-instance boilerplate: config/data storage, device definition and
 * IRQ wiring for one AUX ADC instance.
 */
#define CC13XX_CC26XX_ADC_INIT(index)						 \
	static void adc_cc13xx_cc26xx_cfg_func_##index(void);			 \
	static const struct adc_cc13xx_cc26xx_cfg adc_cc13xx_cc26xx_cfg_##index = { \
		.base = DT_INST_REG_ADDR(index),				 \
		.irq_cfg_func = adc_cc13xx_cc26xx_cfg_func_##index,		 \
	};									 \
	static struct adc_cc13xx_cc26xx_data adc_cc13xx_cc26xx_data_##index = {	 \
		ADC_CONTEXT_INIT_TIMER(adc_cc13xx_cc26xx_data_##index, ctx),	 \
		ADC_CONTEXT_INIT_LOCK(adc_cc13xx_cc26xx_data_##index, ctx),	 \
		ADC_CONTEXT_INIT_SYNC(adc_cc13xx_cc26xx_data_##index, ctx),	 \
	};									 \
	DEVICE_DT_INST_DEFINE(index,						 \
			      &adc_cc13xx_cc26xx_init, NULL,			 \
			      &adc_cc13xx_cc26xx_data_##index,			 \
			      &adc_cc13xx_cc26xx_cfg_##index, POST_KERNEL,	 \
			      CONFIG_ADC_INIT_PRIORITY,				 \
			      &cc13xx_cc26xx_driver_api);			 \
										 \
	static void adc_cc13xx_cc26xx_cfg_func_##index(void)			 \
	{									 \
		IRQ_CONNECT(DT_INST_IRQN(index), DT_INST_IRQ(index, priority),	 \
			    adc_cc13xx_cc26xx_isr, DEVICE_DT_INST_GET(index), 0); \
		irq_enable(DT_INST_IRQN(index));				 \
	}

DT_INST_FOREACH_STATUS_OKAY(CC13XX_CC26XX_ADC_INIT)
``` | /content/code_sandbox/drivers/adc/adc_cc13xx_cc26xx.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,569 |
```c
/*
*
* Based on adc_mcux_adc12.c, which are:
*
*/
#define DT_DRV_COMPAT nxp_mcux_12b1msps_sar
#include <zephyr/drivers/adc.h>
#include <fsl_adc.h>
#include <zephyr/drivers/pinctrl.h>
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(adc_mcux_12b1msps_sar);
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
/* Static configuration (from devicetree) for one ADC instance. */
struct mcux_12b1msps_sar_adc_config {
	ADC_Type *base;				/* peripheral register base */
	adc_clock_source_t clock_src;		/* conversion clock source */
	adc_clock_driver_t clock_drv;		/* clock divider */
	adc_reference_voltage_source_t ref_src;	/* reference selection */
	adc_sample_period_mode_t sample_period_mode;
	void (*irq_config_func)(const struct device *dev);	/* IRQ hookup */
	const struct pinctrl_dev_config *pincfg;	/* pin configuration */
};
/* Run-time state for one ADC instance. */
struct mcux_12b1msps_sar_adc_data {
	const struct device *dev;	/* back-pointer for adc_context callbacks */
	struct adc_context ctx;		/* generic ADC sequencing state */
	uint16_t *buffer;		/* next result slot */
	uint16_t *repeat_buffer;	/* buffer start, for repeated sampling */
	uint32_t channels;		/* bitmask of channels still to convert */
	uint8_t channel_id;		/* channel currently converting */
};
/*
 * Validate a channel configuration. Only the hardware defaults are
 * supported: gain of 1, internal reference, default acquisition time and
 * single-ended inputs. Returns 0, -EINVAL (bad channel id) or -ENOTSUP.
 */
static int mcux_12b1msps_sar_adc_channel_setup(const struct device *dev,
					       const struct adc_channel_cfg *channel_cfg)
{
	const uint8_t max_channel = ADC_HC_ADCH_MASK >> ADC_HC_ADCH_SHIFT;
	uint8_t channel_id = channel_cfg->channel_id;

	if (channel_id > max_channel) {
		LOG_ERR("Invalid channel %d", channel_id);
		return -EINVAL;
	}

	if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
		LOG_ERR("Unsupported channel acquisition time");
		return -ENOTSUP;
	}

	if (channel_cfg->differential) {
		LOG_ERR("Differential channels are not supported");
		return -ENOTSUP;
	}

	if (channel_cfg->gain != ADC_GAIN_1) {
		LOG_ERR("Unsupported channel gain %d", channel_cfg->gain);
		return -ENOTSUP;
	}

	if (channel_cfg->reference != ADC_REF_INTERNAL) {
		LOG_ERR("Unsupported channel reference");
		return -ENOTSUP;
	}

	return 0;
}
/*
 * Program resolution and hardware averaging from @p sequence, then start
 * the sequence via the ADC context and wait for completion.
 * Returns 0 on success or -ENOTSUP for unsupported settings.
 */
static int mcux_12b1msps_sar_adc_start_read(const struct device *dev,
					    const struct adc_sequence *sequence)
{
	const struct mcux_12b1msps_sar_adc_config *config = dev->config;
	struct mcux_12b1msps_sar_adc_data *data = dev->data;
	adc_hardware_average_mode_t mode;
	adc_resolution_t resolution;
	ADC_Type *base = config->base;
	int error;
	uint32_t tmp32;
	switch (sequence->resolution) {
	case 8:
		resolution = kADC_Resolution8Bit;
		break;
	case 10:
		resolution = kADC_Resolution10Bit;
		break;
	case 12:
		resolution = kADC_Resolution12Bit;
		break;
	default:
		LOG_ERR("Unsupported resolution %d", sequence->resolution);
		return -ENOTSUP;
	}
	/* Read-modify-write: only the MODE field of CFG is changed. */
	tmp32 = base->CFG & ~(ADC_CFG_MODE_MASK);
	tmp32 |= ADC_CFG_MODE(resolution);
	base->CFG = tmp32;
	/* Oversampling exponent -> hardware average count (2^n samples);
	 * n == 1 (2 samples) is not supported by the hardware.
	 */
	switch (sequence->oversampling) {
	case 0:
		mode = kADC_HardwareAverageDiasable;
		break;
	case 2:
		mode = kADC_HardwareAverageCount4;
		break;
	case 3:
		mode = kADC_HardwareAverageCount8;
		break;
	case 4:
		mode = kADC_HardwareAverageCount16;
		break;
	case 5:
		mode = kADC_HardwareAverageCount32;
		break;
	default:
		LOG_ERR("Unsupported oversampling value %d",
			sequence->oversampling);
		return -ENOTSUP;
	}
	ADC_SetHardwareAverageConfig(config->base, mode);
	data->buffer = sequence->buffer;
	adc_context_start_read(&data->ctx, sequence);
	error = adc_context_wait_for_completion(&data->ctx);
	return error;
}
static int mcux_12b1msps_sar_adc_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct mcux_12b1msps_sar_adc_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, async ? true : false, async);
error = mcux_12b1msps_sar_adc_start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
/* Blocking read: an asynchronous read with no completion signal. */
static int mcux_12b1msps_sar_adc_read(const struct device *dev,
				      const struct adc_sequence *sequence)
{
	return mcux_12b1msps_sar_adc_read_async(dev, sequence, NULL);
}
/*
 * Kick off a conversion on the lowest-numbered pending channel; completion
 * is reported through the conversion-complete interrupt.
 */
static void mcux_12b1msps_sar_adc_start_channel(const struct device *dev)
{
	const struct mcux_12b1msps_sar_adc_config *config = dev->config;
	struct mcux_12b1msps_sar_adc_data *data = dev->data;
	adc_channel_config_t channel_config;
	uint32_t channel_group = 0U;
	/* find_lsb_set() is 1-based, hence the -1. */
	data->channel_id = find_lsb_set(data->channels) - 1;
	LOG_DBG("Starting channel %d", data->channel_id);
	channel_config.enableInterruptOnConversionCompleted = true;
	channel_config.channelNumber = data->channel_id;
	ADC_SetChannelConfig(config->base, channel_group, &channel_config);
}
/*
 * adc_context callback: begin a sampling round by latching the requested
 * channel mask and starting the first conversion.
 */
static void adc_context_start_sampling(struct adc_context *ctx)
{
	struct mcux_12b1msps_sar_adc_data *data =
		CONTAINER_OF(ctx, struct mcux_12b1msps_sar_adc_data, ctx);
	data->channels = ctx->sequence.channels;
	/* Remember the buffer start so repeated samplings can rewind. */
	data->repeat_buffer = data->buffer;
	mcux_12b1msps_sar_adc_start_channel(data->dev);
}
/*
 * adc_context callback: rewind the output pointer when sampling repeats,
 * so repeated samples overwrite the previous ones.
 */
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
					      bool repeat_sampling)
{
	struct mcux_12b1msps_sar_adc_data *data =
		CONTAINER_OF(ctx, struct mcux_12b1msps_sar_adc_data, ctx);

	if (!repeat_sampling) {
		return;
	}

	data->buffer = data->repeat_buffer;
}
/*
 * Conversion-complete ISR: store the result, then either start the next
 * pending channel or tell the ADC context the round is done.
 */
static void mcux_12b1msps_sar_adc_isr(const struct device *dev)
{
	const struct mcux_12b1msps_sar_adc_config *config = dev->config;
	struct mcux_12b1msps_sar_adc_data *data = dev->data;
	ADC_Type *base = config->base;
	uint32_t channel_group = 0U;
	uint16_t result;
	result = ADC_GetChannelConversionValue(base, channel_group);
	LOG_DBG("Finished channel %d. Result is 0x%04x", data->channel_id,
		result);
	*data->buffer++ = result;
	/* Clear the just-converted channel from the pending mask. */
	data->channels &= ~BIT(data->channel_id);
	if (data->channels) {
		mcux_12b1msps_sar_adc_start_channel(dev);
	} else {
		adc_context_on_sampling_done(&data->ctx, dev);
	}
}
/*
 * Driver init: apply pinctrl, program the ADC with the devicetree-derived
 * clock/reference/sample-period settings, run auto-calibration and hook up
 * the IRQ. A calibration failure is logged but not treated as fatal.
 */
static int mcux_12b1msps_sar_adc_init(const struct device *dev)
{
	const struct mcux_12b1msps_sar_adc_config *config = dev->config;
	struct mcux_12b1msps_sar_adc_data *data = dev->data;
	ADC_Type *base = config->base;
	adc_config_t adc_config;
	int err;
	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}
	ADC_GetDefaultConfig(&adc_config);
	adc_config.referenceVoltageSource = config->ref_src;
	adc_config.clockSource = config->clock_src;
	adc_config.clockDriver = config->clock_drv;
	adc_config.samplePeriodMode = config->sample_period_mode;
	/* Resolution is reprogrammed per-sequence in start_read(). */
	adc_config.resolution = kADC_Resolution12Bit;
	adc_config.enableContinuousConversion = false;
	adc_config.enableOverWrite = false;
	adc_config.enableHighSpeed = false;
	adc_config.enableLowPower = false;
	adc_config.enableLongSample = false;
	adc_config.enableAsynchronousClockOutput = true;
	ADC_Init(base, &adc_config);
#if !(defined(FSL_FEATURE_ADC_SUPPORT_HARDWARE_TRIGGER_REMOVE) && \
      FSL_FEATURE_ADC_SUPPORT_HARDWARE_TRIGGER_REMOVE)
	/* Conversions are software-triggered by this driver. */
	ADC_EnableHardwareTrigger(base, false);
#endif
	if (kStatus_Success == ADC_DoAutoCalibration(base)) {
		LOG_DBG("ADC_DoAutoCalibration() Done.");
	} else {
		LOG_WRN("ADC_DoAutoCalibration() Failed.");
	}
	config->irq_config_func(dev);
	data->dev = dev;
	adc_context_unlock_unconditionally(&data->ctx);
	return 0;
}
/* adc_driver_api vtable; ref_internal is the reference voltage in mV. */
static const struct adc_driver_api mcux_12b1msps_sar_adc_driver_api = {
	.channel_setup = mcux_12b1msps_sar_adc_channel_setup,
	.read = mcux_12b1msps_sar_adc_read,
#ifdef CONFIG_ADC_ASYNC
	.read_async = mcux_12b1msps_sar_adc_read_async,
#endif
	.ref_internal = 3300,
};
/* Compile-time range check for devicetree-sourced values. */
#define ASSERT_WITHIN_RANGE(val, min, max, str) \
	BUILD_ASSERT(val >= min && val <= max, str)
/* The hardware clock divider only supports 1, 2, 4 or 8. */
#define ASSERT_RT_ADC_CLK_DIV_VALID(val, str) \
	BUILD_ASSERT(val == 1 || val == 2 || val == 4 || val == 8, str)
/* Map a numeric divider to the matching kADC_ClockDriver* enumerator. */
#define TO_RT_ADC_CLOCK_DIV(val) _DO_CONCAT(kADC_ClockDriver, val)
/*
 * Per-devicetree-instance boilerplate: validate properties, define
 * config/data, register the device and connect its IRQ.
 */
#define ACD_MCUX_12B1MSPS_SAR_INIT(n) \
	static void mcux_12b1msps_sar_adc_config_func_##n(const struct device *dev); \
	\
	ASSERT_RT_ADC_CLK_DIV_VALID(DT_INST_PROP(n, clk_divider), \
				    "Invalid clock divider"); \
	ASSERT_WITHIN_RANGE(DT_INST_PROP(n, sample_period_mode), 0, 3, \
			    "Invalid sample period mode"); \
	PINCTRL_DT_INST_DEFINE(n); \
	\
	static const struct mcux_12b1msps_sar_adc_config mcux_12b1msps_sar_adc_config_##n = { \
		.base = (ADC_Type *)DT_INST_REG_ADDR(n), \
		.clock_src = kADC_ClockSourceAD, \
		.clock_drv = \
			TO_RT_ADC_CLOCK_DIV(DT_INST_PROP(n, clk_divider)), \
		.ref_src = kADC_ReferenceVoltageSourceAlt0, \
		.sample_period_mode = DT_INST_PROP(n, sample_period_mode), \
		.irq_config_func = mcux_12b1msps_sar_adc_config_func_##n, \
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
	}; \
	\
	static struct mcux_12b1msps_sar_adc_data mcux_12b1msps_sar_adc_data_##n = { \
		ADC_CONTEXT_INIT_TIMER(mcux_12b1msps_sar_adc_data_##n, ctx), \
		ADC_CONTEXT_INIT_LOCK(mcux_12b1msps_sar_adc_data_##n, ctx), \
		ADC_CONTEXT_INIT_SYNC(mcux_12b1msps_sar_adc_data_##n, ctx), \
	}; \
	\
	DEVICE_DT_INST_DEFINE(n, &mcux_12b1msps_sar_adc_init, NULL, \
		&mcux_12b1msps_sar_adc_data_##n, &mcux_12b1msps_sar_adc_config_##n, \
		POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \
		&mcux_12b1msps_sar_adc_driver_api); \
	\
	static void mcux_12b1msps_sar_adc_config_func_##n(const struct device *dev) \
	{ \
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \
			    mcux_12b1msps_sar_adc_isr, DEVICE_DT_INST_GET(n), 0); \
		\
		irq_enable(DT_INST_IRQN(n)); \
	}
DT_INST_FOREACH_STATUS_OKAY(ACD_MCUX_12B1MSPS_SAR_INIT)
``` | /content/code_sandbox/drivers/adc/adc_mcux_12b1msps_sar.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,665 |
```c
/*
*
*/
#include <stdint.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/byteorder.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
/*
* This requires to be included _after_ `#define ADC_CONTEXT_USES_KERNEL_TIMER`
*/
#include "adc_context.h"
#define DT_DRV_COMPAT ti_tla2021
LOG_MODULE_REGISTER(tla2021, CONFIG_ADC_LOG_LEVEL);
#define ACQ_THREAD_PRIORITY CONFIG_ADC_TLA2021_ACQUISITION_THREAD_PRIORITY
#define ACQ_THREAD_STACK_SIZE CONFIG_ADC_TLA2021_ACQUISITION_THREAD_STACK_SIZE
#define ADC_CHANNEL_msk BIT(0)
#define ADC_RESOLUTION 12
/*
* Conversion Data Register (RP = 00h) [reset = 0000h]
*/
#define REG_DATA 0x00
#define REG_DATA_pos 4
/*
* Configuration Register (RP = 01h) [reset = 8583h]
*/
#define REG_CONFIG 0x01
#define REG_CONFIG_DEFAULT 0x8583
#define REG_CONFIG_DR_pos 5
#define REG_CONFIG_MODE_pos 8
#define REG_CONFIG_PGA_pos 9 /* TLA2022 and TLA2024 Only */
#define REG_CONFIG_MUX_pos 12 /* TLA2024 Only */
#define REG_CONFIG_OS_pos 15
#define REG_CONFIG_OS_msk (BIT_MASK(1) << REG_CONFIG_OS_pos)
typedef int16_t tla2021_reg_data_t;
typedef uint16_t tla2021_reg_config_t;
/* Static per-instance configuration. */
struct tla2021_config {
	const struct i2c_dt_spec bus;	/* I2C bus and address from devicetree */
	k_tid_t acq_thread_id;		/* acquisition thread handle */
};
/* Run-time per-instance state. */
struct tla2021_data {
	const struct device *dev;	/* back-pointer for thread/context code */
	struct adc_context ctx;		/* generic ADC sequencing state */
	struct k_sem acq_lock;		/* wakes the acquisition thread */
	tla2021_reg_data_t *buffer;	/* next result slot */
	tla2021_reg_data_t *repeat_buffer;	/* buffer start, for repeats */
	/*
	 * Shadow register: local copy of the device CONFIG register, used as
	 * the base value when starting conversions.
	 */
	tla2021_reg_config_t reg_config;
};
/* Read one 16-bit register; the result is converted to host byte order. */
static int tla2021_read_register(const struct device *dev, uint8_t reg, uint16_t *value)
{
	const struct tla2021_config *config = dev->config;
	uint8_t raw[sizeof(uint16_t)];
	int ret;

	ret = i2c_write_read_dt(&config->bus, &reg, sizeof(reg), raw, sizeof(raw));
	if (ret != 0) {
		return ret;
	}

	*value = sys_get_be16(raw);

	return 0;
}
/*
 * Write one 16-bit register: register pointer byte followed by the value
 * in big-endian order, as required by the device's WREG transaction.
 *
 * Fix: the `value` parameter was missing from the signature while the body
 * uses it and both call sites pass three arguments — restored here.
 *
 * @return 0 on success, negative errno from the I2C transfer otherwise.
 */
static int tla2021_write_register(const struct device *dev, uint8_t reg, uint16_t value)
{
	int ret;
	const struct tla2021_config *config = dev->config;
	uint8_t tmp[3] = {reg};

	sys_put_be16(value, &tmp[1]);

	ret = i2c_write_dt(&config->bus, tmp, sizeof(tmp));
	if (ret) {
		return ret;
	}

	return 0;
}
/*
 * Validate a channel configuration: only gain 1, the internal reference
 * and the default acquisition time are supported.
 */
static int tla2021_channel_setup(const struct device *dev, const struct adc_channel_cfg *cfg)
{
	if (cfg->gain != ADC_GAIN_1) {
		LOG_ERR("Invalid gain");
		return -EINVAL;
	}

	if (cfg->reference != ADC_REF_INTERNAL) {
		LOG_ERR("Invalid reference");
		return -EINVAL;
	}

	if (cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
		LOG_ERR("Invalid acquisition time");
		return -EINVAL;
	}

	return 0;
}
/*
 * Validate @p seq (channel mask, resolution, buffer size; oversampling and
 * calibration unsupported) and start the sequence through the ADC context.
 */
static int tla2021_start_read(const struct device *dev, const struct adc_sequence *seq)
{
	struct tla2021_data *data = dev->data;
	const size_t num_extra_samples = seq->options ? seq->options->extra_samplings : 0;
	const size_t num_samples = (1 + num_extra_samples) * POPCOUNT(seq->channels);
	if (!(seq->channels & ADC_CHANNEL_msk)) {
		LOG_ERR("Selected channel(s) not supported: %x", seq->channels);
		return -EINVAL;
	}
	if (seq->resolution != ADC_RESOLUTION) {
		LOG_ERR("Selected resolution not supported: %d", seq->resolution);
		return -EINVAL;
	}
	if (seq->oversampling) {
		LOG_ERR("Oversampling is not supported");
		return -EINVAL;
	}
	if (seq->calibrate) {
		LOG_ERR("Calibration is not supported");
		return -EINVAL;
	}
	if (!seq->buffer) {
		LOG_ERR("Buffer invalid");
		return -EINVAL;
	}
	if (seq->buffer_size < (num_samples * sizeof(tla2021_reg_data_t))) {
		LOG_ERR("buffer size too small");
		return -EINVAL;
	}
	data->buffer = seq->buffer;
	adc_context_start_read(&data->ctx, seq);
	return adc_context_wait_for_completion(&data->ctx);
}
static int tla2021_read_async(const struct device *dev, const struct adc_sequence *seq,
struct k_poll_signal *async)
{
int ret;
struct tla2021_data *data = dev->data;
adc_context_lock(&data->ctx, async ? true : false, async);
ret = tla2021_start_read(dev, seq);
adc_context_release(&data->ctx, ret);
return ret;
}
/* Blocking read: an asynchronous read with no completion signal. */
static int tla2021_read(const struct device *dev, const struct adc_sequence *seq)
{
	return tla2021_read_async(dev, seq, NULL);
}
/*
 * adc_context callback: trigger a single-shot conversion from the shadow
 * CONFIG value and wake the acquisition thread to collect the result.
 */
static void adc_context_start_sampling(struct adc_context *ctx)
{
	int ret;
	struct tla2021_data *data = CONTAINER_OF(ctx, struct tla2021_data, ctx);
	const struct device *dev = data->dev;
	tla2021_reg_config_t reg = data->reg_config;
	/*
	 * Start single-shot conversion
	 */
	WRITE_BIT(reg, REG_CONFIG_MODE_pos, 1);
	WRITE_BIT(reg, REG_CONFIG_OS_pos, 1);
	ret = tla2021_write_register(dev, REG_CONFIG, reg);
	if (ret) {
		/* Best effort: the acquisition thread reports bus errors. */
		LOG_WRN("Failed to start conversion");
	}
	data->repeat_buffer = data->buffer;
	k_sem_give(&data->acq_lock);
}
/*
 * adc_context callback: rewind the output pointer when sampling repeats,
 * so repeated samples overwrite the previous ones.
 */
static void adc_context_update_buffer_pointer(struct adc_context *ctx, bool repeat_sampling)
{
	struct tla2021_data *data = CONTAINER_OF(ctx, struct tla2021_data, ctx);

	if (!repeat_sampling) {
		return;
	}

	data->buffer = data->repeat_buffer;
}
static void tla2021_acq_thread_fn(void *p1, void *p2, void *p3)
{
int ret;
struct tla2021_data *data = p1;
const struct device *dev = data->dev;
while (true) {
k_sem_take(&data->acq_lock, K_FOREVER);
tla2021_reg_config_t reg;
tla2021_reg_data_t res;
/*
* Wait until sampling is done
*/
do {
ret = tla2021_read_register(dev, REG_CONFIG, ®);
if (ret < 0) {
adc_context_complete(&data->ctx, ret);
}
} while (!(reg & REG_CONFIG_OS_msk));
/*
* Read result
*/
ret = tla2021_read_register(dev, REG_DATA, &res);
if (ret) {
adc_context_complete(&data->ctx, ret);
}
/*
* ADC data is stored in the upper 12 bits
*/
res >>= REG_DATA_pos;
*data->buffer++ = res;
adc_context_on_sampling_done(&data->ctx, data->dev);
}
}
/*
 * Driver init: check the bus, then write the shadow CONFIG value (the
 * register defaults) to put the device in a known state.
 */
static int tla2021_init(const struct device *dev)
{
	int ret;
	const struct tla2021_config *config = dev->config;
	struct tla2021_data *data = dev->data;
	k_sem_init(&data->acq_lock, 0, 1);
	if (!i2c_is_ready_dt(&config->bus)) {
		LOG_ERR("Bus not ready");
		return -EINVAL;
	}
	ret = tla2021_write_register(dev, REG_CONFIG, data->reg_config);
	if (ret) {
		LOG_ERR("Device reset failed: %d", ret);
		return ret;
	}
	adc_context_unlock_unconditionally(&data->ctx);
	return 0;
}
/* adc_driver_api vtable; ref_internal is the internal full-scale in mV. */
static const struct adc_driver_api tla2021_driver_api = {
	.channel_setup = tla2021_channel_setup,
	.read = tla2021_read,
	.ref_internal = 4096,
#ifdef CONFIG_ADC_ASYNC
	.read_async = tla2021_read_async,
#endif
};
/*
 * Per-devicetree-instance boilerplate. The config/data structs are
 * forward-declared so K_THREAD_DEFINE can reference the data before its
 * initializer appears.
 */
#define TLA2021_INIT(n) \
	static const struct tla2021_config inst_##n##_config; \
	static struct tla2021_data inst_##n##_data; \
	K_THREAD_DEFINE(inst_##n##_thread, ACQ_THREAD_STACK_SIZE, tla2021_acq_thread_fn, \
			&inst_##n##_data, NULL, NULL, ACQ_THREAD_PRIORITY, 0, 0); \
	static const struct tla2021_config inst_##n##_config = { \
		.bus = I2C_DT_SPEC_INST_GET(n), \
		.acq_thread_id = inst_##n##_thread, \
	}; \
	static struct tla2021_data inst_##n##_data = { \
		.dev = DEVICE_DT_INST_GET(n), \
		ADC_CONTEXT_INIT_LOCK(inst_##n##_data, ctx), \
		ADC_CONTEXT_INIT_TIMER(inst_##n##_data, ctx), \
		ADC_CONTEXT_INIT_SYNC(inst_##n##_data, ctx), \
		.reg_config = REG_CONFIG_DEFAULT, \
	}; \
	DEVICE_DT_INST_DEFINE(n, &tla2021_init, NULL, &inst_##n##_data, &inst_##n##_config, \
			      POST_KERNEL, CONFIG_ADC_TLA2021_INIT_PRIORITY, &tla2021_driver_api);
DT_INST_FOREACH_STATUS_OKAY(TLA2021_INIT)
/* The I2C bus must come up before this driver initializes. */
BUILD_ASSERT(CONFIG_I2C_INIT_PRIORITY < CONFIG_ADC_TLA2021_INIT_PRIORITY);
``` | /content/code_sandbox/drivers/adc/adc_tla2021.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,181 |
```c
/*
*
*/
#define DT_DRV_COMPAT infineon_xmc4xxx_adc
#include <errno.h>
#include <soc.h>
#include <stdint.h>
#include <xmc_scu.h>
#include <xmc_vadc.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/device.h>
#include <zephyr/irq.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(adc_xmc4xxx);
#define XMC4XXX_CHANNEL_COUNT 8
/* Run-time state for one VADC group. */
struct adc_xmc4xxx_data {
	struct adc_context ctx;		/* generic ADC sequencing state */
	const struct device *dev;	/* back-pointer for context callbacks */
	uint16_t *buffer;		/* next result slot */
	uint16_t *repeat_buffer;	/* buffer start, for repeated sampling */
	uint8_t channel_mask;		/* channels sampled in the current scan */
};
/* Static configuration for one VADC group. */
struct adc_xmc4xxx_cfg {
	XMC_VADC_GROUP_t *base;		/* group register base */
	void (*irq_cfg_func)(void);	/* IRQ hookup */
	uint8_t irq_num;	/* IRQ line; used to derive the service request */
};
/* One-time global VADC module setup guard, shared by all group instances. */
static bool adc_global_init;
/* Global VADC register block (fixed address on XMC4xxx). */
static XMC_VADC_GLOBAL_t *const adc_global_ptr = (XMC_VADC_GLOBAL_t *)0x40004000;
/*
 * adc_context callback: trigger a scan conversion of all configured
 * channels; results are collected in the group ISR.
 */
static void adc_context_start_sampling(struct adc_context *ctx)
{
	struct adc_xmc4xxx_data *data = CONTAINER_OF(ctx, struct adc_xmc4xxx_data, ctx);
	const struct device *dev = data->dev;
	const struct adc_xmc4xxx_cfg *config = dev->config;
	VADC_G_TypeDef *adc_group = config->base;
	/* Remember the buffer start so repeated samplings can rewind. */
	data->repeat_buffer = data->buffer;
	XMC_VADC_GROUP_ScanTriggerConversion(adc_group);
	XMC_VADC_GROUP_ScanEnableArbitrationSlot(adc_group);
}
/*
 * adc_context callback: rewind the output pointer when sampling repeats,
 * so repeated samples overwrite the previous ones.
 */
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
					      bool repeat_sampling)
{
	struct adc_xmc4xxx_data *data = CONTAINER_OF(ctx, struct adc_xmc4xxx_data, ctx);

	if (!repeat_sampling) {
		return;
	}

	data->buffer = data->repeat_buffer;
}
/*
 * Scan-complete ISR: read out the result register of every channel in the
 * requested mask (lowest channel first) and finish the sampling round.
 */
static void adc_xmc4xxx_isr(const struct device *dev)
{
	struct adc_xmc4xxx_data *data = dev->data;
	const struct adc_xmc4xxx_cfg *config = dev->config;
	XMC_VADC_GROUP_t *adc_group = config->base;
	uint32_t channel_mask = data->channel_mask;
	uint32_t ch;
	/* Conversion has completed. */
	while (channel_mask > 0) {
		/* find_lsb_set() is 1-based, hence the -1. */
		ch = find_lsb_set(channel_mask) - 1;
		*data->buffer++ = XMC_VADC_GROUP_GetResult(adc_group, ch);
		channel_mask &= ~BIT(ch);
	}
	adc_context_on_sampling_done(&data->ctx, dev);
	LOG_DBG("%s ISR triggered.", dev->name);
}
/*
 * Check that the sequence buffer can hold one uint16_t result per selected
 * channel, times the number of samplings. Returns 0 or -ENOMEM.
 */
static int adc_xmc4xxx_validate_buffer_size(const struct adc_sequence *sequence)
{
	int channel_count = 0;
	int needed_size;

	for (int ch = 0; ch < XMC4XXX_CHANNEL_COUNT; ch++) {
		if (sequence->channels & BIT(ch)) {
			channel_count++;
		}
	}

	needed_size = channel_count * sizeof(uint16_t);
	if (sequence->options) {
		needed_size *= (1 + sequence->options->extra_samplings);
	}

	return (sequence->buffer_size < needed_size) ? -ENOMEM : 0;
}
/*
 * Validate @p sequence against the configured scan channels, program the
 * conversion resolution, and start the scan through the ADC context.
 */
static int start_read(const struct device *dev,
		      const struct adc_sequence *sequence)
{
	int ret;
	struct adc_xmc4xxx_data *data = dev->data;
	const struct adc_xmc4xxx_cfg *config = dev->config;
	XMC_VADC_GROUP_t *adc_group = config->base;
	uint32_t requested_channels = sequence->channels;
	uint8_t resolution = sequence->resolution;
	/* ASSEL holds the channels added to the scan in channel_setup(). */
	uint32_t configured_channels = adc_group->ASSEL & requested_channels;
	XMC_VADC_GROUP_CLASS_t group_class = {0};
	if (requested_channels == 0) {
		LOG_ERR("No channels requested");
		return -EINVAL;
	}
	if (requested_channels != configured_channels) {
		LOG_ERR("Selected channels not configured");
		return -EINVAL;
	}
	if (sequence->oversampling) {
		LOG_ERR("Oversampling not supported");
		return -ENOTSUP;
	}
	ret = adc_xmc4xxx_validate_buffer_size(sequence);
	if (ret < 0) {
		LOG_ERR("Invalid sequence buffer size");
		return ret;
	}
	if (resolution == 8) {
		group_class.conversion_mode_standard = XMC_VADC_CONVMODE_8BIT;
	} else if (resolution == 10) {
		group_class.conversion_mode_standard = XMC_VADC_CONVMODE_10BIT;
	} else if (resolution == 12) {
		group_class.conversion_mode_standard = XMC_VADC_CONVMODE_12BIT;
	} else {
		LOG_ERR("Invalid resolution");
		return -EINVAL;
	}
	XMC_VADC_GROUP_InputClassInit(adc_group, group_class, XMC_VADC_GROUP_CONV_STD, 0);
	data->channel_mask = requested_channels;
	data->buffer = sequence->buffer;
	adc_context_start_read(&data->ctx, sequence);
	return adc_context_wait_for_completion(&data->ctx);
}
/* Blocking read: lock the context without an async signal and run the scan. */
static int adc_xmc4xxx_read(const struct device *dev,
			    const struct adc_sequence *sequence)
{
	struct adc_xmc4xxx_data *data = dev->data;
	int rc;

	adc_context_lock(&data->ctx, false, NULL);
	rc = start_read(dev, sequence);
	adc_context_release(&data->ctx, rc);

	return rc;
}
#ifdef CONFIG_ADC_ASYNC
/* Asynchronous read: completion is reported through @p async. */
static int adc_xmc4xxx_read_async(const struct device *dev,
				  const struct adc_sequence *sequence,
				  struct k_poll_signal *async)
{
	struct adc_xmc4xxx_data *data = dev->data;
	int rc;

	adc_context_lock(&data->ctx, true, async);
	rc = start_read(dev, sequence);
	adc_context_release(&data->ctx, rc);

	return rc;
}
#endif
/*
 * Validate and configure one channel: only single-ended, gain 1, internal
 * reference and default acquisition time are supported. On success the
 * channel is added to the group's scan sequence.
 */
static int adc_xmc4xxx_channel_setup(const struct device *dev,
				     const struct adc_channel_cfg *channel_cfg)
{
	const struct adc_xmc4xxx_cfg *config = dev->config;
	VADC_G_TypeDef *adc_group = config->base;
	uint32_t ch_num = channel_cfg->channel_id;
	XMC_VADC_CHANNEL_CONFIG_t channel_config = {0};
	if (ch_num >= XMC4XXX_CHANNEL_COUNT) {
		LOG_ERR("Channel %d is not valid", ch_num);
		return -EINVAL;
	}
	if (channel_cfg->differential) {
		LOG_ERR("Differential channels are not supported");
		return -EINVAL;
	}
	if (channel_cfg->gain != ADC_GAIN_1) {
		LOG_ERR("Invalid channel gain");
		return -EINVAL;
	}
	if (channel_cfg->reference != ADC_REF_INTERNAL) {
		LOG_ERR("Invalid channel reference");
		return -EINVAL;
	}
	if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
		LOG_ERR("Invalid acquisition time");
		return -EINVAL;
	}
	/* check that the group global calibration has successfully finished */
	if (adc_group->ARBCFG & VADC_G_ARBCFG_CAL_Msk) {
		LOG_WRN("Group calibration hasn't completed yet");
		return -EBUSY;
	}
	channel_config.channel_priority = true;
	/* Each channel stores its result in the same-numbered result reg. */
	channel_config.result_reg_number = ch_num;
	channel_config.result_alignment = XMC_VADC_RESULT_ALIGN_RIGHT;
	channel_config.alias_channel = -1; /* do not alias channel */
	XMC_VADC_GROUP_ChannelInit(adc_group, ch_num, &channel_config);
	adc_group->RCR[ch_num] = 0;
	XMC_VADC_GROUP_ScanAddChannelToSequence(adc_group, ch_num);
	return 0;
}
/* First VADC interrupt line; used to map IRQ number -> service request. */
#define VADC_IRQ_MIN 18
/* Each VADC group owns four consecutive interrupt lines. */
#define IRQS_PER_VADC_GROUP 4
/*
 * Driver init: perform the one-time global VADC module setup (clock,
 * reset, global registers), power up this group, kick off calibration and
 * route the scan-source event to this instance's interrupt line.
 */
static int adc_xmc4xxx_init(const struct device *dev)
{
	struct adc_xmc4xxx_data *data = dev->data;
	const struct adc_xmc4xxx_cfg *config = dev->config;
	VADC_G_TypeDef *adc_group = config->base;
	uint8_t service_request;
	data->dev = dev;
	config->irq_cfg_func();
	if (adc_global_init == 0) {
		/* defined using xmc_device.h */
#ifdef CLOCK_GATING_SUPPORTED
		XMC_SCU_CLOCK_UngatePeripheralClock(XMC_SCU_PERIPHERAL_CLOCK_VADC);
#endif
		/* Reset the Hardware */
		XMC_SCU_RESET_DeassertPeripheralReset(XMC_SCU_PERIPHERAL_RESET_VADC);
		/* enable the module clock */
		adc_global_ptr->CLC = 0;
		/* global configuration register - defines clock divider to adc clock */
		/* automatic post calibration after each conversion is enabled */
		adc_global_ptr->GLOBCFG = 0;
		/* global result control register is unused */
		adc_global_ptr->GLOBRCR = 0;
		/* global bound register is unused */
		adc_global_ptr->GLOBBOUND = 0;
		adc_global_init = 1;
	}
	adc_group->ARBCFG = 0;
	adc_group->BOUND = 0;
	XMC_VADC_GROUP_SetPowerMode(adc_group, XMC_VADC_GROUP_POWERMODE_NORMAL);
	/* Initiate calibration. It is initialized for all groups. Check that the */
	/* calibration completed in the channel setup. */
	adc_global_ptr->GLOBCFG |= VADC_GLOBCFG_SUCAL_Msk;
	XMC_VADC_GROUP_BackgroundDisableArbitrationSlot(adc_group);
	XMC_VADC_GROUP_ScanDisableArbitrationSlot(adc_group);
	/* Derive this group's service-request index from its IRQ number. */
	service_request = (config->irq_num - VADC_IRQ_MIN) % IRQS_PER_VADC_GROUP;
	XMC_VADC_GROUP_ScanSetGatingMode(adc_group, XMC_VADC_GATEMODE_IGNORE);
	XMC_VADC_GROUP_ScanSetReqSrcEventInterruptNode(adc_group, service_request);
	XMC_VADC_GROUP_ScanEnableEvent(adc_group);
	adc_context_unlock_unconditionally(&data->ctx);
	return 0;
}
/* adc_driver_api vtable; ref_internal comes from devicetree, in mV. */
static const struct adc_driver_api api_xmc4xxx_driver_api = {
	.channel_setup = adc_xmc4xxx_channel_setup,
	.read = adc_xmc4xxx_read,
#ifdef CONFIG_ADC_ASYNC
	.read_async = adc_xmc4xxx_read_async,
#endif
	.ref_internal = DT_INST_PROP(0, vref_internal_mv),
};
/* Per-instance IRQ hookup function and static configuration. */
#define ADC_XMC4XXX_CONFIG(index)					\
	static void adc_xmc4xxx_cfg_func_##index(void)			\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(index),			\
			    DT_INST_IRQ(index, priority),		\
			    adc_xmc4xxx_isr, DEVICE_DT_INST_GET(index), 0); \
		irq_enable(DT_INST_IRQN(index));			\
	}								\
									\
	static const struct adc_xmc4xxx_cfg adc_xmc4xxx_cfg_##index = { \
		.base = (VADC_G_TypeDef *)DT_INST_REG_ADDR(index),	\
		.irq_cfg_func = adc_xmc4xxx_cfg_func_##index,		\
		.irq_num = DT_INST_IRQN(index),				\
	};
/* Per-devicetree-instance boilerplate: data storage and registration. */
#define ADC_XMC4XXX_INIT(index)						\
	ADC_XMC4XXX_CONFIG(index)					\
									\
	static struct adc_xmc4xxx_data adc_xmc4xxx_data_##index = {	\
		ADC_CONTEXT_INIT_TIMER(adc_xmc4xxx_data_##index, ctx),	\
		ADC_CONTEXT_INIT_LOCK(adc_xmc4xxx_data_##index, ctx),	\
		ADC_CONTEXT_INIT_SYNC(adc_xmc4xxx_data_##index, ctx),	\
	};								\
									\
	DEVICE_DT_INST_DEFINE(index,					\
			      &adc_xmc4xxx_init, NULL,			\
			      &adc_xmc4xxx_data_##index, &adc_xmc4xxx_cfg_##index, \
			      POST_KERNEL, CONFIG_ADC_INIT_PRIORITY,	\
			      &api_xmc4xxx_driver_api);
DT_INST_FOREACH_STATUS_OKAY(ADC_XMC4XXX_INIT)
``` | /content/code_sandbox/drivers/adc/adc_xmc4xxx.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,577 |
```unknown
#
menuconfig ADC_MAX11102_17
bool "Maxim Integrated MAX11102-MAX11117"
default y
depends on DT_HAS_MAXIM_MAX11102_ENABLED \
|| DT_HAS_MAXIM_MAX11103_ENABLED \
|| DT_HAS_MAXIM_MAX11105_ENABLED \
|| DT_HAS_MAXIM_MAX11106_ENABLED \
|| DT_HAS_MAXIM_MAX11110_ENABLED \
|| DT_HAS_MAXIM_MAX11111_ENABLED \
|| DT_HAS_MAXIM_MAX11115_ENABLED \
|| DT_HAS_MAXIM_MAX11116_ENABLED \
|| DT_HAS_MAXIM_MAX11117_ENABLED
select SPI
	help
	  Enable the driver implementation for the MAX11102-MAX11117 family
	  of SPI analog-to-digital converters.
config ADC_MAX11102_17_ACQUISITION_THREAD_INIT_PRIO
	int "ADC data acquisition thread priority"
	default 0
	depends on ADC_MAX11102_17 && ADC_ASYNC
	help
	  Priority of the internal data acquisition thread used for
	  asynchronous reads.
config ADC_MAX11102_17_ACQUISITION_THREAD_STACK_SIZE
int "Stack size for the ADC data acquisition thread"
default 400
depends on ADC_MAX11102_17 && ADC_ASYNC
help
Size of the stack used for the internal data acquisition
thread.
``` | /content/code_sandbox/drivers/adc/Kconfig.max11102_17 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 257 |
```c
/*
*
*/
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/logging/log.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER 1
#include "adc_context.h"
#define DT_DRV_COMPAT ti_ads1119
LOG_MODULE_REGISTER(ADS1119, CONFIG_ADC_LOG_LEVEL);
#define ADS1119_CONFIG_VREF(x) (FIELD_PREP(BIT(0), x))
#define ADS1119_CONFIG_CM(x) (FIELD_PREP(BIT(1), x))
#define ADS1119_CONFIG_DR(x) (FIELD_PREP(BIT_MASK(2) << 2, x))
#define ADS1119_CONFIG_GAIN(x) (FIELD_PREP(BIT(4), x))
#define ADS1119_CONFIG_MUX(x) (FIELD_PREP(BIT_MASK(3) << 5, x))
#define ADS1119_STATUS_MASK_ID BIT_MASK(7)
#define ADS1119_STATUS_MASK_READY BIT(7)
#define ADS1119_REG_SHIFT 2
#define ADS1119_RESOLUTION 16
#define ADS1119_REF_INTERNAL 2048
enum ads1119_cmd {
ADS1119_CMD_RESET = 0x06,
ADS1119_CMD_START_SYNC = 0x08,
ADS1119_CMD_POWER_DOWN = 0x02,
ADS1119_CMD_READ_DATA = 0x10,
ADS1119_CMD_READ_REG = 0x20,
ADS1119_CMD_WRITE_REG = 0x40,
};
enum ads1119_reg {
ADS1119_REG_CONFIG = 0 << ADS1119_REG_SHIFT,
ADS1119_REG_STATUS = 1 << ADS1119_REG_SHIFT,
};
enum {
ADS1119_CONFIG_VREF_INTERNAL = 0,
ADS1119_CONFIG_VREF_EXTERNAL = 1,
};
enum {
ADS1119_CONFIG_MUX_DIFF_0_1 = 0,
ADS1119_CONFIG_MUX_DIFF_2_3 = 1,
ADS1119_CONFIG_MUX_DIFF_1_2 = 2,
ADS1119_CONFIG_MUX_SINGLE_0 = 3,
ADS1119_CONFIG_MUX_SINGLE_1 = 4,
ADS1119_CONFIG_MUX_SINGLE_2 = 5,
ADS1119_CONFIG_MUX_SINGLE_3 = 6,
ADS1119_CONFIG_MUX_SHORTED = 7,
};
enum {
ADS1119_CONFIG_DR_20 = 0,
ADS1119_CONFIG_DR_90 = 1,
ADS1119_CONFIG_DR_330 = 2,
ADS1119_CONFIG_DR_1000 = 3,
ADS1119_CONFIG_DR_DEFAULT = ADS1119_CONFIG_DR_20,
};
enum {
ADS1119_CONFIG_GAIN_1 = 0,
ADS1119_CONFIG_GAIN_4 = 1,
};
enum {
ADS1119_CONFIG_CM_SINGLE = 0,
ADS1119_CONFIG_CM_CONTINUOUS = 1,
};
/* Static per-instance configuration. */
struct ads1119_config {
	const struct i2c_dt_spec bus;	/* I2C bus and address from devicetree */
#if CONFIG_ADC_ASYNC
	k_thread_stack_t *stack;	/* acquisition thread stack */
#endif
};
/* Run-time per-instance state. */
struct ads1119_data {
	struct adc_context ctx;		/* generic ADC sequencing state */
	k_timeout_t ready_time;	/* conversion time for the configured data rate */
	struct k_sem acq_sem;		/* wakes the acquisition path */
	int16_t *buffer;		/* next result slot */
	int16_t *buffer_ptr;		/* buffer start, for repeated sampling */
#if CONFIG_ADC_ASYNC
	struct k_thread thread;		/* acquisition thread (async mode) */
#endif
	bool differential;	/* true when a differential MUX setting is used */
};
/* Read one 8-bit register using the RREG command encoding. */
static int ads1119_read_reg(const struct device *dev, enum ads1119_reg reg_addr, uint8_t *reg_val)
{
	const struct ads1119_config *config = dev->config;
	const uint8_t cmd = ADS1119_CMD_READ_REG | reg_addr;

	return i2c_reg_read_byte_dt(&config->bus, cmd, reg_val);
}
/* Write the configuration register using the WREG command. */
static int ads1119_write_reg(const struct device *dev, uint8_t reg)
{
	const struct ads1119_config *config = dev->config;

	return i2c_reg_write_byte_dt(&config->bus, ADS1119_CMD_WRITE_REG, reg);
}
/*
 * Map an ADC acquisition time to an ADS1119 data-rate (DR) field value.
 *
 * Accepts ADC_ACQ_TIME_DEFAULT or ADC_ACQ_TIME(ADC_ACQ_TIME_TICKS, n)
 * where n is one of the ADS1119_CONFIG_DR_* values. As a side effect,
 * data->ready_time is set to the conversion duration (plus margin) to
 * wait before polling for a result.
 *
 * Returns the DR value on success, -EINVAL otherwise.
 */
static inline int ads1119_acq_time_to_dr(const struct device *dev,
					 uint16_t acq_time)
{
	/* Output data rates in samples per second, indexed by DR value. */
	static const uint16_t rate_sps[] = {20, 90, 330, 1000};
	struct ads1119_data *data = dev->data;
	uint16_t acq_value = ADC_ACQ_TIME_VALUE(acq_time);
	uint16_t ready_time_us = 0;
	int odr = -EINVAL;

	if (acq_time == ADC_ACQ_TIME_DEFAULT) {
		acq_value = ADS1119_CONFIG_DR_DEFAULT;
	} else if (ADC_ACQ_TIME_UNIT(acq_time) != ADC_ACQ_TIME_TICKS) {
		return -EINVAL;
	}

	if (acq_value <= ADS1119_CONFIG_DR_1000) {
		odr = acq_value;
		ready_time_us = (1000 * 1000) / rate_sps[acq_value];
	}

	/* As per datasheet acquisition time is a bit longer wait a bit more
	 * to ensure data ready at first try
	 */
	data->ready_time = K_USEC(ready_time_us + 10);

	return odr;
}
/* Issue the START/SYNC command to begin a single-shot conversion. */
static int ads1119_send_start_read(const struct device *dev)
{
	const struct ads1119_config *config = dev->config;
	const uint8_t start_cmd = ADS1119_CMD_START_SYNC;

	return i2c_write_dt(&config->bus, &start_cmd, sizeof(start_cmd));
}
/*
 * Block until the status register reports a conversion result: first sleep
 * for the expected conversion time, then poll every 100 us.
 * NOTE(review): there is no upper bound — if the ready bit never asserts
 * (e.g. device removed mid-sequence) this polls forever; consider a timeout.
 */
static int ads1119_wait_data_ready(const struct device *dev)
{
	int rc;
	struct ads1119_data *data = dev->data;
	k_sleep(data->ready_time);
	uint8_t status = 0;
	rc = ads1119_read_reg(dev, ADS1119_REG_STATUS, &status);
	if (rc != 0) {
		return rc;
	}
	while ((status & ADS1119_STATUS_MASK_READY) == 0) {
		k_sleep(K_USEC(100));
		rc = ads1119_read_reg(dev, ADS1119_REG_STATUS, &status);
		if (rc != 0) {
			return rc;
		}
	}
	return 0;
}
/*
 * Read the latest conversion result with the RDATA command.
 *
 * Fix: previously *buff was written even when the I2C transfer failed,
 * storing a value derived from uninitialized bytes. The output is now
 * written only on success.
 *
 * @param buff Output, big-endian wire data converted to host order.
 * @return 0 on success, negative errno from the I2C transfer otherwise.
 */
static int ads1119_read_sample(const struct device *dev, uint16_t *buff)
{
	int res;
	uint8_t rx_bytes[2];
	const struct ads1119_config *config = dev->config;
	const uint8_t cmd = ADS1119_CMD_READ_DATA;

	res = i2c_write_read_dt(&config->bus,
				&cmd, sizeof(cmd),
				rx_bytes, sizeof(rx_bytes));
	if (res != 0) {
		return res;
	}

	*buff = sys_get_be16(rx_bytes);
	return 0;
}
/*
 * Build the CONFIG register for the requested channel: reference source,
 * MUX selection (three differential pairs or four single-ended inputs),
 * data rate derived from the acquisition time, and gain (1 or 4). Only
 * single-shot mode is supported. The result is written to the device.
 */
static int ads1119_channel_setup(const struct device *dev,
				 const struct adc_channel_cfg *channel_cfg)
{
	struct ads1119_data *data = dev->data;
	uint8_t config = 0;
	int dr = 0;
	/* The device exposes a single logical channel (channel 0). */
	if (channel_cfg->channel_id != 0) {
		return -EINVAL;
	}
	switch (channel_cfg->reference) {
	case ADC_REF_EXTERNAL0:
		config |= ADS1119_CONFIG_VREF(ADS1119_CONFIG_VREF_EXTERNAL);
		break;
	case ADC_REF_INTERNAL:
		config |= ADS1119_CONFIG_VREF(ADS1119_CONFIG_VREF_INTERNAL);
		break;
	default:
		return -EINVAL;
	}
	if (channel_cfg->differential) {
		/* Only the adjacent input pairs supported by the MUX. */
		if (channel_cfg->input_positive == 0 && channel_cfg->input_negative == 1) {
			config |= ADS1119_CONFIG_MUX(ADS1119_CONFIG_MUX_DIFF_0_1);
		} else if (channel_cfg->input_positive == 1 && channel_cfg->input_negative == 2) {
			config |= ADS1119_CONFIG_MUX(ADS1119_CONFIG_MUX_DIFF_1_2);
		} else if (channel_cfg->input_positive == 2 && channel_cfg->input_negative == 3) {
			config |= ADS1119_CONFIG_MUX(ADS1119_CONFIG_MUX_DIFF_2_3);
		} else {
			return -EINVAL;
		}
	} else {
		if (channel_cfg->input_positive == 0) {
			config |= ADS1119_CONFIG_MUX(ADS1119_CONFIG_MUX_SINGLE_0);
		} else if (channel_cfg->input_positive == 1) {
			config |= ADS1119_CONFIG_MUX(ADS1119_CONFIG_MUX_SINGLE_1);
		} else if (channel_cfg->input_positive == 2) {
			config |= ADS1119_CONFIG_MUX(ADS1119_CONFIG_MUX_SINGLE_2);
		} else if (channel_cfg->input_positive == 3) {
			config |= ADS1119_CONFIG_MUX(ADS1119_CONFIG_MUX_SINGLE_3);
		} else {
			return -EINVAL;
		}
	}
	data->differential = channel_cfg->differential;
	dr = ads1119_acq_time_to_dr(dev, channel_cfg->acquisition_time);
	if (dr < 0) {
		return dr;
	}
	config |= ADS1119_CONFIG_DR(dr);
	switch (channel_cfg->gain) {
	case ADC_GAIN_1:
		config |= ADS1119_CONFIG_GAIN(ADS1119_CONFIG_GAIN_1);
		break;
	case ADC_GAIN_4:
		config |= ADS1119_CONFIG_GAIN(ADS1119_CONFIG_GAIN_4);
		break;
	default:
		return -EINVAL;
	}
	config |= ADS1119_CONFIG_CM(ADS1119_CONFIG_CM_SINGLE); /* Only single shot supported */
	return ads1119_write_reg(dev, config);
}
/* Ensure the user buffer can hold one int16_t sample per sampling
 * (initial sampling plus any extra_samplings).
 */
static int ads1119_validate_buffer_size(const struct adc_sequence *sequence)
{
	size_t required = sizeof(int16_t);

	if (sequence->options != NULL) {
		required *= 1 + sequence->options->extra_samplings;
	}

	return (sequence->buffer_size < required) ? -ENOMEM : 0;
}
/* Validate an adc_sequence against the device's capabilities: exactly
 * channel 0, no oversampling, and the resolution matching the current
 * single-ended/differential configuration.
 */
static int ads1119_validate_sequence(const struct device *dev, const struct adc_sequence *sequence)
{
	const struct ads1119_data *data = dev->data;
	/* Single-ended readings lose the sign bit, hence one bit less. */
	const uint8_t expected_res =
		data->differential ? ADS1119_RESOLUTION : ADS1119_RESOLUTION - 1;

	if (sequence->resolution != expected_res ||
	    sequence->channels != BIT(0) ||
	    sequence->oversampling != 0) {
		return -EINVAL;
	}

	return ads1119_validate_buffer_size(sequence);
}
/* adc_context callback: rewind the write pointer when a sampling is
 * repeated so the repeated samples overwrite the previous ones.
 */
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
					      bool repeat_sampling)
{
	struct ads1119_data *data = CONTAINER_OF(ctx, struct ads1119_data, ctx);

	if (repeat_sampling) {
		data->buffer = data->buffer_ptr;
	}
}
/* adc_context callback: remember the sampling's start position and
 * wake the acquisition path via the semaphore.
 */
static void adc_context_start_sampling(struct adc_context *ctx)
{
	struct ads1119_data *data = CONTAINER_OF(ctx, struct ads1119_data, ctx);

	data->buffer_ptr = data->buffer;
	k_sem_give(&data->acq_sem);
}
/* Validate the sequence and hand it to the adc_context layer.
 *
 * @param wait When true, block until the context reports completion;
 *             when false, return immediately after starting.
 */
static int ads1119_adc_start_read(const struct device *dev,
				  const struct adc_sequence *sequence,
				  bool wait)
{
	struct ads1119_data *data = dev->data;
	int rc = ads1119_validate_sequence(dev, sequence);

	if (rc != 0) {
		return rc;
	}

	data->buffer = sequence->buffer;
	adc_context_start_read(&data->ctx, sequence);

	if (!wait) {
		return rc;
	}
	return adc_context_wait_for_completion(&data->ctx);
}
/* Execute one conversion: wait for a trigger from adc_context, start
 * the conversion, poll for completion, and store the sample. Any
 * failure is reported to the context via adc_context_complete().
 */
static int ads1119_adc_perform_read(const struct device *dev)
{
	struct ads1119_data *data = dev->data;
	int rc;

	k_sem_take(&data->acq_sem, K_FOREVER);

	rc = ads1119_send_start_read(dev);
	if (rc != 0) {
		goto error;
	}

	rc = ads1119_wait_data_ready(dev);
	if (rc != 0) {
		goto error;
	}

	rc = ads1119_read_sample(dev, data->buffer);
	if (rc != 0) {
		goto error;
	}

	data->buffer++;
	adc_context_on_sampling_done(&data->ctx, dev);
	return rc;

error:
	adc_context_complete(&data->ctx, rc);
	return rc;
}
#if CONFIG_ADC_ASYNC
/* Asynchronous read entry point: completion is signalled through the
 * caller's k_poll_signal by the adc_context layer.
 */
static int ads1119_adc_read_async(const struct device *dev,
				  const struct adc_sequence *sequence,
				  struct k_poll_signal *async)
{
	struct ads1119_data *data = dev->data;
	int err;

	adc_context_lock(&data->ctx, true, async);
	err = ads1119_adc_start_read(dev, sequence, true);
	adc_context_release(&data->ctx, err);

	return err;
}
/* Blocking read entry point (async builds): the acquisition thread
 * performs the conversion while this thread waits for completion.
 */
static int ads1119_read(const struct device *dev,
			const struct adc_sequence *sequence)
{
	struct ads1119_data *data = dev->data;
	int err;

	adc_context_lock(&data->ctx, false, NULL);
	err = ads1119_adc_start_read(dev, sequence, true);
	adc_context_release(&data->ctx, err);

	return err;
}
#else
/* Blocking read for builds without CONFIG_ADC_ASYNC: start the read
 * without waiting, then perform conversions on the caller's thread
 * until the adc_context layer gives ctx.sync (completion or error).
 */
static int ads1119_read(const struct device *dev,
			const struct adc_sequence *sequence)
{
	int rc;
	struct ads1119_data *data = dev->data;

	adc_context_lock(&data->ctx, false, NULL);
	rc = ads1119_adc_start_read(dev, sequence, false);
	/* ctx.sync is given by adc_context when all samplings are done;
	 * until then each loop iteration performs one conversion.
	 */
	while (rc == 0 && k_sem_take(&data->ctx.sync, K_NO_WAIT) != 0) {
		rc = ads1119_adc_perform_read(dev);
	}
	adc_context_release(&data->ctx, rc);
	return rc;
}
#endif
#if CONFIG_ADC_ASYNC
/* Acquisition thread (async builds): loops forever performing one
 * conversion per trigger from adc_context_start_sampling().
 */
static void ads1119_acquisition_thread(void *p1, void *p2, void *p3)
{
	const struct device *dev = p1;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	for (;;) {
		ads1119_adc_perform_read(dev);
	}
}
#endif
/* Driver init: set up the adc_context and semaphore, verify the bus
 * and device are reachable, and (async builds) spawn the acquisition
 * thread before unlocking the context.
 */
static int ads1119_init(const struct device *dev)
{
	const struct ads1119_config *config = dev->config;
	struct ads1119_data *data = dev->data;
	uint8_t status;
	int rc;

	adc_context_init(&data->ctx);
	k_sem_init(&data->acq_sem, 0, 1);

	if (!device_is_ready(config->bus.bus)) {
		return -ENODEV;
	}

	/* Probe the device by reading its status register. */
	rc = ads1119_read_reg(dev, ADS1119_REG_STATUS, &status);
	if (rc != 0) {
		LOG_ERR("Could not get %s status", dev->name);
		return rc;
	}
#if CONFIG_ADC_ASYNC
	k_tid_t tid =
		k_thread_create(&data->thread, config->stack,
				CONFIG_ADC_ADS1119_ACQUISITION_THREAD_STACK_SIZE,
				ads1119_acquisition_thread,
				(void *)dev, NULL, NULL,
				CONFIG_ADC_ADS1119_ASYNC_THREAD_INIT_PRIO,
				0, K_NO_WAIT);
	k_thread_name_set(tid, "adc_ads1119");
#endif
	adc_context_unlock_unconditionally(&data->ctx);
	return rc;
}
/* ADC driver API table bound to every ADS1119 instance below. */
static const struct adc_driver_api api = {
	.channel_setup = ads1119_channel_setup,
	.read = ads1119_read,
	.ref_internal = ADS1119_REF_INTERNAL,
#ifdef CONFIG_ADC_ASYNC
	.read_async = ads1119_adc_read_async,
#endif
};
/*
 * Per-instance definition: optional acquisition-thread stack (async
 * builds only), config with the I2C bus spec from devicetree, the
 * run-time data, and the device object itself.
 */
#define ADC_ADS1119_INST_DEFINE(n) \
	IF_ENABLED(CONFIG_ADC_ASYNC, \
		   (static \
		    K_KERNEL_STACK_DEFINE(thread_stack_##n, \
					  CONFIG_ADC_ADS1119_ACQUISITION_THREAD_STACK_SIZE);)) \
	static const struct ads1119_config config_##n = { \
		.bus = I2C_DT_SPEC_GET(DT_DRV_INST(n)), \
		IF_ENABLED(CONFIG_ADC_ASYNC, (.stack = thread_stack_##n)) \
	}; \
	static struct ads1119_data data_##n; \
	DEVICE_DT_INST_DEFINE(n, ads1119_init, \
			      NULL, &data_##n, &config_##n, \
			      POST_KERNEL, CONFIG_ADC_INIT_PRIORITY, \
			      &api);
DT_INST_FOREACH_STATUS_OKAY(ADC_ADS1119_INST_DEFINE);
``` | /content/code_sandbox/drivers/adc/adc_ads1119.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,591 |
```c
/*
*
*/
#define DT_DRV_COMPAT ene_kb1200_adc
#include <zephyr/kernel.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/pinctrl.h>
#include <errno.h>
#include <reg/adc.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
/* Per-instance constant configuration (populated from devicetree). */
struct adc_kb1200_config {
	/* ADC Register base address */
	struct adc_regs *adc;
	/* Pin control */
	const struct pinctrl_dev_config *pcfg;
};
/* Per-instance run-time state. */
struct adc_kb1200_data {
	/* Generic ADC context (lock, sync, sequence bookkeeping). */
	struct adc_context ctx;
	/* Back-pointer to the device, stored at read time for use by the
	 * adc_context callbacks.
	 */
	const struct device *adc_dev;
	/* Next write position in the caller's sample buffer. */
	uint16_t *buffer;
	/* Start of the current sampling, for repeat-sampling rewind. */
	uint16_t *repeat_buffer;
	/* One past the end of the caller's buffer (overflow guard). */
	uint16_t *buf_end;
};
/* ADC local functions */
/* Check that the sequence buffer can hold one uint16_t per selected
 * channel (lower 8 bits of the mask), for every sampling requested.
 */
static bool adc_kb1200_validate_buffer_size(const struct adc_sequence *sequence)
{
	size_t needed;
	int num_channels = 0;

	for (uint32_t mask = 0x80; mask != 0; mask >>= 1) {
		if (sequence->channels & mask) {
			num_channels++;
		}
	}

	needed = num_channels * sizeof(uint16_t);
	if (sequence->options != NULL) {
		needed *= 1 + sequence->options->extra_samplings;
	}

	return needed <= sequence->buffer_size;
}
/* ADC Sample Flow (by using adc_context.h api function)
* 1. Start ADC sampling (set up flag ctx->sync)
* adc_context_start_read() -> adc_context_start_sampling()
* 2. Wait ADC sample finish (by monitor flag ctx->sync)
* adc_context_wait_for_completion
* 3. Finish ADC sample (isr clear flag ctx->sync)
* adc_context_on_sampling_done -> adc_context_complete
*/
/*
 * Run one blocking conversion pass over all requested channels.
 *
 * Validates the sequence, lets the adc_context layer program the
 * channel mask and enable the ADC (via adc_context_start_sampling),
 * then polls each channel's data register until its "invalid" flag
 * clears — the KB1200 ADC has no interrupt — storing each result in
 * the caller's buffer.
 *
 * @return 0 on success, -EINVAL/-ENOTSUP/-ENOMEM for a bad sequence
 *         or buffer overrun, -EBUSY if a channel never became valid.
 */
static int adc_kb1200_start_read(const struct device *dev, const struct adc_sequence *sequence)
{
	const struct adc_kb1200_config *config = dev->config;
	struct adc_kb1200_data *data = dev->data;
	int error = 0;
	if (!sequence->channels || (sequence->channels & ~BIT_MASK(ADC_MAX_CHAN))) {
		printk("Invalid ADC channels.\n");
		return -EINVAL;
	}
	/* Fixed 10 bit resolution of ene ADC */
	if (sequence->resolution != ADC_RESOLUTION) {
		printk("Unfixed 10 bit ADC resolution.\n");
		return -ENOTSUP;
	}
	/* Check sequence->buffer_size is enough */
	if (!adc_kb1200_validate_buffer_size(sequence)) {
		printk("ADC buffer size too small.\n");
		return -ENOMEM;
	}
	/* assign record buffer pointer */
	data->buffer = sequence->buffer;
	data->buf_end = data->buffer + sequence->buffer_size / sizeof(uint16_t);
	/* store device for adc_context_start_read() */
	data->adc_dev = dev;
	/* Inform adc start sampling */
	adc_context_start_read(&data->ctx, sequence);
	/* Since kb1200 adc has no irq. So need polling the adc conversion
	 * flag to be valid, then record adc value.
	 */
	uint32_t channels = (config->adc->ADCCFG & ADC_CHANNEL_BIT_MASK) >> ADC_CHANNEL_BIT_POS;
	while (channels) {
		int count;
		int ch_num;
		count = 0;
		/* Lowest set bit = next channel number to collect. */
		ch_num = find_lsb_set(channels) - 1;
		/* wait valid flag */
		while (config->adc->ADCDAT[ch_num] & ADC_INVALID_VALUE) {
			k_busy_wait(ADC_WAIT_TIME);
			count++;
			if (count >= ADC_WAIT_CNT) {
				printk("ADC busy timeout...\n");
				error = -EBUSY;
				break;
			}
		}
		/* check buffer size is enough then record adc value */
		/* NOTE(review): after a timeout the value stored below may
		 * still carry the invalid flag — confirm whether it should
		 * be skipped instead.
		 */
		if (data->buffer < data->buf_end) {
			*data->buffer = (uint16_t)(config->adc->ADCDAT[ch_num]);
			data->buffer++;
		} else {
			error = -EINVAL;
			break;
		}
		/* clear completed channel */
		channels &= ~BIT(ch_num);
	}
	/* Because polling the adc conversion flag. don't need wait_for_completion*/
	/* Inform adc sampling is done */
	adc_context_on_sampling_done(&data->ctx, dev);
	return error;
}
/* ADC api functions */
/*
 * Validate a channel configuration against the KB1200's fixed
 * capabilities: channel ids below ADC_MAX_CHAN, default acquisition
 * time, single-ended only, gain 1, internal reference. No registers
 * are written here — channel selection happens at read time.
 */
static int adc_kb1200_channel_setup(const struct device *dev,
				    const struct adc_channel_cfg *channel_cfg)
{
	if (channel_cfg->channel_id >= ADC_MAX_CHAN) {
		printk("Invalid channel %d.\n", channel_cfg->channel_id);
		return -EINVAL;
	}
	if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
		printk("Unsupported channel acquisition time.\n");
		return -ENOTSUP;
	}
	if (channel_cfg->differential) {
		printk("Differential channels are not supported.\n");
		return -ENOTSUP;
	}
	if (channel_cfg->gain != ADC_GAIN_1) {
		printk("Unsupported channel gain %d.\n", channel_cfg->gain);
		return -ENOTSUP;
	}
	if (channel_cfg->reference != ADC_REF_INTERNAL) {
		printk("Unsupported channel reference.\n");
		return -ENOTSUP;
	}
	printk("ADC channel %d configured.\n", channel_cfg->channel_id);
	return 0;
}
/* Blocking read entry point: take the context lock, run one polled
 * conversion pass, and release with the result.
 */
static int adc_kb1200_read(const struct device *dev, const struct adc_sequence *sequence)
{
	struct adc_kb1200_data *data = dev->data;
	int ret;

	adc_context_lock(&data->ctx, false, NULL);
	ret = adc_kb1200_start_read(dev, sequence);
	adc_context_release(&data->ctx, ret);

	return ret;
}
#if defined(CONFIG_ADC_ASYNC)
/* Async read entry point: the conversion itself is still polled
 * synchronously; completion is reported through the caller's signal.
 */
static int adc_kb1200_read_async(const struct device *dev, const struct adc_sequence *sequence,
				 struct k_poll_signal *async)
{
	struct adc_kb1200_data *data = dev->data;
	int ret;

	adc_context_lock(&data->ctx, true, async);
	ret = adc_kb1200_start_read(dev, sequence);
	adc_context_release(&data->ctx, ret);

	return ret;
}
#endif /* CONFIG_ADC_ASYNC */
/* ADC api function (using by adc_context.H function) */
/* adc_context callback: program the requested channel mask into ADCCFG
 * and enable the ADC. Results are then collected by polling in
 * adc_kb1200_start_read() (the hardware has no interrupt).
 */
static void adc_context_start_sampling(struct adc_context *ctx)
{
	struct adc_kb1200_data *data = CONTAINER_OF(ctx, struct adc_kb1200_data, ctx);
	const struct device *dev = data->adc_dev;
	const struct adc_kb1200_config *config = dev->config;
	data->repeat_buffer = data->buffer;
	/* Replace only the channel-select field, keep other ADCCFG bits. */
	config->adc->ADCCFG = (config->adc->ADCCFG & ~ADC_CHANNEL_BIT_MASK) |
			      (ctx->sequence.channels << ADC_CHANNEL_BIT_POS);
	config->adc->ADCCFG |= ADC_FUNCTION_ENABLE;
}
/* adc_context callback: rewind the write pointer when repeating. */
static void adc_context_update_buffer_pointer(struct adc_context *ctx, bool repeat_sampling)
{
	struct adc_kb1200_data *data = CONTAINER_OF(ctx, struct adc_kb1200_data, ctx);

	if (!repeat_sampling) {
		return;
	}
	data->buffer = data->repeat_buffer;
}
struct adc_driver_api adc_kb1200_api = {
.channel_setup = adc_kb1200_channel_setup,
.read = adc_kb1200_read,
.ref_internal = ADC_VREF_ANALOG,
#if defined(CONFIG_ADC_ASYNC)
.read_async = adc_kb1200_read_async,
#endif
};
/* Driver init: release the adc_context lock and apply the default
 * pinctrl state so the ADC input pins are routed to the peripheral.
 */
static int adc_kb1200_init(const struct device *dev)
{
	const struct adc_kb1200_config *config = dev->config;
	struct adc_kb1200_data *data = dev->data;
	int ret;

	adc_context_unlock_unconditionally(&data->ctx);

	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret >= 0) {
		return 0;
	}

	printk("ADC pinctrl setup failed (%d).\n", ret);
	return ret;
}
/*
 * Per-instance definition: pinctrl state, run-time data with the
 * adc_context primitives statically initialised, config with the
 * register base from devicetree, and the device object itself.
 */
#define ADC_KB1200_DEVICE(inst) \
	PINCTRL_DT_INST_DEFINE(inst); \
	static struct adc_kb1200_data adc_kb1200_data_##inst = { \
		ADC_CONTEXT_INIT_TIMER(adc_kb1200_data_##inst, ctx), \
		ADC_CONTEXT_INIT_LOCK(adc_kb1200_data_##inst, ctx), \
		ADC_CONTEXT_INIT_SYNC(adc_kb1200_data_##inst, ctx), \
	}; \
	static const struct adc_kb1200_config adc_kb1200_config_##inst = { \
		.adc = (struct adc_regs *)DT_INST_REG_ADDR(inst), \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \
	}; \
	DEVICE_DT_INST_DEFINE(inst, &adc_kb1200_init, NULL, &adc_kb1200_data_##inst, \
			      &adc_kb1200_config_##inst, PRE_KERNEL_1, \
			      CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &adc_kb1200_api);
DT_INST_FOREACH_STATUS_OKAY(ADC_KB1200_DEVICE)
``` | /content/code_sandbox/drivers/adc/adc_ene_kb1200.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,923 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.