text stringlengths 9 39.2M | dir stringlengths 25 226 | lang stringclasses 163 values | created_date timestamp[s] | updated_date timestamp[s] | repo_name stringclasses 751 values | repo_full_name stringclasses 752 values | star int64 1.01k 183k | len_tokens int64 1 18.5M |
|---|---|---|---|---|---|---|---|---|
```unknown
#
#
#
# NXP PCA9685 I2C LED controller used as a generic 16-channel PWM device.
config PWM_PCA9685
	bool "PCA9685 16-channel, 12-bit PWM Fm+ I2C-bus LED controller"
	default y
	depends on DT_HAS_NXP_PCA9685_PWM_ENABLED
	select I2C
	help
	  Enable driver for PCA9685 16-channel, 12-bit PWM Fm+ I2C-bus LED
	  controller.
``` | /content/code_sandbox/drivers/pwm/Kconfig.pca9685 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 87 |
```unknown
# Infineon XMC4XXX CCU8 capture/compare unit used as a PWM output.
config PWM_XMC4XXX_CCU8
	bool "Infineon XMC4XXX CCU8 driver"
	default y
	depends on DT_HAS_INFINEON_XMC4XXX_CCU8_PWM_ENABLED
	help
	  Enables Infineon XMC4XXX CCU8 PWM driver.
``` | /content/code_sandbox/drivers/pwm/Kconfig.xmc4xxx_ccu8 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 64 |
```c
/*
*
*/
/**
* @file
* @brief PWM shell commands.
*/
#include <zephyr/shell/shell.h>
#include <zephyr/drivers/pwm.h>
#include <stdlib.h>
/* Positions of each positional argument within argv, shared by all three
 * subcommands: pwm <cmd> <device> <channel> <period> <pulse> [flags]
 */
struct args_index {
	uint8_t device;
	uint8_t channel;
	uint8_t period;
	uint8_t pulse;
	uint8_t flags;
};
/* Single argv layout used by the cycles/usec/nsec handlers below. */
static const struct args_index args_indx = {
	.device = 1,
	.channel = 2,
	.period = 3,
	.pulse = 4,
	.flags = 5,
};
/*
 * Shell handler: program a PWM channel in raw hardware clock cycles.
 * Usage: pwm cycles <device> <channel> <period> <pulse> [flags]
 */
static int cmd_cycles(const struct shell *sh, size_t argc, char **argv)
{
	const struct device *dev = device_get_binding(argv[args_indx.device]);
	pwm_flags_t flags = 0;
	uint32_t channel;
	uint32_t period;
	uint32_t pulse;
	int ret;

	if (dev == NULL) {
		shell_error(sh, "PWM device not found");
		return -EINVAL;
	}

	channel = strtoul(argv[args_indx.channel], NULL, 0);
	period = strtoul(argv[args_indx.period], NULL, 0);
	pulse = strtoul(argv[args_indx.pulse], NULL, 0);

	/* Optional trailing [flags] argument. */
	if (argc == (args_indx.flags + 1)) {
		flags = strtoul(argv[args_indx.flags], NULL, 0);
	}

	ret = pwm_set_cycles(dev, channel, period, pulse, flags);
	if (ret != 0) {
		shell_error(sh, "failed to setup PWM (err %d)", ret);
		return ret;
	}

	return 0;
}
/*
 * Shell handler: program a PWM channel with period/pulse in microseconds.
 * Usage: pwm usec <device> <channel> <period> <pulse> [flags]
 */
static int cmd_usec(const struct shell *sh, size_t argc, char **argv)
{
	const struct device *dev = device_get_binding(argv[args_indx.device]);
	pwm_flags_t flags = 0;
	uint32_t channel;
	uint32_t period;
	uint32_t pulse;
	int ret;

	if (dev == NULL) {
		shell_error(sh, "PWM device not found");
		return -EINVAL;
	}

	channel = strtoul(argv[args_indx.channel], NULL, 0);
	period = strtoul(argv[args_indx.period], NULL, 0);
	pulse = strtoul(argv[args_indx.pulse], NULL, 0);

	/* Optional trailing [flags] argument. */
	if (argc == (args_indx.flags + 1)) {
		flags = strtoul(argv[args_indx.flags], NULL, 0);
	}

	/* PWM_USEC() converts microseconds to the API's nanosecond units. */
	ret = pwm_set(dev, channel, PWM_USEC(period), PWM_USEC(pulse), flags);
	if (ret != 0) {
		shell_error(sh, "failed to setup PWM (err %d)", ret);
		return ret;
	}

	return 0;
}
/*
 * Shell handler: program a PWM channel with period/pulse in nanoseconds
 * (the native unit of pwm_set(), so values are passed through unchanged).
 * Usage: pwm nsec <device> <channel> <period> <pulse> [flags]
 */
static int cmd_nsec(const struct shell *sh, size_t argc, char **argv)
{
	const struct device *dev = device_get_binding(argv[args_indx.device]);
	pwm_flags_t flags = 0;
	uint32_t channel;
	uint32_t period;
	uint32_t pulse;
	int ret;

	if (dev == NULL) {
		shell_error(sh, "PWM device not found");
		return -EINVAL;
	}

	channel = strtoul(argv[args_indx.channel], NULL, 0);
	period = strtoul(argv[args_indx.period], NULL, 0);
	pulse = strtoul(argv[args_indx.pulse], NULL, 0);

	/* Optional trailing [flags] argument. */
	if (argc == (args_indx.flags + 1)) {
		flags = strtoul(argv[args_indx.flags], NULL, 0);
	}

	ret = pwm_set(dev, channel, period, pulse, flags);
	if (ret != 0) {
		shell_error(sh, "failed to setup PWM (err %d)", ret);
		return ret;
	}

	return 0;
}
/* Register the "pwm" root command with its three subcommands. Each takes
 * 5 mandatory arguments (command name + 4 values) plus 1 optional [flags].
 */
SHELL_STATIC_SUBCMD_SET_CREATE(pwm_cmds,
	SHELL_CMD_ARG(cycles, NULL, "<device> <channel> <period in cycles> "
		      "<pulse width in cycles> [flags]", cmd_cycles, 5, 1),
	SHELL_CMD_ARG(usec, NULL, "<device> <channel> <period in usec> "
		      "<pulse width in usec> [flags]", cmd_usec, 5, 1),
	SHELL_CMD_ARG(nsec, NULL, "<device> <channel> <period in nsec> "
		      "<pulse width in nsec> [flags]", cmd_nsec, 5, 1),
	SHELL_SUBCMD_SET_END
);
SHELL_CMD_REGISTER(pwm, &pwm_cmds, "PWM shell commands", NULL);
``` | /content/code_sandbox/drivers/pwm/pwm_shell.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 912 |
```unknown
# i.MX PWM Config
menuconfig PWM_IMX
	bool "i.MX PWM Driver"
	default y
	depends on DT_HAS_FSL_IMX27_PWM_ENABLED
	help
	  Enable support for i.MX pwm driver.
# Bounded busy-wait iterations used while waiting for the software reset
# to complete when a channel is disabled.
config PWM_PWMSWR_LOOP
	int "Loop count for PWM Software Reset"
	default 5
	depends on PWM_IMX
	help
	  Loop count for PWM Software Reset when disabling PWM channel.
``` | /content/code_sandbox/drivers/pwm/Kconfig.imx | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 91 |
```unknown
# PWM configuration options
menuconfig PWM
	bool "Pulse Width Modulation (PWM) drivers"
	help
	  Enable config options for PWM drivers.
if PWM
# Register the "pwm" log module using the common logging template.
module = PWM
module-str = pwm
source "subsys/logging/Kconfig.template.log_config"
config PWM_INIT_PRIORITY
	int "PWM initialization priority"
	default KERNEL_INIT_PRIORITY_DEVICE
	help
	  System initialization priority for PWM drivers.
config PWM_SHELL
	bool "PWM shell"
	depends on SHELL
	help
	  Enable the PWM related shell commands.
config PWM_CAPTURE
	bool "Provide API for PWM capture"
	help
	  This option extends the Zephyr PWM API with the ability to capture PWM
	  period/pulse widths.
# Per-vendor driver options; each fragment gates itself on its devicetree
# compatible, so sourcing all of them is harmless.
source "drivers/pwm/Kconfig.b91"
source "drivers/pwm/Kconfig.cc13xx_cc26xx_timer"
source "drivers/pwm/Kconfig.stm32"
source "drivers/pwm/Kconfig.sifive"
source "drivers/pwm/Kconfig.nrf_sw"
source "drivers/pwm/Kconfig.nrfx"
source "drivers/pwm/Kconfig.mcux_ftm"
source "drivers/pwm/Kconfig.imx"
source "drivers/pwm/Kconfig.it8xxx2"
source "drivers/pwm/Kconfig.esp32"
source "drivers/pwm/Kconfig.sam"
source "drivers/pwm/Kconfig.mcux"
source "drivers/pwm/Kconfig.mcux_sctimer"
source "drivers/pwm/Kconfig.mcux_qtmr"
source "drivers/pwm/Kconfig.xec"
source "drivers/pwm/Kconfig.litex"
source "drivers/pwm/Kconfig.rv32m1_tpm"
source "drivers/pwm/Kconfig.mcux_tpm"
source "drivers/pwm/Kconfig.sam0"
source "drivers/pwm/Kconfig.npcx"
source "drivers/pwm/Kconfig.xlnx"
source "drivers/pwm/Kconfig.mcux_pwt"
source "drivers/pwm/Kconfig.gecko"
source "drivers/pwm/Kconfig.gd32"
source "drivers/pwm/Kconfig.rcar"
source "drivers/pwm/Kconfig.pca9685"
source "drivers/pwm/Kconfig.max31790"
source "drivers/pwm/Kconfig.test"
source "drivers/pwm/Kconfig.rpi_pico"
source "drivers/pwm/Kconfig.intel_blinky"
source "drivers/pwm/Kconfig.xmc4xxx_ccu4"
source "drivers/pwm/Kconfig.xmc4xxx_ccu8"
source "drivers/pwm/Kconfig.mcux_ctimer"
source "drivers/pwm/Kconfig.numaker"
source "drivers/pwm/Kconfig.nxp_s32_emios"
source "drivers/pwm/Kconfig.nxp_flexio"
source "drivers/pwm/Kconfig.ene"
endif # PWM
``` | /content/code_sandbox/drivers/pwm/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 575 |
```unknown
# NXP LPC SCTimer peripheral driven as a PWM output.
config PWM_MCUX_SCTIMER
	bool "MCUX SCTimer PWM driver"
	default y
	depends on DT_HAS_NXP_SCTIMER_PWM_ENABLED
	depends on CLOCK_CONTROL && PINCTRL
	help
	  Enable sctimer based pwm driver.
``` | /content/code_sandbox/drivers/pwm/Kconfig.mcux_sctimer | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 55 |
```c
/*
*
*/
#define DT_DRV_COMPAT ene_kb1200_pwm
#include <zephyr/drivers/pwm.h>
#include <zephyr/drivers/pinctrl.h>
#include <reg/pwm.h>
/* Device config */
struct pwm_kb1200_config {
	/* pwm controller base address */
	struct pwm_regs *pwm;
	/* pin configuration applied at init time */
	const struct pinctrl_dev_config *pcfg;
};
/* Driver data */
struct pwm_kb1200_data {
	/* PWM cycles per second */
	uint32_t cycles_per_sec;
};
/* PWM api functions */
/*
 * Set the PWM period/pulse in hardware cycles.
 *
 * The hardware counter is limited to PWM_MAX_CYCLES, so an 8-bit prescaler
 * is chosen such that period_cycles / prescaler fits in the counter.
 * A pulse width of 0 disables the output; returns -EINVAL when the
 * requested period cannot be represented (too large, or zero).
 */
static int pwm_kb1200_set_cycles(const struct device *dev, uint32_t channel, uint32_t period_cycles,
				 uint32_t pulse_cycles, pwm_flags_t flags)
{
	/* Single channel for each pwm device */
	ARG_UNUSED(channel);
	const struct pwm_kb1200_config *config = dev->config;
	int prescaler;
	uint32_t high_len;
	uint32_t cycle_len;
	/*
	 * Calculate PWM prescaler that let period_cycles map to
	 * maximum pwm period cycles and won't exceed it.
	 * Then prescaler = ceil (period_cycles / pwm_max_period_cycles)
	 */
	prescaler = DIV_ROUND_UP(period_cycles, PWM_MAX_CYCLES);
	if (prescaler > PWM_MAX_PRESCALER) {
		return -EINVAL;
	}
	/* If pulse_cycles is 0, switch PWM off and return. */
	if (pulse_cycles == 0) {
		config->pwm->PWMCFG &= ~PWM_ENABLE;
		return 0;
	}
	/* period_cycles == 0 yields prescaler == 0; reject it explicitly
	 * instead of dividing by zero below.
	 */
	if (prescaler == 0) {
		return -EINVAL;
	}
	high_len = (pulse_cycles / prescaler);
	cycle_len = (period_cycles / prescaler);
	/* Select PWM inverted polarity (ie. active-low pulse). */
	if (flags & PWM_POLARITY_INVERTED) {
		high_len = cycle_len - high_len;
	}
	/* Set PWM prescaler (bits [13:8] hold prescaler - 1). */
	config->pwm->PWMCFG = (config->pwm->PWMCFG & ~GENMASK(13, 8)) | ((prescaler - 1) << 8);
	/*
	 * period_cycles: PWM Cycle Length
	 * pulse_cycles : PWM High Length
	 */
	config->pwm->PWMHIGH = high_len;
	config->pwm->PWMCYC = cycle_len;
	/* Start pwm */
	config->pwm->PWMCFG |= PWM_ENABLE;
	return 0;
}
static int pwm_kb1200_get_cycles_per_sec(const struct device *dev, uint32_t channel,
uint64_t *cycles)
{
/* Single channel for each pwm device */
ARG_UNUSED(channel);
ARG_UNUSED(dev);
if (cycles) {
/* User does not have to know about lowest clock,
* the driver will select the most relevant one.
*/
*cycles = PWM_INPUT_FREQ_HI; /*32Mhz*/
}
return 0;
}
/* PWM API vtable exposed to the Zephyr PWM subsystem. */
static const struct pwm_driver_api pwm_kb1200_driver_api = {
	.set_cycles = pwm_kb1200_set_cycles,
	.get_cycles_per_sec = pwm_kb1200_get_cycles_per_sec,
};
/* Apply pinmux and select the 32 MHz clock source in push-pull mode.
 * The channel itself stays disabled until the first set_cycles() call.
 */
static int pwm_kb1200_init(const struct device *dev)
{
	int ret;
	const struct pwm_kb1200_config *config = dev->config;
	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret != 0) {
		return ret;
	}
	config->pwm->PWMCFG = PWM_SOURCE_CLK_32M | PWM_RULE1 | PWM_PUSHPULL;
	return 0;
}
/* Instantiate one driver per enabled devicetree node. */
#define KB1200_PWM_INIT(inst)                                                                      \
	PINCTRL_DT_INST_DEFINE(inst);                                                              \
	static const struct pwm_kb1200_config pwm_kb1200_cfg_##inst = {                            \
		.pwm = (struct pwm_regs *)DT_INST_REG_ADDR(inst),                                  \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst),                                      \
	};                                                                                         \
	static struct pwm_kb1200_data pwm_kb1200_data_##inst;                                      \
	DEVICE_DT_INST_DEFINE(inst, &pwm_kb1200_init, NULL, &pwm_kb1200_data_##inst,               \
			      &pwm_kb1200_cfg_##inst, PRE_KERNEL_1, CONFIG_PWM_INIT_PRIORITY,      \
			      &pwm_kb1200_driver_api);
DT_INST_FOREACH_STATUS_OKAY(KB1200_PWM_INIT)
``` | /content/code_sandbox/drivers/pwm/pwm_ene_kb1200.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 895 |
```c
/*
*
*/
#define DT_DRV_COMPAT nxp_sctimer_pwm
#include <errno.h>
#include <zephyr/drivers/pwm.h>
#include <fsl_sctimer.h>
#include <fsl_clock.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(pwm_mcux_sctimer, CONFIG_PWM_LOG_LEVEL);
#define CHANNEL_COUNT FSL_FEATURE_SCT_NUMBER_OF_OUTPUTS
/* Constant identifying that no event number has been set */
#define EVENT_NOT_SET FSL_FEATURE_SCT_NUMBER_OF_EVENTS
/* Static, devicetree-derived configuration for one SCTimer instance. */
struct pwm_mcux_sctimer_config {
	SCT_Type *base;
	uint32_t prescale;
	const struct pinctrl_dev_config *pincfg;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
};
/* Runtime state: per-channel SDK event/handle bookkeeping plus the single
 * shared period (all channels of one SCTimer share one counter).
 */
struct pwm_mcux_sctimer_data {
	uint32_t event_number[CHANNEL_COUNT];
	sctimer_pwm_signal_param_t channel[CHANNEL_COUNT];
	uint32_t match_period;
	uint32_t configured_chan;
};
/* Helper to setup channel that has not previously been configured for PWM */
static int mcux_sctimer_new_channel(const struct device *dev,
				    uint32_t channel, uint32_t period_cycles,
				    uint32_t duty_cycle)
{
	const struct pwm_mcux_sctimer_config *config = dev->config;
	struct pwm_mcux_sctimer_data *data = dev->data;
	uint32_t clock_freq;
	uint32_t pwm_freq;
	data->match_period = period_cycles;
	if (clock_control_get_rate(config->clock_dev, config->clock_subsys,
				   &clock_freq)) {
		return -EINVAL;
	}
	/* SDK wants a frequency in Hz; derive it from the requested cycle
	 * count (integer division, so very long periods round down).
	 */
	pwm_freq = (clock_freq / config->prescale) / period_cycles;
	if (pwm_freq == 0) {
		LOG_ERR("Could not set up pwm_freq=%d", pwm_freq);
		return -EINVAL;
	}
	/* Counter must be stopped while the SDK programs match events. */
	SCTIMER_StopTimer(config->base, kSCTIMER_Counter_U);
	LOG_DBG("SETUP dutycycle to %u\n", duty_cycle);
	data->channel[channel].dutyCyclePercent = duty_cycle;
	if (SCTIMER_SetupPwm(config->base, &data->channel[channel],
			     kSCTIMER_EdgeAlignedPwm, pwm_freq,
			     clock_freq, &data->event_number[channel]) == kStatus_Fail) {
		LOG_ERR("Could not set up pwm");
		return -ENOTSUP;
	}
	SCTIMER_StartTimer(config->base, kSCTIMER_Counter_U);
	data->configured_chan++;
	return 0;
}
/* Set period/pulse for one SCTimer output.
 *
 * Four cases are handled, in order:
 *  - duty 0 with a single active channel: stop the counter and drive the
 *    pin to its inactive level directly;
 *  - first channel ever configured: full SDK setup;
 *  - new channel while others exist: allowed only with the shared period;
 *  - already-configured channel: period change (single channel only) or a
 *    plain duty-cycle update.
 */
static int mcux_sctimer_pwm_set_cycles(const struct device *dev,
				       uint32_t channel, uint32_t period_cycles,
				       uint32_t pulse_cycles, pwm_flags_t flags)
{
	const struct pwm_mcux_sctimer_config *config = dev->config;
	struct pwm_mcux_sctimer_data *data = dev->data;
	uint8_t duty_cycle;
	int ret;
	if (channel >= CHANNEL_COUNT) {
		LOG_ERR("Invalid channel");
		return -EINVAL;
	}
	if (period_cycles == 0) {
		LOG_ERR("Channel can not be set to inactive level");
		return -ENOTSUP;
	}
	if ((flags & PWM_POLARITY_INVERTED) == 0) {
		data->channel[channel].level = kSCTIMER_HighTrue;
	} else {
		data->channel[channel].level = kSCTIMER_LowTrue;
	}
	/* SDK expresses the pulse as a whole-percent duty cycle. */
	duty_cycle = 100 * pulse_cycles / period_cycles;
	if (duty_cycle == 0 && data->configured_chan == 1) {
		/* Only one channel is active. We can turn off the SCTimer
		 * global counter.
		 */
		SCT_Type *base = config->base;
		/* Stop timer so we can set output directly */
		SCTIMER_StopTimer(base, kSCTIMER_Counter_U);
		/* Set the output to inactive State */
		if (data->channel[channel].level == kSCTIMER_HighTrue) {
			base->OUTPUT &= ~(1UL << channel);
		} else {
			base->OUTPUT |= (1UL << channel);
		}
		return 0;
	}
	/* SCTimer has some unique restrictions when operation as a PWM output.
	 * The peripheral is based around a single counter, with a block of
	 * match registers that can trigger corresponding events. When used
	 * as a PWM peripheral, MCUX SDK sets up the SCTimer as follows:
	 * - one match register is used to set PWM output high, and reset
	 *   SCtimer counter. This sets the PWM period
	 * - one match register is used to set PWM output low. This sets the
	 *   pulse length
	 *
	 * This means that when configured, multiple channels must have the
	 * same PWM period, since they all share the same SCTimer counter.
	 */
	if (period_cycles != data->match_period &&
	    data->event_number[channel] == EVENT_NOT_SET &&
	    data->match_period == 0U) {
		/* No PWM signals have been configured. We can set up the first
		 * PWM output using the MCUX SDK.
		 */
		ret = mcux_sctimer_new_channel(dev, channel, period_cycles,
					       duty_cycle);
		if (ret < 0) {
			return ret;
		}
	} else if (data->event_number[channel] == EVENT_NOT_SET) {
		/* We have already configured a PWM signal, but this channel
		 * has not been setup. We can only support this channel
		 * if the period matches that of other PWM signals.
		 */
		if (period_cycles != data->match_period) {
			LOG_ERR("Only one PWM period is supported between "
				"multiple channels");
			return -ENOTSUP;
		}
		/* Setup PWM output using MCUX SDK */
		ret = mcux_sctimer_new_channel(dev, channel, period_cycles,
					       duty_cycle);
	} else if (period_cycles != data->match_period) {
		uint32_t period_event = data->event_number[channel];
		/* We are reconfiguring the period of a configured channel
		 * MCUX SDK does not provide support for this feature, and
		 * we cannot do this safely if multiple channels are setup.
		 */
		if (data->configured_chan != 1) {
			LOG_ERR("Cannot change PWM period when multiple "
				"channels active");
			return -ENOTSUP;
		}
		/* To make this change, we can simply set the MATCHREL
		 * registers for the period match, and the next match
		 * (which the SDK will setup as the pulse match event)
		 */
		SCTIMER_StopTimer(config->base, kSCTIMER_Counter_U);
		config->base->MATCHREL[period_event] = period_cycles - 1U;
		config->base->MATCHREL[period_event + 1] = pulse_cycles - 1U;
		SCTIMER_StartTimer(config->base, kSCTIMER_Counter_U);
		data->match_period = period_cycles;
	} else {
		/* Only duty cycle needs to be updated */
		SCTIMER_UpdatePwmDutycycle(config->base, channel, duty_cycle,
					   data->event_number[channel]);
	}
	return 0;
}
/*
 * Report the effective counter rate: the SCTimer input clock divided by
 * the configured prescaler. -EINVAL if the clock rate cannot be read.
 */
static int mcux_sctimer_pwm_get_cycles_per_sec(const struct device *dev,
					       uint32_t channel,
					       uint64_t *cycles)
{
	const struct pwm_mcux_sctimer_config *cfg = dev->config;
	uint32_t rate;

	if (clock_control_get_rate(cfg->clock_dev, cfg->clock_subsys, &rate) != 0) {
		return -EINVAL;
	}

	*cycles = rate / cfg->prescale;
	return 0;
}
/* Initialize the SCTimer: apply pinmux, program the prescaler and reset
 * all per-channel bookkeeping to "not configured".
 */
static int mcux_sctimer_pwm_init(const struct device *dev)
{
	const struct pwm_mcux_sctimer_config *config = dev->config;
	struct pwm_mcux_sctimer_data *data = dev->data;
	sctimer_config_t pwm_config;
	status_t status;
	int i;
	int err;
	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}
	SCTIMER_GetDefaultConfig(&pwm_config);
	/* Hardware register holds prescale - 1. */
	pwm_config.prescale_l = config->prescale - 1;
	status = SCTIMER_Init(config->base, &pwm_config);
	if (status != kStatus_Success) {
		LOG_ERR("Unable to init PWM");
		return -EIO;
	}
	for (i = 0; i < CHANNEL_COUNT; i++) {
		data->channel[i].output = i;
		data->channel[i].level = kSCTIMER_HighTrue;
		data->channel[i].dutyCyclePercent = 0;
		data->event_number[i] = EVENT_NOT_SET;
	}
	data->match_period = 0;
	data->configured_chan = 0;
	return 0;
}
/* PWM API vtable exposed to the Zephyr PWM subsystem. */
static const struct pwm_driver_api pwm_mcux_sctimer_driver_api = {
	.set_cycles = mcux_sctimer_pwm_set_cycles,
	.get_cycles_per_sec = mcux_sctimer_pwm_get_cycles_per_sec,
};
/* Instantiate one driver per enabled devicetree node. */
#define PWM_MCUX_SCTIMER_DEVICE_INIT_MCUX(n)				  \
	PINCTRL_DT_INST_DEFINE(n);					  \
	static struct pwm_mcux_sctimer_data pwm_mcux_sctimer_data_##n;	  \
									  \
	static const struct pwm_mcux_sctimer_config pwm_mcux_sctimer_config_##n = { \
		.base = (SCT_Type *)DT_INST_REG_ADDR(n),		  \
		.prescale = DT_INST_PROP(n, prescaler),			  \
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),		  \
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),	  \
		.clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name),\
	};								  \
									  \
	DEVICE_DT_INST_DEFINE(n,					  \
			      mcux_sctimer_pwm_init,			  \
			      NULL,					  \
			      &pwm_mcux_sctimer_data_##n,		  \
			      &pwm_mcux_sctimer_config_##n,		  \
			      POST_KERNEL, CONFIG_PWM_INIT_PRIORITY,	  \
			      &pwm_mcux_sctimer_driver_api);
DT_INST_FOREACH_STATUS_OKAY(PWM_MCUX_SCTIMER_DEVICE_INIT_MCUX)
``` | /content/code_sandbox/drivers/pwm/pwm_mcux_sctimer.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,247 |
```c
/*
*
*/
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/pwm.h>
static inline int z_vrfy_pwm_set_cycles(const struct device *dev,
uint32_t channel, uint32_t period,
uint32_t pulse, pwm_flags_t flags)
{
K_OOPS(K_SYSCALL_DRIVER_PWM(dev, set_cycles));
return z_impl_pwm_set_cycles((const struct device *)dev, channel,
period, pulse, flags);
}
#include <zephyr/syscalls/pwm_set_cycles_mrsh.c>
static inline int z_vrfy_pwm_get_cycles_per_sec(const struct device *dev,
uint32_t channel,
uint64_t *cycles)
{
K_OOPS(K_SYSCALL_DRIVER_PWM(dev, get_cycles_per_sec));
K_OOPS(K_SYSCALL_MEMORY_WRITE(cycles, sizeof(uint64_t)));
return z_impl_pwm_get_cycles_per_sec((const struct device *)dev,
channel, (uint64_t *)cycles);
}
#include <zephyr/syscalls/pwm_get_cycles_per_sec_mrsh.c>
#ifdef CONFIG_PWM_CAPTURE
static inline int z_vrfy_pwm_enable_capture(const struct device *dev,
uint32_t channel)
{
K_OOPS(K_SYSCALL_DRIVER_PWM(dev, enable_capture));
return z_impl_pwm_enable_capture((const struct device *)dev, channel);
}
#include <zephyr/syscalls/pwm_enable_capture_mrsh.c>
static inline int z_vrfy_pwm_disable_capture(const struct device *dev,
uint32_t channel)
{
K_OOPS(K_SYSCALL_DRIVER_PWM(dev, disable_capture));
return z_impl_pwm_disable_capture((const struct device *)dev, channel);
}
#include <zephyr/syscalls/pwm_disable_capture_mrsh.c>
static inline int z_vrfy_pwm_capture_cycles(const struct device *dev,
uint32_t channel, pwm_flags_t flags,
uint32_t *period_cycles,
uint32_t *pulse_cycles,
k_timeout_t timeout)
{
uint32_t period;
uint32_t pulse;
int err;
K_OOPS(K_SYSCALL_DRIVER_PWM(dev, configure_capture));
K_OOPS(K_SYSCALL_DRIVER_PWM(dev, enable_capture));
K_OOPS(K_SYSCALL_DRIVER_PWM(dev, disable_capture));
err = z_impl_pwm_capture_cycles((const struct device *)dev, channel,
flags, &period, &pulse, timeout);
if (period_cycles != NULL) {
K_OOPS(k_usermode_to_copy(period_cycles, &period,
sizeof(*period_cycles)));
}
if (pulse_cycles != NULL) {
K_OOPS(k_usermode_to_copy(pulse_cycles, &pulse,
sizeof(*pulse_cycles)));
}
return err;
}
#include <zephyr/syscalls/pwm_capture_cycles_mrsh.c>
#endif /* CONFIG_PWM_CAPTURE */
``` | /content/code_sandbox/drivers/pwm/pwm_handlers.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 577 |
```c
/*
*
*/
#define DT_DRV_COMPAT raspberrypi_pico_pwm
#include <zephyr/device.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pwm.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/reset.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(pwm_rpi_pico, CONFIG_PWM_LOG_LEVEL);
/* pico-sdk includes */
#include <hardware/pwm.h>
#include <hardware/structs/pwm.h>
/* The RP2040 PWM counter TOP register is 16 bits wide. */
#define PWM_RPI_PICO_COUNTER_TOP_MAX UINT16_MAX
/* 8 slices x 2 outputs (A/B) per slice. */
#define PWM_RPI_NUM_CHANNELS (16U)
/* Per-slice clock divider (8.4 fixed point) and counting mode. */
struct pwm_rpi_slice_config {
	uint8_t integral;
	uint8_t frac;
	bool phase_correct;
};
struct pwm_rpi_config {
	/*
	 * pwm_controller is the start address of the pwm peripheral.
	 */
	pwm_hw_t *pwm_controller;
	struct pwm_rpi_slice_config slice_configs[NUM_PWM_SLICES];
	const struct pinctrl_dev_config *pcfg;
	const struct reset_dt_spec reset;
	const struct device *clk_dev;
	const clock_control_subsys_t clk_id;
};
/* Return the slice's clock divider as a float for use with pico-sdk. */
static float pwm_rpi_get_clkdiv(const struct device *dev, int slice)
{
	const struct pwm_rpi_config *cfg = dev->config;
	/* the divider is a fixed point 8.4 convert to float for use in pico-sdk */
	return (float)cfg->slice_configs[slice].integral +
	       (float)cfg->slice_configs[slice].frac / 16.0f;
}
/* Map a Zephyr channel number to its RP2040 PWM slice: each slice has
 * two outputs (A/B), so the slice index is the channel number halved.
 */
static inline uint32_t pwm_rpi_channel_to_slice(uint32_t channel)
{
	return channel >> 1U;
}
/* Map a Zephyr channel number to the output within its slice:
 * even channels are output A (0), odd channels are output B (1).
 */
static inline uint32_t pwm_rpi_channel_to_pico_channel(uint32_t channel)
{
	return channel & 1U;
}
/* Report the counter rate for the slice owning this channel: the system
 * clock divided by the slice's fixed-point divider.
 */
static int pwm_rpi_get_cycles_per_sec(const struct device *dev, uint32_t ch, uint64_t *cycles)
{
	const struct pwm_rpi_config *cfg = dev->config;
	int slice = pwm_rpi_channel_to_slice(ch);
	uint32_t pclk;
	int ret;
	if (ch >= PWM_RPI_NUM_CHANNELS) {
		return -EINVAL;
	}
	ret = clock_control_get_rate(cfg->clk_dev, cfg->clk_id, &pclk);
	if (ret < 0 || pclk == 0) {
		return -EINVAL;
	}
	/* No need to check for divide by 0 since the minimum value of
	 * pwm_rpi_get_clkdiv is 1
	 */
	*cycles = (uint64_t)((float)pclk / pwm_rpi_get_clkdiv(dev, slice));
	return 0;
}
/* The pico_sdk only allows setting the polarity of both channels at once.
 * This is a convenience function to make setting the polarity of a single
 * channel easier.
 */
static void pwm_rpi_set_channel_polarity(const struct device *dev, int slice,
					 int pico_channel, bool inverted)
{
	const struct pwm_rpi_config *cfg = dev->config;
	/* Read the current invert bits so the sibling channel keeps its setting. */
	bool pwm_polarity_a = (cfg->pwm_controller->slice[slice].csr & PWM_CH0_CSR_A_INV_BITS) > 0;
	bool pwm_polarity_b = (cfg->pwm_controller->slice[slice].csr & PWM_CH0_CSR_B_INV_BITS) > 0;
	if (pico_channel == PWM_CHAN_A) {
		pwm_polarity_a = inverted;
	} else if (pico_channel == PWM_CHAN_B) {
		pwm_polarity_b = inverted;
	}
	pwm_set_output_polarity(slice, pwm_polarity_a, pwm_polarity_b);
}
/*
 * Program period/pulse for one channel. The 16-bit counter wraps at
 * period_cycles - 1; the output is high while the counter is below
 * pulse_cycles. Polarity is applied per output via the CSR invert bits.
 *
 * Returns -EINVAL for an out-of-range channel or values that do not fit
 * the 16-bit counter (including period_cycles == 0, which the original
 * code only rejected via unsigned underflow of period_cycles - 1).
 */
static int pwm_rpi_set_cycles(const struct device *dev, uint32_t ch, uint32_t period_cycles,
			      uint32_t pulse_cycles, pwm_flags_t flags)
{
	if (ch >= PWM_RPI_NUM_CHANNELS) {
		return -EINVAL;
	}
	if (period_cycles == 0 ||
	    period_cycles - 1 > PWM_RPI_PICO_COUNTER_TOP_MAX ||
	    pulse_cycles > PWM_RPI_PICO_COUNTER_TOP_MAX) {
		return -EINVAL;
	}
	int slice = pwm_rpi_channel_to_slice(ch);
	/* this is the channel within a pwm slice */
	int pico_channel = pwm_rpi_channel_to_pico_channel(ch);
	pwm_rpi_set_channel_polarity(dev, slice, pico_channel,
				     (flags & PWM_POLARITY_MASK) == PWM_POLARITY_INVERTED);
	pwm_set_wrap(slice, period_cycles - 1);
	pwm_set_chan_level(slice, pico_channel, pulse_cycles);
	return 0;
}
struct pwm_driver_api pwm_rpi_driver_api = {
.get_cycles_per_sec = pwm_rpi_get_cycles_per_sec,
.set_cycles = pwm_rpi_set_cycles,
};
/* Initialize the PWM block: pinmux, clock, peripheral reset, then program
 * each slice's divider and counting mode and enable it.
 */
static int pwm_rpi_init(const struct device *dev)
{
	const struct pwm_rpi_config *cfg = dev->config;
	pwm_config slice_cfg;
	size_t slice_idx;
	int err;
	err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		LOG_ERR("Failed to configure pins for PWM. err=%d", err);
		return err;
	}
	err = clock_control_on(cfg->clk_dev, cfg->clk_id);
	if (err < 0) {
		return err;
	}
	err = reset_line_toggle_dt(&cfg->reset);
	if (err < 0) {
		return err;
	}
	for (slice_idx = 0; slice_idx < NUM_PWM_SLICES; slice_idx++) {
		slice_cfg = pwm_get_default_config();
		pwm_config_set_clkdiv_mode(&slice_cfg, PWM_DIV_FREE_RUNNING);
		pwm_init(slice_idx, &slice_cfg, false);
		pwm_set_clkdiv_int_frac(slice_idx,
					cfg->slice_configs[slice_idx].integral,
					cfg->slice_configs[slice_idx].frac);
		pwm_set_enabled(slice_idx, true);
	}
	return 0;
}
/* Pull one slice's divider (integer + fractional parts) from devicetree. */
#define PWM_INST_RPI_SLICE_DIVIDER(idx, n)				   \
	{								   \
		.integral = DT_INST_PROP(idx, UTIL_CAT(divider_int_, n)),  \
		.frac = DT_INST_PROP(idx, UTIL_CAT(divider_frac_, n)),	   \
	}
/* Instantiate one driver per enabled devicetree node. */
#define PWM_RPI_INIT(idx)								   \
											   \
	PINCTRL_DT_INST_DEFINE(idx);							   \
	static const struct pwm_rpi_config pwm_rpi_config_##idx = {			   \
		.pwm_controller = (pwm_hw_t *)DT_INST_REG_ADDR(idx),			   \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx),				   \
		.slice_configs = {							   \
			PWM_INST_RPI_SLICE_DIVIDER(idx, 0),				   \
			PWM_INST_RPI_SLICE_DIVIDER(idx, 1),				   \
			PWM_INST_RPI_SLICE_DIVIDER(idx, 2),				   \
			PWM_INST_RPI_SLICE_DIVIDER(idx, 3),				   \
			PWM_INST_RPI_SLICE_DIVIDER(idx, 4),				   \
			PWM_INST_RPI_SLICE_DIVIDER(idx, 5),				   \
			PWM_INST_RPI_SLICE_DIVIDER(idx, 6),				   \
			PWM_INST_RPI_SLICE_DIVIDER(idx, 7),				   \
		},									   \
		.reset = RESET_DT_SPEC_INST_GET(idx),					   \
		.clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(idx)),			   \
		.clk_id = (clock_control_subsys_t)DT_INST_PHA_BY_IDX(idx, clocks, 0, clk_id), \
	};										   \
											   \
	DEVICE_DT_INST_DEFINE(idx, pwm_rpi_init, NULL, NULL, &pwm_rpi_config_##idx, POST_KERNEL, \
			      CONFIG_PWM_INIT_PRIORITY, &pwm_rpi_driver_api);
DT_INST_FOREACH_STATUS_OKAY(PWM_RPI_INIT);
``` | /content/code_sandbox/drivers/pwm/pwm_rpi_pico.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,650 |
```c
/*
*
*/
/*
* PWM driver using the SAM0 Timer/Counter (TCC) in Normal PWM (NPWM) mode.
* Supports the SAMD21 and SAMD5x series.
*/
#define DT_DRV_COMPAT atmel_sam0_tcc_pwm
#include <zephyr/device.h>
#include <errno.h>
#include <zephyr/drivers/pwm.h>
#include <zephyr/drivers/pinctrl.h>
#include <soc.h>
/* Static configuration */
struct pwm_sam0_config {
	Tcc *regs;
	const struct pinctrl_dev_config *pcfg;
	/* number of compare/capture channels on this TCC instance */
	uint8_t channels;
	/* counter width in bits; period/pulse must stay below 1 << counter_size */
	uint8_t counter_size;
	uint16_t prescaler;
	/* effective counter frequency: GCLK0 / prescaler */
	uint32_t freq;
#ifdef MCLK
	/* SAMD5x-style clock tree */
	volatile uint32_t *mclk;
	uint32_t mclk_mask;
	uint16_t gclk_id;
#else
	/* SAMD21-style clock tree */
	uint32_t pm_apbcmask;
	uint16_t gclk_clkctrl_id;
#endif
};
/* Wait for the peripheral to finish all commands */
static void wait_synchronization(Tcc *regs)
{
	while (regs->SYNCBUSY.reg != 0) {
	}
}
/*
 * Report the counter rate (GCLK0 / prescaler), fixed at build time.
 * -EINVAL if the channel does not exist on this TCC instance.
 */
static int pwm_sam0_get_cycles_per_sec(const struct device *dev,
				       uint32_t channel, uint64_t *cycles)
{
	const struct pwm_sam0_config *config = dev->config;

	if (channel >= config->channels) {
		return -EINVAL;
	}

	*cycles = config->freq;
	return 0;
}
/* Program period/pulse via the double-buffered CCBUF/PERBUF registers so
 * the new values take effect at the next counter cycle without glitches.
 * A polarity change requires briefly disabling the timer to flip INVEN.
 */
static int pwm_sam0_set_cycles(const struct device *dev, uint32_t channel,
			       uint32_t period_cycles, uint32_t pulse_cycles,
			       pwm_flags_t flags)
{
	const struct pwm_sam0_config *const cfg = dev->config;
	Tcc *regs = cfg->regs;
	uint32_t top = 1 << cfg->counter_size;
	uint32_t invert_mask = 1 << channel;
	bool invert = ((flags & PWM_POLARITY_INVERTED) != 0);
	bool inverted = ((regs->DRVCTRL.vec.INVEN & invert_mask) != 0);
	if (channel >= cfg->channels) {
		return -EINVAL;
	}
	if (period_cycles >= top || pulse_cycles >= top) {
		return -EINVAL;
	}
	/*
	 * Update the buffered width and period. These will be automatically
	 * loaded on the next cycle.
	 */
#ifdef TCC_PERBUF_PERBUF
	/* SAME51 naming */
	regs->CCBUF[channel].reg = TCC_CCBUF_CCBUF(pulse_cycles);
	regs->PERBUF.reg = TCC_PERBUF_PERBUF(period_cycles);
#else
	/* SAMD21 naming */
	regs->CCB[channel].reg = TCC_CCB_CCB(pulse_cycles);
	regs->PERB.reg = TCC_PERB_PERB(period_cycles);
#endif
	/* Toggle INVEN only when the requested polarity differs; the TCC
	 * must be disabled while DRVCTRL is written.
	 */
	if (invert != inverted) {
		regs->CTRLA.bit.ENABLE = 0;
		wait_synchronization(regs);
		regs->DRVCTRL.vec.INVEN ^= invert_mask;
		regs->CTRLA.bit.ENABLE = 1;
		wait_synchronization(regs);
	}
	return 0;
}
/* Initialize the TCC: enable clocks, apply pinmux, reset the peripheral,
 * then configure prescaler and NPWM waveform generation and start it.
 */
static int pwm_sam0_init(const struct device *dev)
{
	const struct pwm_sam0_config *const cfg = dev->config;
	Tcc *regs = cfg->regs;
	int retval;
	/* Enable the clocks */
#ifdef MCLK
	GCLK->PCHCTRL[cfg->gclk_id].reg =
		GCLK_PCHCTRL_GEN_GCLK0 | GCLK_PCHCTRL_CHEN;
	*cfg->mclk |= cfg->mclk_mask;
#else
	GCLK->CLKCTRL.reg = cfg->gclk_clkctrl_id | GCLK_CLKCTRL_GEN_GCLK0 |
			    GCLK_CLKCTRL_CLKEN;
	PM->APBCMASK.reg |= cfg->pm_apbcmask;
#endif
	retval = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (retval < 0) {
		return retval;
	}
	regs->CTRLA.bit.SWRST = 1;
	wait_synchronization(regs);
	regs->CTRLA.reg = cfg->prescaler;
	regs->WAVE.reg = TCC_WAVE_WAVEGEN_NPWM;
	/* Placeholder period; real values arrive via set_cycles(). */
	regs->PER.reg = TCC_PER_PER(1);
	regs->CTRLA.bit.ENABLE = 1;
	wait_synchronization(regs);
	return 0;
}
/* PWM API vtable exposed to the Zephyr PWM subsystem. */
static const struct pwm_driver_api pwm_sam0_driver_api = {
	.set_cycles = pwm_sam0_set_cycles,
	.get_cycles_per_sec = pwm_sam0_get_cycles_per_sec,
};
/* Clock-tree fields differ between SAMD5x (MCLK) and SAMD21 (PM). */
#ifdef MCLK
#define PWM_SAM0_INIT_CLOCKS(inst)					    \
	.mclk = (volatile uint32_t *)MCLK_MASK_DT_INT_REG_ADDR(inst),	    \
	.mclk_mask = BIT(DT_INST_CLOCKS_CELL_BY_NAME(inst, mclk, bit)),	    \
	.gclk_id = DT_INST_CLOCKS_CELL_BY_NAME(inst, gclk, periph_ch)
#else
#define PWM_SAM0_INIT_CLOCKS(inst)					    \
	.pm_apbcmask = BIT(DT_INST_CLOCKS_CELL_BY_NAME(inst, pm, bit)),	    \
	.gclk_clkctrl_id = DT_INST_CLOCKS_CELL_BY_NAME(inst, gclk, clkctrl_id)
#endif
/* Instantiate one driver per enabled devicetree node. */
#define PWM_SAM0_INIT(inst)						    \
	PINCTRL_DT_INST_DEFINE(inst);					    \
	static const struct pwm_sam0_config pwm_sam0_config_##inst = {	    \
		.regs = (Tcc *)DT_INST_REG_ADDR(inst),			    \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst),		    \
		.channels = DT_INST_PROP(inst, channels),		    \
		.counter_size = DT_INST_PROP(inst, counter_size),	    \
		.prescaler = UTIL_CAT(TCC_CTRLA_PRESCALER_DIV,		    \
				      DT_INST_PROP(inst, prescaler)),	    \
		.freq = SOC_ATMEL_SAM0_GCLK0_FREQ_HZ /			    \
			DT_INST_PROP(inst, prescaler),			    \
		PWM_SAM0_INIT_CLOCKS(inst),				    \
	};								    \
									    \
	DEVICE_DT_INST_DEFINE(inst, &pwm_sam0_init, NULL,		    \
			      NULL, &pwm_sam0_config_##inst,		    \
			      POST_KERNEL, CONFIG_PWM_INIT_PRIORITY,	    \
			      &pwm_sam0_driver_api);
DT_INST_FOREACH_STATUS_OKAY(PWM_SAM0_INIT)
``` | /content/code_sandbox/drivers/pwm/pwm_sam0_tcc.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,351 |
```c
/*
*
*/
#define DT_DRV_COMPAT silabs_gecko_pwm
#include <zephyr/drivers/pwm.h>
#include <zephyr/dt-bindings/pwm/pwm.h>
#include <em_cmu.h>
#include <em_timer.h>
/** PWM configuration. */
struct pwm_gecko_config {
	TIMER_TypeDef *timer;
	CMU_Clock_TypeDef clock;
	/* numeric divider used for frequency reporting */
	uint16_t prescaler;
	/* same divider as the emlib enumeration used at init */
	TIMER_Prescale_TypeDef prescale_enum;
	uint8_t channel;
	/* pin routing location and GPIO port/pin of the output */
	uint8_t location;
	uint8_t port;
	uint8_t pin;
};
/*
 * Program period (TOP) and pulse (buffered compare) for one channel,
 * lazily switching the channel into PWM mode and routing the output pin
 * on first use.
 *
 * Fix: the original only ORed TIMER_CC_CTRL_OUTINV when the inverted
 * flag was set, so once inverted a channel could never be switched back
 * to normal polarity. Clear or set the bit explicitly instead.
 */
static int pwm_gecko_set_cycles(const struct device *dev, uint32_t channel,
				uint32_t period_cycles, uint32_t pulse_cycles,
				pwm_flags_t flags)
{
	TIMER_InitCC_TypeDef compare_config = TIMER_INITCC_DEFAULT;
	const struct pwm_gecko_config *cfg = dev->config;
	if (BUS_RegMaskedRead(&cfg->timer->CC[channel].CTRL,
		_TIMER_CC_CTRL_MODE_MASK) != timerCCModePWM) {
#ifdef _TIMER_ROUTE_MASK
		BUS_RegMaskedWrite(&cfg->timer->ROUTE,
			_TIMER_ROUTE_LOCATION_MASK,
			cfg->location << _TIMER_ROUTE_LOCATION_SHIFT);
		BUS_RegMaskedSet(&cfg->timer->ROUTE, 1 << channel);
#elif defined(_TIMER_ROUTELOC0_MASK)
		BUS_RegMaskedWrite(&cfg->timer->ROUTELOC0,
			_TIMER_ROUTELOC0_CC0LOC_MASK <<
			(channel * _TIMER_ROUTELOC0_CC1LOC_SHIFT),
			cfg->location << (channel * _TIMER_ROUTELOC0_CC1LOC_SHIFT));
		BUS_RegMaskedSet(&cfg->timer->ROUTEPEN, 1 << channel);
#else
#error Unsupported device
#endif
		compare_config.mode = timerCCModePWM;
		TIMER_InitCC(cfg->timer, channel, &compare_config);
	}
	/* Apply requested polarity, both directions. */
	if (flags & PWM_POLARITY_INVERTED) {
		cfg->timer->CC[channel].CTRL |= TIMER_CC_CTRL_OUTINV;
	} else {
		cfg->timer->CC[channel].CTRL &= ~TIMER_CC_CTRL_OUTINV;
	}
	TIMER_TopSet(cfg->timer, period_cycles);
	/* Buffered compare write: takes effect at the next counter wrap. */
	TIMER_CompareBufSet(cfg->timer, channel, pulse_cycles);
	return 0;
}
/* Report the counter rate: timer branch clock divided by the prescaler. */
static int pwm_gecko_get_cycles_per_sec(const struct device *dev,
					uint32_t channel, uint64_t *cycles)
{
	const struct pwm_gecko_config *config = dev->config;
	uint32_t timer_freq = CMU_ClockFreqGet(config->clock);

	*cycles = timer_freq / config->prescaler;
	return 0;
}
/* PWM API vtable exposed to the Zephyr PWM subsystem. */
static const struct pwm_driver_api pwm_gecko_driver_api = {
	.set_cycles = pwm_gecko_set_cycles,
	.get_cycles_per_sec = pwm_gecko_get_cycles_per_sec,
};
/* Enable timer and GPIO clocks, configure the output pin, and start the
 * timer with the devicetree-selected prescaler.
 */
static int pwm_gecko_init(const struct device *dev)
{
	TIMER_Init_TypeDef timer = TIMER_INIT_DEFAULT;
	const struct pwm_gecko_config *cfg = dev->config;
	CMU_ClockEnable(cfg->clock, true);
	CMU_ClockEnable(cmuClock_GPIO, true);
	GPIO_PinModeSet(cfg->port, cfg->pin, gpioModePushPull, 0);
	timer.prescale = cfg->prescale_enum;
	TIMER_Init(cfg->timer, &timer);
	return 0;
}
/* Map the instance index to its CMU clock and the numeric prescaler to
 * the emlib timerPrescaleN enumerator.
 */
#define CLOCK_TIMER(id) _CONCAT(cmuClock_TIMER, id)
#define PRESCALING_FACTOR(factor) \
	((_CONCAT(timerPrescale, factor)))
/* Instantiate one driver per enabled devicetree node. */
#define PWM_GECKO_INIT(index)						\
	static const struct pwm_gecko_config pwm_gecko_config_##index = { \
		.timer = (TIMER_TypeDef *)DT_REG_ADDR(DT_INST_PARENT(index)), \
		.clock = CLOCK_TIMER(index),				\
		.prescaler = DT_INST_PROP(index, prescaler),		\
		.prescale_enum = (TIMER_Prescale_TypeDef)		\
			PRESCALING_FACTOR(DT_INST_PROP(index, prescaler)), \
		.location = DT_INST_PROP_BY_IDX(index, pin_location, 0), \
		.port = (GPIO_Port_TypeDef)				\
			DT_INST_PROP_BY_IDX(index, pin_location, 1),	\
		.pin = DT_INST_PROP_BY_IDX(index, pin_location, 2),	\
	};								\
									\
	DEVICE_DT_INST_DEFINE(index, &pwm_gecko_init, NULL, NULL,	\
			      &pwm_gecko_config_##index, POST_KERNEL,	\
			      CONFIG_PWM_INIT_PRIORITY,			\
			      &pwm_gecko_driver_api);
DT_INST_FOREACH_STATUS_OKAY(PWM_GECKO_INIT)
``` | /content/code_sandbox/drivers/pwm/pwm_gecko.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 951 |
```c
/*
*
*/
#define DT_DRV_COMPAT telink_b91_pwm
#include <pwm.h>
#include <clock.h>
#include <zephyr/drivers/pwm.h>
#include <zephyr/drivers/pinctrl.h>
/* Static (devicetree-derived) configuration of the B91 PWM controller. */
struct pwm_b91_config {
	/* Pin control configuration applied at init */
	const struct pinctrl_dev_config *pcfg;
	/* Requested PWM peripheral clock frequency, in Hz */
	uint32_t clock_frequency;
	/* Number of channels exposed by this instance */
	uint8_t channels;
	/* Bitmask: bit N set means channel N is clocked from 32 kHz */
	uint8_t clk32k_ch_enable;
};
/* API implementation: init */
/**
 * @brief Initialize the B91 PWM peripheral.
 *
 * Programs the PWM clock divider from the requested clock frequency,
 * optionally switches individual channels onto the 32 kHz clock, and
 * applies the default pinctrl state.
 *
 * @retval 0 on success.
 * @retval -EINVAL if the requested clock frequency needs a divider > 256.
 * @retval <0 errno propagated from pinctrl_apply_state().
 */
static int pwm_b91_init(const struct device *dev)
{
	const struct pwm_b91_config *config = dev->config;
	uint8_t clk_32k_en = 0;
	uint32_t pwm_clk_div = 0;
	int status;

	/* Calculate and check PWM clock divider */
	pwm_clk_div = sys_clk.pclk * 1000 * 1000 / config->clock_frequency - 1;
	if (pwm_clk_div > 255) {
		return -EINVAL;
	}

	/* Set PWM Peripheral clock */
	pwm_set_clk((unsigned char) (pwm_clk_div & 0xFF));

	/* Set PWM 32k Channel clock if enabled */
	clk_32k_en |= (config->clk32k_ch_enable & BIT(0)) ? PWM_CLOCK_32K_CHN_PWM0 : 0;
	clk_32k_en |= (config->clk32k_ch_enable & BIT(1)) ? PWM_CLOCK_32K_CHN_PWM1 : 0;
	clk_32k_en |= (config->clk32k_ch_enable & BIT(2)) ? PWM_CLOCK_32K_CHN_PWM2 : 0;
	clk_32k_en |= (config->clk32k_ch_enable & BIT(3)) ? PWM_CLOCK_32K_CHN_PWM3 : 0;
	clk_32k_en |= (config->clk32k_ch_enable & BIT(4)) ? PWM_CLOCK_32K_CHN_PWM4 : 0;
	clk_32k_en |= (config->clk32k_ch_enable & BIT(5)) ? PWM_CLOCK_32K_CHN_PWM5 : 0;
	pwm_32k_chn_en(clk_32k_en);

	/* Config PWM pins.
	 * Fix: status was declared uint32_t, so "status < 0" could never be
	 * true and pinctrl failures were silently ignored; it must be a
	 * signed int to carry the negative errno.
	 */
	status = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (status < 0) {
		return status;
	}

	return 0;
}
/* API implementation: set_cycles */
/**
 * @brief Program period, pulse width and polarity for one channel.
 *
 * @return 0 on success, -EINVAL on a bad channel or a value that does not
 *         fit in the 16-bit hardware registers.
 */
static int pwm_b91_set_cycles(const struct device *dev, uint32_t channel,
			      uint32_t period_cycles, uint32_t pulse_cycles,
			      pwm_flags_t flags)
{
	const struct pwm_b91_config *config = dev->config;

	/* The channel must exist on this instance. */
	if (channel >= config->channels) {
		return -EINVAL;
	}

	/* Period and compare registers are 2 bytes wide. */
	if ((period_cycles > 0xFFFFu) || (pulse_cycles > 0xFFFFu)) {
		return -EINVAL;
	}

	/* Program the requested output polarity. */
	if ((flags & PWM_POLARITY_INVERTED) != 0U) {
		pwm_invert_en(channel);
	} else {
		pwm_invert_dis(channel);
	}

	/* Load compare (pulse) and max (period) values, then run. */
	pwm_set_tcmp(channel, pulse_cycles);
	pwm_set_tmax(channel, period_cycles);
	pwm_start(channel);

	return 0;
}
/* API implementation: get_cycles_per_sec */
static int pwm_b91_get_cycles_per_sec(const struct device *dev,
				      uint32_t channel, uint64_t *cycles)
{
	const struct pwm_b91_config *config = dev->config;
	/* check pwm channel */
	if (channel >= config->channels) {
		return -EINVAL;
	}
	/* Channels on the 32 kHz clock tick at a fixed 32000 Hz; otherwise
	 * derive the rate from pclk and the programmed divider register.
	 */
	if ((config->clk32k_ch_enable & BIT(channel)) != 0U) {
		*cycles = 32000u;
	} else {
		*cycles = sys_clk.pclk * 1000 * 1000 / (reg_pwm_clkdiv + 1);
	}
	return 0;
}
/* PWM driver APIs structure */
static const struct pwm_driver_api pwm_b91_driver_api = {
	.set_cycles = pwm_b91_set_cycles,
	.get_cycles_per_sec = pwm_b91_get_cycles_per_sec,
};
/* PWM driver registration: per-instance pinctrl, config (the clk32k mask is
 * packed from the six clk32k_chN_enable DT booleans) and device definition.
 */
#define PWM_B91_INIT(n)						   \
	PINCTRL_DT_INST_DEFINE(n);				   \
								   \
	static const struct pwm_b91_config config##n = {	   \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),	   \
		.clock_frequency = DT_INST_PROP(n, clock_frequency), \
		.channels = DT_INST_PROP(n, channels),		   \
		.clk32k_ch_enable =				   \
			((DT_INST_PROP(n, clk32k_ch0_enable) << 0U) |  \
			(DT_INST_PROP(n, clk32k_ch1_enable) << 1U) |   \
			(DT_INST_PROP(n, clk32k_ch2_enable) << 2U) |   \
			(DT_INST_PROP(n, clk32k_ch3_enable) << 3U) |   \
			(DT_INST_PROP(n, clk32k_ch4_enable) << 4U) |   \
			(DT_INST_PROP(n, clk32k_ch5_enable) << 5U)),   \
	};							   \
								   \
	DEVICE_DT_INST_DEFINE(n, pwm_b91_init,			   \
			      NULL, NULL, &config##n,		   \
			      POST_KERNEL, CONFIG_PWM_INIT_PRIORITY, \
			      &pwm_b91_driver_api);
DT_INST_FOREACH_STATUS_OKAY(PWM_B91_INIT)
``` | /content/code_sandbox/drivers/pwm/pwm_b91.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,180 |
```unknown
# Atmel SAM0 TCC as PWM configuration
# Enabled by default whenever a compatible DT node has status "okay".
config PWM_SAM0_TCC
	bool "Atmel SAM0 MCU Family TCC PWM Driver"
	default y
	depends on DT_HAS_ATMEL_SAM0_TCC_PWM_ENABLED
	help
	  Enable PWM driver for Atmel SAM0 MCUs using the TCC timer/counter.
``` | /content/code_sandbox/drivers/pwm/Kconfig.sam0 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 73 |
```unknown
# ENE KB1200 PWM configuration
config PWM_ENE_KB1200
	bool "ENE KB1200 PWM driver"
	default y
	depends on DT_HAS_ENE_KB1200_PWM_ENABLED
	select PINCTRL
	help
	  This option enables the PWM driver for KB1200 processors.
``` | /content/code_sandbox/drivers/pwm/Kconfig.ene | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 56 |
```unknown
# NXP MCUX quad timer (QTMR) PWM configuration
config PWM_MCUX_QTMR
	bool "MCUX QMTR PWM driver"
	default y
	depends on DT_HAS_NXP_QTMR_PWM_ENABLED
	depends on CLOCK_CONTROL && PINCTRL
	help
	  Enable QTMR based pwm driver.
``` | /content/code_sandbox/drivers/pwm/Kconfig.mcux_qtmr | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 55 |
```unknown
# Telink B91 PWM configuration
config PWM_TELINK_B91
	bool "Telink Semiconductor B91 PWM driver"
	default y
	depends on DT_HAS_TELINK_B91_PWM_ENABLED
	help
	  Enables Telink B91 PWM driver.
``` | /content/code_sandbox/drivers/pwm/Kconfig.b91 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 47 |
```c
/*
*
* Heavily based on pwm_mcux_ftm.c, which is:
*
*/
#define DT_DRV_COMPAT openisa_rv32m1_tpm
#include <zephyr/drivers/clock_control.h>
#include <errno.h>
#include <zephyr/drivers/pwm.h>
#include <soc.h>
#include <fsl_tpm.h>
#include <fsl_clock.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(pwm_rv32m1_tpm, CONFIG_PWM_LOG_LEVEL);
#define MAX_CHANNELS ARRAY_SIZE(TPM0->CONTROLS)
/* Static (devicetree-derived) configuration of one TPM instance. */
struct rv32m1_tpm_config {
	TPM_Type *base;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
	tpm_clock_source_t tpm_clock_source;
	tpm_clock_prescale_t prescale;
	uint8_t channel_count;
	tpm_pwm_mode_t mode;
	const struct pinctrl_dev_config *pincfg;
};
/* Runtime state. period_cycles is shared by all channels of the instance. */
struct rv32m1_tpm_data {
	uint32_t clock_freq;
	uint32_t period_cycles;
	tpm_chnl_pwm_signal_param_t channel[MAX_CHANNELS];
};
/*
 * Set period/pulse for one channel. Because the TPM has a single counter,
 * changing the period reprograms (and briefly stops) the whole timer and
 * therefore affects every channel of the instance.
 */
static int rv32m1_tpm_set_cycles(const struct device *dev, uint32_t channel,
				 uint32_t period_cycles, uint32_t pulse_cycles,
				 pwm_flags_t flags)
{
	const struct rv32m1_tpm_config *config = dev->config;
	struct rv32m1_tpm_data *data = dev->data;
	uint8_t duty_cycle;
	if (period_cycles == 0U) {
		LOG_ERR("Channel can not be set to inactive level");
		return -ENOTSUP;
	}
	if (channel >= config->channel_count) {
		LOG_ERR("Invalid channel");
		return -ENOTSUP;
	}
	/* The SDK works in whole percent; sub-percent resolution is lost. */
	duty_cycle = pulse_cycles * 100U / period_cycles;
	data->channel[channel].dutyCyclePercent = duty_cycle;
	if ((flags & PWM_POLARITY_INVERTED) == 0) {
		data->channel[channel].level = kTPM_HighTrue;
	} else {
		data->channel[channel].level = kTPM_LowTrue;
	}
	LOG_DBG("pulse_cycles=%d, period_cycles=%d, duty_cycle=%d, flags=%d",
		pulse_cycles, period_cycles, duty_cycle, flags);
	if (period_cycles != data->period_cycles) {
		uint32_t pwm_freq;
		status_t status;
		if (data->period_cycles != 0) {
			/* Only warn when not changing from zero */
			LOG_WRN("Changing period cycles from %d to %d"
				" affects all %d channels in %s",
				data->period_cycles, period_cycles,
				config->channel_count, dev->name);
		}
		data->period_cycles = period_cycles;
		/* Convert the requested period into a frequency for the SDK. */
		pwm_freq = (data->clock_freq >> config->prescale) /
			   period_cycles;
		LOG_DBG("pwm_freq=%d, clock_freq=%d", pwm_freq,
			data->clock_freq);
		if (pwm_freq == 0U) {
			LOG_ERR("Could not set up pwm_freq=%d", pwm_freq);
			return -EINVAL;
		}
		/* Timer must be stopped while reprogramming all channels. */
		TPM_StopTimer(config->base);
		status = TPM_SetupPwm(config->base, data->channel,
				      config->channel_count, config->mode,
				      pwm_freq, data->clock_freq);
		if (status != kStatus_Success) {
			LOG_ERR("Could not set up pwm");
			return -ENOTSUP;
		}
		TPM_StartTimer(config->base, config->tpm_clock_source);
	} else {
		/* Same period: only the one channel needs updating. */
		TPM_UpdateChnlEdgeLevelSelect(config->base, channel,
					      data->channel[channel].level);
		TPM_UpdatePwmDutycycle(config->base, channel, config->mode,
				       duty_cycle);
	}
	return 0;
}
/**
 * @brief Report the TPM counter rate in cycles per second.
 *
 * @param channel Unused; every channel shares the prescaled module clock.
 */
static int rv32m1_tpm_get_cycles_per_sec(const struct device *dev,
					 uint32_t channel, uint64_t *cycles)
{
	const struct rv32m1_tpm_config *config = dev->config;
	struct rv32m1_tpm_data *data = dev->data;
	uint32_t rate = data->clock_freq >> config->prescale;

	*cycles = rate;

	return 0;
}
/*
 * Driver init: turn on and query the module clock, mark every channel
 * inactive, apply pinctrl, then initialize the TPM with the fixed
 * divide-by-16 prescaler.
 */
static int rv32m1_tpm_init(const struct device *dev)
{
	const struct rv32m1_tpm_config *config = dev->config;
	struct rv32m1_tpm_data *data = dev->data;
	tpm_chnl_pwm_signal_param_t *channel = data->channel;
	tpm_config_t tpm_config;
	int err;
	int i;
	if (config->channel_count > ARRAY_SIZE(data->channel)) {
		LOG_ERR("Invalid channel count");
		return -EINVAL;
	}
	if (!device_is_ready(config->clock_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}
	if (clock_control_on(config->clock_dev, config->clock_subsys)) {
		LOG_ERR("Could not turn on clock");
		return -EINVAL;
	}
	if (clock_control_get_rate(config->clock_dev, config->clock_subsys,
				   &data->clock_freq)) {
		LOG_ERR("Could not get clock frequency");
		return -EINVAL;
	}
	/* Start with every channel parked: no PWM signal, 0% duty. */
	for (i = 0; i < config->channel_count; i++) {
		channel->chnlNumber = i;
		channel->level = kTPM_NoPwmSignal;
		channel->dutyCyclePercent = 0;
		channel->firstEdgeDelayPercent = 0;
		channel++;
	}
	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}
	TPM_GetDefaultConfig(&tpm_config);
	tpm_config.prescale = config->prescale;
	TPM_Init(config->base, &tpm_config);
	return 0;
}
/* PWM driver API vtable */
static const struct pwm_driver_api rv32m1_tpm_driver_api = {
	.set_cycles = rv32m1_tpm_set_cycles,
	.get_cycles_per_sec = rv32m1_tpm_get_cycles_per_sec,
};
/* Per-instance config/data and device definition, one per enabled DT node. */
#define TPM_DEVICE(n) \
	PINCTRL_DT_INST_DEFINE(n); \
	static const struct rv32m1_tpm_config rv32m1_tpm_config_##n = { \
		.base =	(TPM_Type *) \
			DT_INST_REG_ADDR(n), \
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \
		.clock_subsys = (clock_control_subsys_t) \
			DT_INST_CLOCKS_CELL(n, name), \
		.tpm_clock_source = kTPM_SystemClock, \
		.prescale = kTPM_Prescale_Divide_16, \
		.channel_count = FSL_FEATURE_TPM_CHANNEL_COUNTn((TPM_Type *) \
			DT_INST_REG_ADDR(n)), \
		.mode = kTPM_EdgeAlignedPwm, \
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
	}; \
	static struct rv32m1_tpm_data rv32m1_tpm_data_##n; \
	DEVICE_DT_INST_DEFINE(n, &rv32m1_tpm_init, NULL, \
			      &rv32m1_tpm_data_##n, \
			      &rv32m1_tpm_config_##n, \
			      POST_KERNEL, CONFIG_PWM_INIT_PRIORITY, \
			      &rv32m1_tpm_driver_api);
DT_INST_FOREACH_STATUS_OKAY(TPM_DEVICE)
``` | /content/code_sandbox/drivers/pwm/pwm_rv32m1_tpm.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,594 |
```unknown
# Raspberry Pi Pico PWM configuration
config PWM_RPI_PICO
	bool "RPi Pico PWM"
	default y
	depends on DT_HAS_RASPBERRYPI_PICO_PWM_ENABLED
	depends on RESET
	select PICOSDK_USE_PWM
	help
	  Enable PWM driver for RPi Pico family of MCUs
``` | /content/code_sandbox/drivers/pwm/Kconfig.rpi_pico | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 63 |
```unknown
/*
 *
 */
/**
 * @brief Assembler-hooks specific to Nuclei's Extended Core Interrupt Controller
 */
#include <zephyr/arch/cpu.h>
GTEXT(__soc_handle_irq)
/*
 * In an ECLIC, pending interrupts don't have to be cleared by hand.
 * In vectored mode, interrupts are cleared automatically.
 * In non-vectored mode, interrupts are cleared when writing the mnxti register (done in
 * __soc_handle_all_irqs).
 * Thus this function can directly return.
 */
SECTION_FUNC(exception.other, __soc_handle_irq)
	ret
#if !defined(CONFIG_RISCV_VECTORED_MODE)
GTEXT(__soc_handle_all_irqs)
#ifdef CONFIG_TRACING
/* imports */
GTEXT(sys_trace_isr_enter)
GTEXT(sys_trace_isr_exit)
#endif
/*
 * This function services and clears all pending interrupts for an ECLIC in non-vectored mode.
 */
SECTION_FUNC(exception.other, __soc_handle_all_irqs)
	/* Save ra: clobbered by the jalr calls below. */
	addi sp, sp, -16
	sw ra, 0(sp)
	/* Read and clear mnxti to get highest current interrupt and enable interrupts. Will return
	 * original interrupt if no others appear.
	 * NOTE(review): 0x345 is the Nuclei mnxti CSR; confirm the returned
	 * value's encoding (offset vs. IRQ number) against the Nuclei spec.
	 */
	csrrci a0, 0x345, MSTATUS_IEN
	beqz a0, irq_done /* Check if original interrupt vanished. */
irq_loop:
#ifdef CONFIG_TRACING_ISR
	call sys_trace_isr_enter
#endif
	/* Call corresponding registered function in _sw_isr_table. a0 is offset in words, table is
	 * 2-word wide -> shift by one */
	la t0, _sw_isr_table
	slli a0, a0, (1)
	add t0, t0, a0
	/* Load argument in a0 register */
	lw a0, 0(t0)
	/* Load ISR function address in register t1 */
	lw t1, RV_REGSIZE(t0)
	/* Call ISR function */
	jalr ra, t1, 0
	/* Read and clear mnxti to get highest current interrupt and enable interrupts. */
	csrrci a0, 0x345, MSTATUS_IEN
#ifdef CONFIG_TRACING_ISR
	call sys_trace_isr_exit
#endif
	/* Keep draining until mnxti reports no pending interrupt. */
	bnez a0, irq_loop
irq_done:
	lw ra, 0(sp)
	addi sp, sp, 16
	ret
#endif
``` | /content/code_sandbox/drivers/interrupt_controller/intc_nuclei_eclic.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 497 |
```c
/*
*
*/
#define DT_DRV_COMPAT shared_irq
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/shared_irq.h>
#include <zephyr/init.h>
#include <zephyr/sys/sys_io.h>
#include <zephyr/irq.h>
#ifdef CONFIG_IOAPIC
#include <zephyr/drivers/interrupt_controller/ioapic.h>
#endif
/* Signature of the generated per-instance IRQ_CONNECT helper. */
typedef void (*shared_irq_config_irq_t)(void);
/* Static per-instance configuration. */
struct shared_irq_config {
	/* Hardware IRQ line shared by all clients */
	uint32_t irq_num;
	/* Generated function that connects the ISR to irq_num */
	shared_irq_config_irq_t config;
	/* Capacity of the client table */
	uint32_t client_count;
};
/* One registered client of the shared line. */
struct shared_irq_client {
	const struct device *isr_dev;
	isr_t isr_func;
	/* Non-zero when the client wants interrupts delivered */
	uint32_t enabled;
};
/* Mutable driver data: fixed-size client table allocated at build time. */
struct shared_irq_runtime {
	struct shared_irq_client *const client;
};
/**
 * @brief Register a device ISR on the shared interrupt line.
 *
 * Stores the (device, handler) pair in the first free client slot.
 *
 * @param dev Pointer to device structure for SHARED_IRQ driver instance.
 * @param isr_func Pointer to the ISR function for the device.
 * @param isr_dev Pointer to the device that will service the interrupt.
 * @return 0 on success, -EIO when every client slot is already taken.
 */
static int isr_register(const struct device *dev, isr_t isr_func,
			const struct device *isr_dev)
{
	const struct shared_irq_config *config = dev->config;
	struct shared_irq_runtime *clients = dev->data;

	for (uint32_t slot = 0U; slot < config->client_count; slot++) {
		struct shared_irq_client *client = &clients->client[slot];

		if (client->isr_dev == NULL) {
			client->isr_dev = isr_dev;
			client->isr_func = isr_func;
			return 0;
		}
	}

	return -EIO;
}
/**
 * @brief Enable interrupt delivery for a registered device.
 *
 * Marks the matching client as enabled and unmasks the shared IRQ line.
 *
 * @param dev Pointer to device structure for SHARED_IRQ driver instance.
 * @param isr_dev Pointer to the device that will service the interrupt.
 * @return 0 on success, -EIO when the device was never registered.
 */
static inline int enable(const struct device *dev,
			 const struct device *isr_dev)
{
	const struct shared_irq_config *config = dev->config;
	struct shared_irq_runtime *clients = dev->data;

	for (uint32_t slot = 0U; slot < config->client_count; slot++) {
		if (clients->client[slot].isr_dev == isr_dev) {
			clients->client[slot].enabled = 1U;
			irq_enable(config->irq_num);
			return 0;
		}
	}

	return -EIO;
}
/* Return 1 when no client on the line remains enabled, 0 otherwise. */
static int last_enabled_isr(struct shared_irq_runtime *clients, int count)
{
	for (int idx = 0; idx < count; idx++) {
		if (clients->client[idx].enabled) {
			return 0;
		}
	}

	return 1;
}
/**
 * @brief Disable interrupt delivery for a registered device.
 *
 * Clears the client's enabled flag, and masks the shared IRQ line once no
 * enabled client remains.
 *
 * @param dev Pointer to device structure for SHARED_IRQ driver instance.
 * @param isr_dev Pointer to the device that will service the interrupt.
 * @return 0 on success, -EIO when the device was never registered.
 */
static inline int disable(const struct device *dev,
			  const struct device *isr_dev)
{
	const struct shared_irq_config *config = dev->config;
	struct shared_irq_runtime *clients = dev->data;

	for (uint32_t slot = 0U; slot < config->client_count; slot++) {
		if (clients->client[slot].isr_dev != isr_dev) {
			continue;
		}
		clients->client[slot].enabled = 0U;
		if (last_enabled_isr(clients, config->client_count)) {
			irq_disable(config->irq_num);
		}
		return 0;
	}

	return -EIO;
}
/*
 * Shared ISR: fan the interrupt out to every registered client.
 *
 * NOTE(review): clients are invoked based on registration (isr_dev != NULL)
 * only; the per-client 'enabled' flag is not checked here — confirm this is
 * intentional (handlers must tolerate calls while "disabled").
 */
void shared_irq_isr(const struct device *dev)
{
	struct shared_irq_runtime *clients = dev->data;
	const struct shared_irq_config *config = dev->config;
	uint32_t i;
	for (i = 0U; i < config->client_count; i++) {
		if (clients->client[i].isr_dev) {
			clients->client[i].isr_func(clients->client[i].isr_dev, config->irq_num);
		}
	}
}
/* Shared-IRQ driver API vtable */
static const struct shared_irq_driver_api api_funcs = {
	.isr_register = isr_register,
	.enable = enable,
	.disable = disable,
};
/* Driver init: run the generated function that IRQ_CONNECTs the line. */
int shared_irq_initialize(const struct device *dev)
{
	const struct shared_irq_config *config = dev->config;
	config->config();
	return 0;
}
/*
 * INST_SUPPORTS_DEP_ORDS_CNT: Counts the number of "elements" in
 * DT_SUPPORTS_DEP_ORDS(n). There is a comma after each ordinal(inc. the last)
 * Hence FOR_EACH adds "+1" once too often which has to be subtracted in the end.
 */
#define F1(x) 1
#define INST_SUPPORTS_DEP_ORDS_CNT(n) \
	(FOR_EACH(F1, (+), DT_INST_SUPPORTS_DEP_ORDS(n)) - 1)
/* Generated per-instance IRQ_CONNECT helper; the 'sense' cell is optional. */
#define SHARED_IRQ_CONFIG_FUNC(n) \
void shared_irq_config_func_##n(void) \
{ \
	IRQ_CONNECT(DT_INST_IRQN(n), \
		    DT_INST_IRQ(n, priority), \
		    shared_irq_isr, \
		    DEVICE_DT_INST_GET(n), \
		    COND_CODE_1(DT_INST_IRQ_HAS_CELL(n, sense), \
				(DT_INST_IRQ(n, sense)), \
				(0))); \
}
/* Per-instance client table (sized by dependent-node count), runtime data,
 * config and device definition.
 */
#define SHARED_IRQ_INIT(n) \
	SHARED_IRQ_CONFIG_FUNC(n) \
	struct shared_irq_client clients_##n[INST_SUPPORTS_DEP_ORDS_CNT(n)]; \
	struct shared_irq_runtime shared_irq_data_##n = { \
		.client = clients_##n \
	}; \
	\
	const struct shared_irq_config shared_irq_config_##n = { \
		.irq_num = DT_INST_IRQN(n), \
		.client_count = INST_SUPPORTS_DEP_ORDS_CNT(n), \
		.config = shared_irq_config_func_##n \
	}; \
	DEVICE_DT_INST_DEFINE(n, shared_irq_initialize, \
			      NULL, \
			      &shared_irq_data_##n, \
			      &shared_irq_config_##n, POST_KERNEL, \
			      CONFIG_SHARED_IRQ_INIT_PRIORITY, \
			      &api_funcs);
DT_INST_FOREACH_STATUS_OKAY(SHARED_IRQ_INIT)
``` | /content/code_sandbox/drivers/interrupt_controller/intc_shared_irq.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,350 |
```c
/*
*
*/
/**
* @brief Driver for External interrupt/event controller in STM32 MCUs
*/
#define EXTI_NODE DT_INST(0, st_stm32_exti)
#include <zephyr/device.h>
#include <soc.h>
#include <stm32_ll_exti.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/interrupt_controller/exti_stm32.h>
#include <zephyr/irq.h>
#include "stm32_hsem.h"
/** @brief EXTI line ranges held by a single ISR */
struct stm32_exti_range {
	/** Start of the range */
	uint8_t start;
	/** Range length */
	uint8_t len;
};
#define NUM_EXTI_LINES DT_PROP(DT_NODELABEL(exti), num_lines)
/* EXTI line -> NVIC IRQ number map; 0xFF marks an unassigned line. */
static IRQn_Type exti_irq_table[NUM_EXTI_LINES] = {[0 ... NUM_EXTI_LINES - 1] = 0xFF};
/* wrapper for user callback */
struct __exti_cb {
	stm32_exti_callback_t cb;
	void *data;
};
/* driver data */
struct stm32_exti_data {
	/* per-line callbacks */
	struct __exti_cb cb[NUM_EXTI_LINES];
};
/*
 * Unmask an EXTI line in the controller and enable its NVIC IRQ.
 *
 * NOTE(review): the __ASSERT_NO_MSG(line) calls assert the *value* of line
 * rather than a failing condition, so a bad line only trips the assert when
 * line happens to be non-zero — confirm intent.
 */
void stm32_exti_enable(int line)
{
	int irqnum = 0;
	if (line >= NUM_EXTI_LINES) {
		__ASSERT_NO_MSG(line);
	}
	/* Get matching exti irq provided line thanks to irq_table */
	irqnum = exti_irq_table[line];
	if (irqnum == 0xFF) {
		__ASSERT_NO_MSG(line);
	}
	/* Enable requested line interrupt */
#if defined(CONFIG_SOC_SERIES_STM32H7X) && defined(CONFIG_CPU_CORTEX_M4)
	LL_C2_EXTI_EnableIT_0_31(BIT((uint32_t)line));
#else
	LL_EXTI_EnableIT_0_31(BIT((uint32_t)line));
#endif
	/* Enable exti irq interrupt */
	irq_enable(irqnum);
}
/* Mask an EXTI line; only lines 0-31 are supported by this driver. */
void stm32_exti_disable(int line)
{
	if (line < 32) {
#if defined(CONFIG_SOC_SERIES_STM32H7X) && defined(CONFIG_CPU_CORTEX_M4)
		LL_C2_EXTI_DisableIT_0_31(BIT((uint32_t)line));
#else
		LL_EXTI_DisableIT_0_31(BIT((uint32_t)line));
#endif
	} else {
		__ASSERT_NO_MSG(line);
	}
}
/**
 * @brief check if interrupt is pending
 *
 * @param line line number
 */
static inline int stm32_exti_is_pending(int line)
{
	if (line < 32) {
/* G0-style EXTI has separate rising/falling pending flags. */
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32g0_exti)
		return (LL_EXTI_IsActiveRisingFlag_0_31(BIT((uint32_t)line)) ||
			LL_EXTI_IsActiveFallingFlag_0_31(BIT((uint32_t)line)));
#elif defined(CONFIG_SOC_SERIES_STM32H7X) && defined(CONFIG_CPU_CORTEX_M4)
		return LL_C2_EXTI_IsActiveFlag_0_31(BIT((uint32_t)line));
#else
		return LL_EXTI_IsActiveFlag_0_31(BIT((uint32_t)line));
#endif
	} else {
		__ASSERT_NO_MSG(line);
		return 0;
	}
}
/**
 * @brief clear pending interrupt bit
 *
 * @param line line number
 */
static inline void stm32_exti_clear_pending(int line)
{
	if (line < 32) {
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32g0_exti)
		LL_EXTI_ClearRisingFlag_0_31(BIT((uint32_t)line));
		LL_EXTI_ClearFallingFlag_0_31(BIT((uint32_t)line));
#elif defined(CONFIG_SOC_SERIES_STM32H7X) && defined(CONFIG_CPU_CORTEX_M4)
		LL_C2_EXTI_ClearFlag_0_31(BIT((uint32_t)line));
#else
		LL_EXTI_ClearFlag_0_31(BIT((uint32_t)line));
#endif
	} else {
		__ASSERT_NO_MSG(line);
	}
}
/*
 * Select the edge(s) that trigger an EXTI line, under the inter-core
 * hardware semaphore because the trigger registers are shared.
 */
void stm32_exti_trigger(int line, int trigger)
{
	if (line >= 32) {
		__ASSERT_NO_MSG(line);
	}
	z_stm32_hsem_lock(CFG_HW_EXTI_SEMID, HSEM_LOCK_DEFAULT_RETRY);
	switch (trigger) {
	case STM32_EXTI_TRIG_NONE:
		LL_EXTI_DisableRisingTrig_0_31(BIT((uint32_t)line));
		LL_EXTI_DisableFallingTrig_0_31(BIT((uint32_t)line));
		break;
	case STM32_EXTI_TRIG_RISING:
		LL_EXTI_EnableRisingTrig_0_31(BIT((uint32_t)line));
		LL_EXTI_DisableFallingTrig_0_31(BIT((uint32_t)line));
		break;
	case STM32_EXTI_TRIG_FALLING:
		LL_EXTI_EnableFallingTrig_0_31(BIT((uint32_t)line));
		LL_EXTI_DisableRisingTrig_0_31(BIT((uint32_t)line));
		break;
	case STM32_EXTI_TRIG_BOTH:
		LL_EXTI_EnableRisingTrig_0_31(BIT((uint32_t)line));
		LL_EXTI_EnableFallingTrig_0_31(BIT((uint32_t)line));
		break;
	default:
		__ASSERT_NO_MSG(trigger);
		break;
	}
	z_stm32_hsem_unlock(CFG_HW_EXTI_SEMID);
}
/**
 * @brief EXTI ISR handler
 *
 * Check EXTI lines in exti_range for pending interrupts
 *
 * @param exti_range Pointer to a exti_range structure
 */
static void stm32_exti_isr(const void *exti_range)
{
	const struct device *dev = DEVICE_DT_GET(EXTI_NODE);
	struct stm32_exti_data *data = dev->data;
	const struct stm32_exti_range *range = exti_range;
	int line;
	/* see which bits are set */
	/* NOTE(review): the bound is inclusive (i <= len), so len + 1 lines
	 * are scanned; this relies on the devicetree line-ranges property
	 * encoding the length accordingly — confirm against the binding.
	 */
	for (uint8_t i = 0; i <= range->len; i++) {
		line = range->start + i;
		/* check if interrupt is pending */
		if (stm32_exti_is_pending(line) != 0) {
			/* clear pending interrupt */
			stm32_exti_clear_pending(line);
			/* run callback only if one is registered */
			if (!data->cb[line].cb) {
				continue;
			}
			data->cb[line].cb(line, data->cb[line].data);
		}
	}
}
/* Map 'len' consecutive EXTI lines starting at 'start' to IRQ number 'irqn'. */
static void stm32_fill_irq_table(int8_t start, int8_t len, int32_t irqn)
{
	int8_t remaining = len;
	int8_t line = start;

	while (remaining-- > 0) {
		exti_irq_table[line++] = irqn;
	}
}
/* This macro:
 * - populates line_range_x from line_range dt property
 * - fill exti_irq_table through stm32_fill_irq_table()
 * - calls IRQ_CONNECT for each irq & matching line_range
 */
#define STM32_EXTI_INIT(node_id, interrupts, idx)			\
	static const struct stm32_exti_range line_range_##idx = {	\
		DT_PROP_BY_IDX(node_id, line_ranges, UTIL_X2(idx)),	\
		DT_PROP_BY_IDX(node_id, line_ranges, UTIL_INC(UTIL_X2(idx))) \
	};								\
	stm32_fill_irq_table(line_range_##idx.start,			\
			     line_range_##idx.len,			\
			     DT_IRQ_BY_IDX(node_id, idx, irq));		\
	IRQ_CONNECT(DT_IRQ_BY_IDX(node_id, idx, irq),			\
		    DT_IRQ_BY_IDX(node_id, idx, priority),		\
		    stm32_exti_isr, &line_range_##idx,			\
		    0);
/**
 * @brief initialize EXTI device driver
 *
 * Expands STM32_EXTI_INIT once per interrupt listed in the exti node,
 * building the line->IRQ table and connecting the shared ISR.
 */
static int stm32_exti_init(const struct device *dev)
{
	ARG_UNUSED(dev);
	DT_FOREACH_PROP_ELEM(DT_NODELABEL(exti),
			     interrupt_names,
			     STM32_EXTI_INIT);
	return 0;
}
static struct stm32_exti_data exti_data;
DEVICE_DT_DEFINE(EXTI_NODE, &stm32_exti_init,
		 NULL,
		 &exti_data, NULL,
		 PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY,
		 NULL);
/**
 * @brief Install a user callback for an EXTI line.
 *
 * @return 0 when installed (or when the identical callback/arg pair is
 *         already present); -EBUSY when a different callback is registered.
 */
int stm32_exti_set_callback(int line, stm32_exti_callback_t cb, void *arg)
{
	const struct device *const dev = DEVICE_DT_GET(EXTI_NODE);
	struct stm32_exti_data *data = dev->data;
	struct __exti_cb *entry = &data->cb[line];

	/* Re-registering the exact same pair is a no-op. */
	if ((entry->cb == cb) && (entry->data == arg)) {
		return 0;
	}

	/* A different callback may still be running; refuse to replace it. */
	if (entry->cb != NULL) {
		return -EBUSY;
	}

	entry->cb = cb;
	entry->data = arg;

	return 0;
}
void stm32_exti_unset_callback(int line)
{
const struct device *const dev = DEVICE_DT_GET(EXTI_NODE);
struct stm32_exti_data *data = dev->data;
data->cb[line].cb = NULL;
data->cb[line].data = NULL;
}
``` | /content/code_sandbox/drivers/interrupt_controller/intc_exti_stm32.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,027 |
```c
/*
*
*/
/**
* @file
* @brief ARCv2 Interrupt Unit device driver
*
* The ARCv2 interrupt unit has 16 allocated exceptions associated with
* vectors 0 to 15 and 240 interrupts associated with vectors 16 to 255.
* The interrupt unit is optional in the ARCv2-based processors. When
* building a processor, you can configure the processor to include an
* interrupt unit. The ARCv2 interrupt unit is highly programmable.
*/
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/device.h>
#include <zephyr/device.h>
#define DT_DRV_COMPAT snps_arcv2_intc
#ifdef CONFIG_ARC_CONNECT
static void arc_shared_intc_init(void)
{
	/*
	 * Initialize all IDU interrupts:
	 * - select round-robbin
	 * - disable all lines
	 */
	BUILD_ASSERT(CONFIG_NUM_IRQS > ARC_CONNECT_IDU_IRQ_START);
	__ASSERT(z_arc_v2_core_id() == ARC_MP_PRIMARY_CPU_ID,
		 "idu interrupts must be inited from primary core");
	z_arc_connect_idu_disable();
	for (uint32_t i = 0; i < (CONFIG_NUM_IRQS - ARC_CONNECT_IDU_IRQ_START); i++) {
		/*
		 * TODO: don't use z_arc_connect_idu* functions to avoid
		 * locking/unlocking every time.
		 */
		/* Disable (mask) line */
		z_arc_connect_idu_set_mask(i, 0x1);
		z_arc_connect_idu_set_mode(i, ARC_CONNECT_INTRPT_TRIGGER_LEVEL,
					   ARC_CONNECT_DISTRI_MODE_ROUND_ROBIN);
		/*
		 * Fake round-robin: we allow to distribute interrupts only to primary core as
		 * secondary cores may be not initialized yet.
		 */
		z_arc_connect_idu_set_dest(i, BIT(ARC_MP_PRIMARY_CPU_ID));
	}
	z_arc_connect_idu_enable();
}
/* Allow to schedule IRQ to all cores after we bring up all secondary cores */
static int arc_shared_intc_update_post_smp(void)
{
	__ASSERT(z_arc_v2_core_id() == ARC_MP_PRIMARY_CPU_ID,
		 "idu interrupts must be updated from primary core");
	z_arc_connect_idu_disable();
	/* Widen each IDU line's destination mask to cover every online CPU. */
	for (uint32_t i = 0; i < (CONFIG_NUM_IRQS - ARC_CONNECT_IDU_IRQ_START); i++) {
		/* TODO: take arc_connect_spinlock one time to avoid locking/unlocking every time */
		z_arc_connect_idu_set_dest(i, BIT_MASK(arch_num_cpus()));
	}
	z_arc_connect_idu_enable();
	return 0;
}
SYS_INIT(arc_shared_intc_update_post_smp, SMP, 0);
#endif /* CONFIG_ARC_CONNECT */
/* lowest IRQ priority */
#ifdef CONFIG_ARC_SECURE_FIRMWARE
#define ARC_IRQ_DEFAULT_PRIORITY ((CONFIG_NUM_IRQ_PRIO_LEVELS - 1) | _ARC_V2_IRQ_PRIORITY_SECURE)
#else
#define ARC_IRQ_DEFAULT_PRIORITY (CONFIG_NUM_IRQ_PRIO_LEVELS - 1)
#endif
/* Program one IRQ line: lowest priority, level-triggered, given state.
 * "nolock" because callers run with interrupts globally disabled.
 */
static inline void arc_core_intc_init_nolock(uint32_t irq, uint32_t state)
{
	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY, ARC_IRQ_DEFAULT_PRIORITY);
	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER, _ARC_V2_INT_LEVEL);
	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_ENABLE, state);
}
/*
 * Initialize the core private interrupt controller.
 *
 * This function must be called on each CPU in case of SMP system.
 *
 * NOTE: core interrupts are still globally disabled at this point (STATUS32.IE = 0), so there is
 * no need to protect the window between a write to IRQ_SELECT and subsequent writes to the
 * selected IRQ's registers with locks.
 */
void arc_core_private_intc_init(void)
{
	/*
	 * Interrupts from 0 to 15 are exceptions and they are ignored by IRQ auxiliary registers.
	 * We skip those interrupt lines while setting up core private interrupt controller.
	 */
	BUILD_ASSERT(CONFIG_GEN_IRQ_START_VECTOR == 16);
	/*
	 * System with IDU case (most likely multi-core system):
	 * - disable private IRQs: they will be enabled with irq_enable before usage
	 * - enable shared (IDU) IRQs: their enabling / disabling is controlled via IDU, so we
	 *   always pass them via core private interrupt controller.
	 * System without IDU case (single-core system):
	 * - disable all IRQs: they will be enabled with irq_enable before usage
	 */
#ifdef CONFIG_ARC_CONNECT
	for (uint32_t irq = CONFIG_GEN_IRQ_START_VECTOR; irq < ARC_CONNECT_IDU_IRQ_START; irq++) {
		arc_core_intc_init_nolock(irq, _ARC_V2_INT_DISABLE);
	}
	for (uint32_t irq = ARC_CONNECT_IDU_IRQ_START; irq < CONFIG_NUM_IRQS; irq++) {
		arc_core_intc_init_nolock(irq, _ARC_V2_INT_ENABLE);
	}
#else
	for (uint32_t irq = CONFIG_GEN_IRQ_START_VECTOR; irq < CONFIG_NUM_IRQS; irq++) {
		arc_core_intc_init_nolock(irq, _ARC_V2_INT_DISABLE);
	}
#endif /* CONFIG_ARC_CONNECT */
}
/* Device init hook: shared (IDU) controller first, then core 0's private
 * controller; secondary cores init theirs during their own startup.
 */
static int arc_irq_init(const struct device *dev)
{
#ifdef CONFIG_ARC_CONNECT
	arc_shared_intc_init();
#endif /* CONFIG_ARC_CONNECT */
	/*
	 * We initialize per-core part for core 0 here,
	 * for rest cores it will be initialized in slave_start.
	 */
	arc_core_private_intc_init();
	return 0;
}
DEVICE_DT_INST_DEFINE(0, arc_irq_init, NULL, NULL, NULL,
		      PRE_KERNEL_1, 0, NULL);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_arcv2_irq_unit.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,241 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/init.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/printk.h>
#include <zephyr/sw_isr_table.h>
#include "intc_ite_it8xxx2.h"
LOG_MODULE_REGISTER(intc_it8xxx2_v2, LOG_LEVEL_DBG);
#define IT8XXX2_INTC_BASE DT_REG_ADDR(DT_NODELABEL(intc))
/* Register groups are laid out in two interleaved banks of 4 bytes;
 * groups 0-3 use offsets 0-3, groups >= 4 use offsets 4-7.
 */
#define IT8XXX2_INTC_BASE_SHIFT(g) (IT8XXX2_INTC_BASE + ((g) << 2))
/* Interrupt status register */
#define IT8XXX2_INTC_ISR(g) ECREG(IT8XXX2_INTC_BASE_SHIFT(g) + \
					((g) < 4 ? 0x0 : 0x4))
/* Interrupt enable register */
#define IT8XXX2_INTC_IER(g) ECREG(IT8XXX2_INTC_BASE_SHIFT(g) + \
					((g) < 4 ? 0x1 : 0x5))
/* Interrupt edge/level triggered mode register */
#define IT8XXX2_INTC_IELMR(g) ECREG(IT8XXX2_INTC_BASE_SHIFT(g) + \
					((g) < 4 ? 0x2 : 0x6))
/* Interrupt polarity register */
#define IT8XXX2_INTC_IPOLR(g) ECREG(IT8XXX2_INTC_BASE_SHIFT(g) + \
					((g) < 4 ? 0x3 : 0x7))
#define IT8XXX2_INTC_GROUP_CNT 24
#define MAX_REGISR_IRQ_NUM 8
#define IVECT_OFFSET_WITH_IRQ 0x10
/* Interrupt number of INTC module */
static uint8_t intc_irq;
/* Snapshot of the IER registers taken by save_and_disable_interrupts(). */
static uint8_t ier_setting[IT8XXX2_INTC_GROUP_CNT];
/* Save all interrupt-enable registers, then mask every INTC line. */
void ite_intc_save_and_disable_interrupts(void)
{
	/* Disable global interrupt for critical section */
	unsigned int key = irq_lock();
	/* Save and disable interrupts */
	for (int i = 0; i < IT8XXX2_INTC_GROUP_CNT; i++) {
		ier_setting[i] = IT8XXX2_INTC_IER(i);
		IT8XXX2_INTC_IER(i) = 0;
	}
	/*
	 * This load operation will guarantee the above modification of
	 * SOC's register can be seen by any following instructions.
	 * Note: Barrier instruction can not synchronize chip register,
	 * so we introduce workaround here.
	 */
	IT8XXX2_INTC_IER(IT8XXX2_INTC_GROUP_CNT - 1);
	irq_unlock(key);
}
/* Restore the interrupt-enable registers previously saved by
 * ite_intc_save_and_disable_interrupts().
 */
void ite_intc_restore_interrupts(void)
{
	/*
	 * Ensure the highest priority interrupt will be the first fired
	 * interrupt when soc is ready to go.
	 */
	unsigned int key = irq_lock();

	for (int group = 0; group < IT8XXX2_INTC_GROUP_CNT; group++) {
		IT8XXX2_INTC_IER(group) = ier_setting[group];
	}

	irq_unlock(key);
}
/**
 * @brief Clear the pending (status) bit of an INTC IRQ line.
 *
 * @param irq Zero-based IRQ number; out-of-range values are ignored.
 */
void ite_intc_isr_clear(unsigned int irq)
{
	uint32_t group, index;

	/* Fix: valid IRQ numbers are 0..CONFIG_NUM_IRQS - 1; the previous
	 * ">" test accepted irq == CONFIG_NUM_IRQS, one past the last line.
	 */
	if (irq >= CONFIG_NUM_IRQS) {
		return;
	}
	group = irq / MAX_REGISR_IRQ_NUM;
	index = irq % MAX_REGISR_IRQ_NUM;
	/* Write-1-to-clear the status bit. */
	IT8XXX2_INTC_ISR(group) = BIT(index);
}
/**
 * @brief Unmask (enable) an INTC IRQ line.
 *
 * @param irq Zero-based IRQ number; out-of-range values are ignored.
 */
void __soc_ram_code ite_intc_irq_enable(unsigned int irq)
{
	uint32_t group, index;

	/* Fix: valid IRQ numbers are 0..CONFIG_NUM_IRQS - 1; the previous
	 * ">" test accepted irq == CONFIG_NUM_IRQS, one past the last line.
	 */
	if (irq >= CONFIG_NUM_IRQS) {
		return;
	}
	group = irq / MAX_REGISR_IRQ_NUM;
	index = irq % MAX_REGISR_IRQ_NUM;
	/* Critical section due to run a bit-wise OR operation */
	unsigned int key = irq_lock();

	IT8XXX2_INTC_IER(group) |= BIT(index);
	irq_unlock(key);
}
/*
 * Disable (mask) a SOC interrupt in its group IER register.
 *
 * @param irq SOC interrupt number (0 .. CONFIG_NUM_IRQS - 1)
 */
void __soc_ram_code ite_intc_irq_disable(unsigned int irq)
{
	uint32_t group, index;

	/* Reject irq == CONFIG_NUM_IRQS too (was an off-by-one '>') */
	if (irq >= CONFIG_NUM_IRQS) {
		return;
	}
	group = irq / MAX_REGISR_IRQ_NUM;
	index = irq % MAX_REGISR_IRQ_NUM;
	/* Critical section due to run a bit-wise NAND operation */
	unsigned int key = irq_lock();

	IT8XXX2_INTC_IER(group) &= ~BIT(index);
	/*
	 * This load operation will guarantee the above modification of
	 * SOC's register can be seen by any following instructions.
	 */
	IT8XXX2_INTC_IER(group);
	irq_unlock(key);
}
/*
 * Configure trigger polarity and edge/level mode for @p irq.
 *
 * @param irq   SOC interrupt number (0 .. CONFIG_NUM_IRQS - 1)
 * @param flags IRQ_TYPE_* flags; IRQ_TYPE_EDGE_BOTH is not supported
 */
void ite_intc_irq_polarity_set(unsigned int irq, unsigned int flags)
{
	uint32_t group, index;

	/* Reject irq == CONFIG_NUM_IRQS too (was an off-by-one '>') */
	if (irq >= CONFIG_NUM_IRQS) {
		return;
	}
	/* Dual-edge triggering is not supported by this controller */
	if ((flags & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
		return;
	}
	group = irq / MAX_REGISR_IRQ_NUM;
	index = irq % MAX_REGISR_IRQ_NUM;
	/* Polarity: high/rising clears the IPOLR bit, low/falling sets it */
	if ((flags & IRQ_TYPE_LEVEL_HIGH) || (flags & IRQ_TYPE_EDGE_RISING)) {
		IT8XXX2_INTC_IPOLR(group) &= ~BIT(index);
	} else {
		IT8XXX2_INTC_IPOLR(group) |= BIT(index);
	}
	/* Mode: level-triggered clears the IELMR bit, edge sets it */
	if ((flags & IRQ_TYPE_LEVEL_LOW) || (flags & IRQ_TYPE_LEVEL_HIGH)) {
		IT8XXX2_INTC_IELMR(group) &= ~BIT(index);
	} else {
		IT8XXX2_INTC_IELMR(group) |= BIT(index);
	}
}
/*
 * Return non-zero if @p irq is currently enabled in its group IER.
 *
 * @param irq SOC interrupt number (0 .. CONFIG_NUM_IRQS - 1)
 */
int __soc_ram_code ite_intc_irq_is_enable(unsigned int irq)
{
	uint32_t group, index;

	/* Reject irq == CONFIG_NUM_IRQS too (was an off-by-one '>') */
	if (irq >= CONFIG_NUM_IRQS) {
		return 0;
	}
	group = irq / MAX_REGISR_IRQ_NUM;
	index = irq % MAX_REGISR_IRQ_NUM;
	return IS_MASK_SET(IT8XXX2_INTC_IER(group), BIT(index));
}
/* Return the interrupt number most recently captured by get_irq(). */
uint8_t __soc_ram_code ite_intc_get_irq_num(void)
{
	return intc_irq;
}
/*
 * Return true when the vector register holds only the base offset,
 * i.e. no SOC interrupt is currently asserted.
 */
bool __soc_ram_code ite_intc_no_irq(void)
{
	return (IVECT == IVECT_OFFSET_WITH_IRQ);
}
/*
 * Identify the active SOC interrupt number from the IVECT register,
 * clear its pending bit and return it. Also updates the module-level
 * intc_irq, later retrievable via ite_intc_get_irq_num().
 *
 * @param arg Unused.
 * @return Interrupt number (0 .. CONFIG_NUM_IRQS - 1).
 */
uint8_t __soc_ram_code get_irq(void *arg)
{
	ARG_UNUSED(arg);
	/* Wait until two equal interrupt values are read */
	do {
		/* Read interrupt number from interrupt vector register */
		intc_irq = IVECT;
		/*
		 * WORKAROUND: when the interrupt vector register (IVECT)
		 * isn't latched in a load operation, we read it again to make
		 * sure the value we got is the correct value.
		 */
	} while (intc_irq != IVECT);
	/* Determine interrupt number */
	intc_irq -= IVECT_OFFSET_WITH_IRQ;
	/*
	 * Look for pending interrupt if there's interrupt number 0 from
	 * the AIVECT register.
	 */
	if (intc_irq == 0) {
		uint8_t int_pending;
		/* Scan groups high-to-low for a pending AND enabled bit */
		for (int i = (IT8XXX2_INTC_GROUP_CNT - 1); i >= 0; i--) {
			int_pending =
				(IT8XXX2_INTC_ISR(i) & IT8XXX2_INTC_IER(i));
			if (int_pending != 0) {
				intc_irq = (MAX_REGISR_IRQ_NUM * i) +
					   find_msb_set(int_pending) - 1;
				LOG_DBG("Pending interrupt found: %d",
					intc_irq);
				LOG_DBG("CPU mepc: 0x%lx", csr_read(mepc));
				break;
			}
		}
	}
	/* Clear interrupt status */
	ite_intc_isr_clear(intc_irq);
	/* Return interrupt number */
	return intc_irq;
}
/*
 * Dummy handler for reserved interrupt 0; registered only to avoid a
 * kernel panic on spurious IRQ0 (see soc_interrupt_init()'s WORKAROUND
 * note).
 */
static void intc_irq0_handler(const void *arg)
{
	ARG_UNUSED(arg);
	LOG_DBG("SOC it8xxx2 Interrupt 0 handler");
}
/*
 * One-time SOC interrupt setup: mask every INTC group, register the
 * spurious-IRQ0 workaround handler and enable the RISC-V machine-mode
 * external interrupt.
 */
void soc_interrupt_init(void)
{
	/* Ensure interrupts of soc are disabled at default */
	for (int i = 0; i < IT8XXX2_INTC_GROUP_CNT; i++) {
		IT8XXX2_INTC_IER(i) = 0;
	}
	/*
	 * WORKAROUND: In the it8xxx2 chip, the interrupt for INT0 is reserved.
	 * However, in some stress tests, the unhandled IRQ0 issue occurs.
	 * To prevent the system from going directly into kernel panic, we
	 * implemented a workaround by registering interrupt number 0 and doing
	 * nothing in the IRQ0 handler. The side effect of this solution is
	 * that when IRQ0 is triggered, it will take some time to execute the
	 * routine. There is no need to worry about missing interrupts because
	 * each IRQ's ISR is write-clear, and if the status is not cleared, it
	 * will continue to trigger.
	 *
	 * NOTE: After this workaround is merged, we will then find out under
	 * what circumstances the situation can be reproduced and fix it, and
	 * then remove the workaround.
	 */
	IRQ_CONNECT(0, 0, intc_irq0_handler, 0, 0);
	/* Enable M-mode external interrupt */
	csr_set(mie, MIP_MEIP);
}
``` | /content/code_sandbox/drivers/interrupt_controller/intc_ite_it8xxx2_v2.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,977 |
```c
/*
* Systems, Inc.
*
*/
#define DT_DRV_COMPAT intel_ioapic
/**
* @file
* @brief Intel IO APIC/xAPIC driver
*
* This module is a driver for the IO APIC/xAPIC (Advanced Programmable
* Interrupt Controller) for P6 (PentiumPro, II, III) family processors
* and P7 (Pentium4) family processors. The IO APIC/xAPIC is included
* in the Intel's system chip set, such as ICH2. Software intervention
* may be required to enable the IO APIC/xAPIC in some chip sets.
* The 8259A interrupt controller is intended for use in a uni-processor
* system, IO APIC can be used in either a uni-processor or multi-processor
* system. The IO APIC handles interrupts very differently than the 8259A.
* Briefly, these differences are:
* - Method of Interrupt Transmission. The IO APIC transmits interrupts
* through a 3-wire bus and interrupts are handled without the need for
* the processor to run an interrupt acknowledge cycle.
* - Interrupt Priority. The priority of interrupts in the IO APIC is
* independent of the interrupt number. For example, interrupt 10 can
* be given a higher priority than interrupt 3.
* - More Interrupts. The IO APIC supports a total of 24 interrupts.
*
* The IO APIC unit consists of a set of interrupt input signals, a 24-entry
* by 64-bit Interrupt Redirection Table, programmable registers, and a message
* unit for sending and receiving APIC messages over the APIC bus or the
* Front-Side (system) bus. IO devices inject interrupts into the system by
* asserting one of the interrupt lines to the IO APIC. The IO APIC selects the
* corresponding entry in the Redirection Table and uses the information in that
* entry to format an interrupt request message. Each entry in the Redirection
* Table can be individually programmed to indicate edge/level sensitive interrupt
* signals, the interrupt vector and priority, the destination processor, and how
* the processor is selected (statically and dynamically). The information in
* the table is used to transmit a message to other APIC units (via the APIC bus
* or the Front-Side (system) bus). IO APIC is used in the Symmetric IO Mode.
* The base address of IO APIC is determined in loapic_init() and stored in the
* global variable ioApicBase and ioApicData.
* The lower 32 bit value of the redirection table entries for IRQ 0
* to 15 are edge triggered positive high, and for IRQ 16 to 23 are level
* triggered positive low.
*
* This implementation doesn't support multiple IO APICs.
*
* INCLUDE FILES: ioapic.h loapic.h
*
*/
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/device.h>
#include <zephyr/pm/device.h>
#include <string.h>
#include <zephyr/drivers/interrupt_controller/ioapic.h> /* public API declarations */
#include <zephyr/drivers/interrupt_controller/loapic.h> /* public API declarations and registers */
#include "intc_ioapic_priv.h"
DEVICE_MMIO_TOPLEVEL_STATIC(ioapic_regs, DT_DRV_INST(0));
#define IOAPIC_REG DEVICE_MMIO_TOPLEVEL_GET(ioapic_regs)
/*
* Destination field (bits[56:63]) defines a set of processors, which is
* used to be compared with local LDR to determine which local APICs accept
* the interrupt.
*
* XAPIC: in logical destination mode and flat model (determined by DFR).
* LDR bits[24:31] can accommodate up to 8 logical APIC IDs.
*
* X2APIC: in logical destination mode and cluster model.
* In this case, LDR is read-only to system software and supports up to 16
* logical IDs. (Cluster ID: don't care to IO APIC).
*
* In either case, regardless how many CPUs in the system, 0xff implies that
* it's intended to deliver to all possible 8 local APICs.
*/
#define DEFAULT_RTE_DEST (0xFF << 24)
static __pinned_bss uint32_t ioapic_rtes;
#ifdef CONFIG_PM_DEVICE
#define BITS_PER_IRQ 4
#define IOAPIC_BITFIELD_HI_LO 0
#define IOAPIC_BITFIELD_LVL_EDGE 1
#define IOAPIC_BITFIELD_ENBL_DSBL 2
#define IOAPIC_BITFIELD_DELIV_MODE 3
#define BIT_POS_FOR_IRQ_OPTION(irq, option) ((irq) * BITS_PER_IRQ + (option))
/* Allocate a buffer for up to 256 IRQs' worth of RTE bits; RTEs are
 * discovered dynamically, so just assume the maximum — it's only
 * 128 bytes in total.
 */
#define SUSPEND_BITS_REQD (ROUND_UP((256 * BITS_PER_IRQ), 32))
__pinned_bss
uint32_t ioapic_suspend_buf[SUSPEND_BITS_REQD / 32] = {0};
#endif
static uint32_t __IoApicGet(int32_t offset);
static void __IoApicSet(int32_t offset, uint32_t value);
static void ioApicRedSetHi(unsigned int irq, uint32_t upper32);
static void ioApicRedSetLo(unsigned int irq, uint32_t lower32);
static uint32_t ioApicRedGetLo(unsigned int irq);
static void IoApicRedUpdateLo(unsigned int irq, uint32_t value,
uint32_t mask);
#if defined(CONFIG_INTEL_VTD_ICTL) && \
!defined(CONFIG_INTEL_VTD_ICTL_XAPIC_PASSTHROUGH)
#include <zephyr/drivers/interrupt_controller/intel_vtd.h>
#include <zephyr/acpi/acpi.h>
static const struct device *const vtd =
DEVICE_DT_GET_OR_NULL(DT_INST(0, intel_vt_d));
static uint16_t ioapic_id;
/*
 * Lazily resolve the VT-d remapping device and the IO APIC id from the
 * ACPI DMAR table; returns true once both are available.
 */
static bool get_vtd(void)
{
	if (!device_is_ready(vtd)) {
		return false;
	}
	/* ioapic_id already discovered on an earlier call? */
	if (ioapic_id == 0) {
		return acpi_dmar_ioapic_get(&ioapic_id) == 0;
	}
	return true;
}
#endif /* CONFIG_INTEL_VTD_ICTL && !INTEL_VTD_ICTL_XAPIC_PASSTHROUGH */
/*
* The functions irq_enable() and irq_disable() are implemented in the
* interrupt controller driver due to the IRQ virtualization imposed by
* the x86 architecture.
*/
/**
 * @brief Initialize the IO APIC or xAPIC
 *
 * Maps the controller's MMIO region, reads the number of available
 * redirection table entries (RTEs) from the version register and, when
 * CONFIG_IOAPIC_MASK_RTE is set, masks every RTE so stale routing from a
 * previous boot stage cannot raise interrupts.
 *
 * @param unused Device instance (not used).
 *
 * @retval 0 on success.
 */
__boot_func
int ioapic_init(const struct device *unused)
{
	ARG_UNUSED(unused);
	DEVICE_MMIO_TOPLEVEL_MAP(ioapic_regs, K_MEM_CACHE_NONE);
	/* Reading MRE: this will give the number of RTEs available */
	ioapic_rtes = ((__IoApicGet(IOAPIC_VERS) &
			IOAPIC_MRE_MASK) >> IOAPIC_MRE_POS) + 1;
#ifdef CONFIG_IOAPIC_MASK_RTE
	int32_t ix;	/* redirection table index */
	uint32_t rteValue; /* value to copy into redirection table entry */
	/* Masked entry with a placeholder vector; set later by irq_set */
	rteValue = IOAPIC_EDGE | IOAPIC_HIGH | IOAPIC_FIXED | IOAPIC_INT_MASK |
		   IOAPIC_LOGICAL | 0 /* dummy vector */;
	for (ix = 0; ix < ioapic_rtes; ix++) {
		ioApicRedSetHi(ix, DEFAULT_RTE_DEST);
		ioApicRedSetLo(ix, rteValue);
	}
#endif
	return 0;
}
/* Return the RTE count discovered by ioapic_init(). */
__pinned_func
uint32_t z_ioapic_num_rtes(void)
{
	return ioapic_rtes;
}
/**
 * @brief Enable a specified APIC interrupt input line
 *
 * This routine enables a specified APIC interrupt input line by clearing
 * the interrupt mask bit in the low word of the corresponding RTE.
 *
 * @param irq IRQ number to enable
 */
__pinned_func
void z_ioapic_irq_enable(unsigned int irq)
{
	/* Clear only the mask bit; all other RTE bits are preserved */
	IoApicRedUpdateLo(irq, 0, IOAPIC_INT_MASK);
}
/**
 * @brief Disable a specified APIC interrupt input line
 *
 * This routine disables a specified APIC interrupt input line by setting
 * the interrupt mask bit in the low word of the corresponding RTE.
 *
 * @param irq IRQ number to disable
 */
__pinned_func
void z_ioapic_irq_disable(unsigned int irq)
{
	/* Set only the mask bit; all other RTE bits are preserved */
	IoApicRedUpdateLo(irq, IOAPIC_INT_MASK, IOAPIC_INT_MASK);
}
#ifdef CONFIG_PM_DEVICE
__pinned_func
void store_flags(unsigned int irq, uint32_t flags)
{
/* Currently only the following four flags are modified */
if (flags & IOAPIC_LOW) {
sys_bitfield_set_bit((mem_addr_t) ioapic_suspend_buf,
BIT_POS_FOR_IRQ_OPTION(irq, IOAPIC_BITFIELD_HI_LO));
}
if (flags & IOAPIC_LEVEL) {
sys_bitfield_set_bit((mem_addr_t) ioapic_suspend_buf,
BIT_POS_FOR_IRQ_OPTION(irq, IOAPIC_BITFIELD_LVL_EDGE));
}
if (flags & IOAPIC_INT_MASK) {
sys_bitfield_set_bit((mem_addr_t) ioapic_suspend_buf,
BIT_POS_FOR_IRQ_OPTION(irq, IOAPIC_BITFIELD_ENBL_DSBL));
}
/*
* We support lowest priority and fixed mode only, so only one bit
* needs to be saved.
*/
if (flags & IOAPIC_LOWEST) {
sys_bitfield_set_bit((mem_addr_t) ioapic_suspend_buf,
BIT_POS_FOR_IRQ_OPTION(irq, IOAPIC_BITFIELD_DELIV_MODE));
}
}
/*
 * Rebuild the saved RTE option flags for @p irq from the suspend
 * bitfield buffer (inverse of store_flags()).
 */
__pinned_func
uint32_t restore_flags(unsigned int irq)
{
	/* Bitfield slot -> RTE flag, kept in sync with store_flags() */
	const uint32_t option[] = { IOAPIC_BITFIELD_HI_LO,
				    IOAPIC_BITFIELD_LVL_EDGE,
				    IOAPIC_BITFIELD_ENBL_DSBL,
				    IOAPIC_BITFIELD_DELIV_MODE };
	const uint32_t flag[] = { IOAPIC_LOW, IOAPIC_LEVEL,
				  IOAPIC_INT_MASK, IOAPIC_LOWEST };
	uint32_t flags = 0U;

	for (int i = 0; i < 4; i++) {
		if (sys_bitfield_test_bit((mem_addr_t)ioapic_suspend_buf,
				BIT_POS_FOR_IRQ_OPTION(irq, option[i]))) {
			flags |= flag[i];
		}
	}
	return flags;
}
/*
 * PM suspend hook: snapshot the option flags of every RTE whose IRQ has
 * a registered vector, so ioapic_resume_from_suspend() can rebuild them.
 *
 * @param port Device instance (not used).
 * @retval 0 always.
 */
__pinned_func
int ioapic_suspend(const struct device *port)
{
	int irq;
	uint32_t rte_lo;
	ARG_UNUSED(port);
	(void)memset(ioapic_suspend_buf, 0, (SUSPEND_BITS_REQD >> 3));
	for (irq = 0; irq < ioapic_rtes; irq++) {
		/*
		 * The following check is to figure out the registered
		 * IRQ lines, so as to limit ourselves to saving the
		 * flags for them only.
		 */
		if (_irq_to_interrupt_vector[irq]) {
			rte_lo = ioApicRedGetLo(irq);
			store_flags(irq, rte_lo);
		}
	}
	return 0;
}
/*
 * PM resume hook: rewrite every RTE — registered IRQs get their saved
 * flags and recorded vector back; all other entries are reprogrammed to
 * a sane masked default.
 *
 * @param port Device instance (not used).
 * @retval 0 always.
 */
__pinned_func
int ioapic_resume_from_suspend(const struct device *port)
{
	int irq;
	uint32_t flags;
	uint32_t rteValue;
	ARG_UNUSED(port);
	for (irq = 0; irq < ioapic_rtes; irq++) {
		if (_irq_to_interrupt_vector[irq]) {
			/* Get the saved flags */
			flags = restore_flags(irq);
			/* Appending the flags that are never modified */
			flags = flags | IOAPIC_LOGICAL;
			rteValue = (_irq_to_interrupt_vector[irq] &
					IOAPIC_VEC_MASK) | flags;
		} else {
			/* Initialize the other RTEs to sane values */
			rteValue = IOAPIC_EDGE | IOAPIC_HIGH |
				IOAPIC_FIXED | IOAPIC_INT_MASK |
				IOAPIC_LOGICAL | 0 ; /* dummy vector*/
		}
		ioApicRedSetHi(irq, DEFAULT_RTE_DEST);
		ioApicRedSetLo(irq, rteValue);
	}
	return 0;
}
/*
 * PM action dispatcher: routes suspend/resume requests to the matching
 * handler; anything else is unsupported.
 */
__pinned_func
static int ioapic_pm_action(const struct device *dev,
			    enum pm_device_action action)
{
	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		return ioapic_resume_from_suspend(dev);
	case PM_DEVICE_ACTION_SUSPEND:
		return ioapic_suspend(dev);
	default:
		return -ENOTSUP;
	}
}
#endif /*CONFIG_PM_DEVICE*/
/**
 * @brief Programs the interrupt redirection table
 *
 * This routine sets up the redirection table entry for the specified IRQ.
 * The entry is written with IOAPIC_INT_MASK set, i.e. left masked; it is
 * unmasked later via z_ioapic_irq_enable(). When VT-d interrupt
 * remapping is active, the RTE is written in remappable format and the
 * mapping is installed in the VT-d IRTE table instead.
 *
 * @param irq Virtualized IRQ
 * @param vector Vector number
 * @param flags Interrupt flags
 */
__boot_func
void z_ioapic_irq_set(unsigned int irq, unsigned int vector, uint32_t flags)
{
	uint32_t rteValue;   /* value to copy into redirection table entry */
#if defined(CONFIG_INTEL_VTD_ICTL) && \
	!defined(CONFIG_INTEL_VTD_ICTL_XAPIC_PASSTHROUGH)
	int irte_idx;
	/* No VT-d available: fall through to the plain (non-remapped) path */
	if (!get_vtd()) {
		goto no_vtd;
	}
	/* Prefer an IRTE already allocated for this vector, else by IRQ */
	irte_idx = vtd_get_irte_by_vector(vtd, vector);
	if (irte_idx < 0) {
		irte_idx = vtd_get_irte_by_irq(vtd, irq);
	}
	if (irte_idx >= 0 && !vtd_irte_is_msi(vtd, irte_idx)) {
		/* Enable interrupt remapping format and set the irte index */
		rteValue = IOAPIC_VTD_REMAP_FORMAT |
			IOAPIC_VTD_INDEX(irte_idx);
		ioApicRedSetHi(irq, rteValue);
		/* Remapped: delivery mode is Fixed (000) and
		 * destination mode is no longer present as it is replaced by
		 * the 15th bit of irte index, which is always 0 in our case.
		 */
		rteValue = IOAPIC_INT_MASK |
			(vector & IOAPIC_VEC_MASK) |
			(flags & IOAPIC_TRIGGER_MASK) |
			(flags & IOAPIC_POLARITY_MASK);
		ioApicRedSetLo(irq, rteValue);
		vtd_remap(vtd, irte_idx, vector, flags, ioapic_id);
	} else {
no_vtd:
#else
	{
#endif /* CONFIG_INTEL_VTD_ICTL && !CONFIG_INTEL_VTD_ICTL_XAPIC_PASSTHROUGH */
		/* the delivery mode is determined by the flags
		 * passed from drivers
		 */
		rteValue = IOAPIC_INT_MASK | IOAPIC_LOGICAL |
			   (vector & IOAPIC_VEC_MASK) | flags;
		ioApicRedSetHi(irq, DEFAULT_RTE_DEST);
		ioApicRedSetLo(irq, rteValue);
	}
}
/**
 * @brief Program interrupt vector for specified irq
 *
 * The routine writes the interrupt vector in the Interrupt Redirection
 * Table for specified irq number; all other RTE bits are left untouched.
 *
 * @param irq Interrupt number
 * @param vector Vector number
 */
__boot_func
void z_ioapic_int_vec_set(unsigned int irq, unsigned int vector)
{
	IoApicRedUpdateLo(irq, vector, IOAPIC_VEC_MASK);
}
/**
 * @brief Read a 32 bit IO APIC register
 *
 * This routine reads the specified IO APIC register using indirect
 * addressing: the register offset is written to the index window
 * (IOAPIC_IND), then the value is read from the data window
 * (IOAPIC_DATA).
 *
 * @param offset Register offset (8 bits)
 *
 * @return register value
 */
__pinned_func
static uint32_t __IoApicGet(int32_t offset)
{
	uint32_t value; /* value */
	unsigned int key; /* interrupt lock level */
	/* lock interrupts to ensure indirect addressing works "atomically" */
	key = irq_lock();
	*((volatile uint32_t *) (IOAPIC_REG + IOAPIC_IND)) = (unsigned char)offset;
	value = *((volatile uint32_t *)(IOAPIC_REG + IOAPIC_DATA));
	irq_unlock(key);
	return value;
}
/**
 * @brief Write a 32 bit IO APIC register
 *
 * This routine writes the specified IO APIC register using indirect
 * addressing: the register offset is written to the index window
 * (IOAPIC_IND), then the value to the data window (IOAPIC_DATA).
 *
 * @param offset Register offset (8 bits)
 * @param value Value to set the register
 */
__pinned_func
static void __IoApicSet(int32_t offset, uint32_t value)
{
	unsigned int key; /* interrupt lock level */
	/* lock interrupts to ensure indirect addressing works "atomically" */
	key = irq_lock();
	*(volatile uint32_t *)(IOAPIC_REG + IOAPIC_IND) = (unsigned char)offset;
	*((volatile uint32_t *)(IOAPIC_REG + IOAPIC_DATA)) = value;
	irq_unlock(key);
}
/**
 * @brief Get low 32 bits of Redirection Table entry
 *
 * Each RTE occupies two consecutive 32-bit registers starting at
 * IOAPIC_REDTBL; the low word comes first.
 *
 * @param irq INTIN number
 * @return 32 low-order bits
 */
__pinned_func
static uint32_t ioApicRedGetLo(unsigned int irq)
{
	return __IoApicGet(IOAPIC_REDTBL + (irq * 2));
}
/**
 * @brief Set low 32 bits of Redirection Table entry
 *
 * Each RTE occupies two consecutive 32-bit registers starting at
 * IOAPIC_REDTBL; the low word comes first.
 *
 * @param irq INTIN number
 * @param lower32 Value to be written
 */
__pinned_func
static void ioApicRedSetLo(unsigned int irq, uint32_t lower32)
{
	__IoApicSet(IOAPIC_REDTBL + (irq * 2), lower32);
}
/**
 * @brief Set high 32 bits of Redirection Table entry
 *
 * Each RTE occupies two consecutive 32-bit registers starting at
 * IOAPIC_REDTBL; the high word is the second of the pair.
 *
 * @param irq INTIN number
 * @param upper32 Value to be written
 */
__pinned_func
static void ioApicRedSetHi(unsigned int irq, uint32_t upper32)
{
	__IoApicSet(IOAPIC_REDTBL + (irq * 2) + 1, upper32);
}
/**
 * @brief Modify low 32 bits of Redirection Table entry
 *
 * Read-modify-write of the low RTE word: only the bits selected by
 * @p mask are replaced with the corresponding bits of @p value.
 *
 * @param irq INTIN number
 * @param value Value to be written
 * @param mask Mask of bits to be modified
 */
__pinned_func
static void IoApicRedUpdateLo(unsigned int irq,
			      uint32_t value,
			      uint32_t mask)
{
	uint32_t rte = ioApicRedGetLo(irq);

	rte = (rte & ~mask) | (value & mask);
	ioApicRedSetLo(irq, rte);
}
PM_DEVICE_DT_INST_DEFINE(0, ioapic_pm_action);
DEVICE_DT_INST_DEFINE(0, ioapic_init, PM_DEVICE_DT_INST_GET(0), NULL, NULL,
PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY, NULL);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_ioapic.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,312 |
```c
/*
*/
#define DT_DRV_COMPAT intel_loapic
/*
* driver for x86 CPU local APIC (as an interrupt controller)
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/pm/device.h>
#include <zephyr/types.h>
#include <string.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/arch/x86/msr.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/drivers/interrupt_controller/loapic.h> /* public API declarations */
#include <zephyr/device.h>
#include <zephyr/drivers/interrupt_controller/sysapic.h>
#include <zephyr/drivers/interrupt_controller/ioapic.h>
/* Local APIC Version Register Bits */
#define LOAPIC_VERSION_MASK 0x000000ff /* LO APIC Version mask */
#define LOAPIC_MAXLVT_MASK 0x00ff0000 /* LO APIC Max LVT mask */
#define LOAPIC_PENTIUM4 0x00000014 /* LO APIC in Pentium4 */
#define LOAPIC_LVT_PENTIUM4 5 /* LO APIC LVT - Pentium4 */
#define LOAPIC_LVT_P6 4 /* LO APIC LVT - P6 */
#define LOAPIC_LVT_P5 3 /* LO APIC LVT - P5 */
/* Local APIC Vector Table Bits */
#define LOAPIC_VECTOR 0x000000ff /* vectorNo */
#define LOAPIC_MODE 0x00000700 /* delivery mode */
#define LOAPIC_FIXED 0x00000000 /* delivery mode: FIXED */
#define LOAPIC_SMI 0x00000200 /* delivery mode: SMI */
#define LOAPIC_NMI 0x00000400 /* delivery mode: NMI */
#define LOAPIC_EXT 0x00000700 /* delivery mode: ExtINT */
#define LOAPIC_IDLE 0x00000000 /* delivery status: Idle */
#define LOAPIC_PEND 0x00001000 /* delivery status: Pend */
#define LOAPIC_HIGH 0x00000000 /* polarity: High */
#define LOAPIC_LOW 0x00002000 /* polarity: Low */
#define LOAPIC_REMOTE 0x00004000 /* remote IRR */
#define LOAPIC_EDGE 0x00000000 /* trigger mode: Edge */
#define LOAPIC_LEVEL 0x00008000 /* trigger mode: Level */
/* Local APIC Spurious-Interrupt Register Bits */
#define LOAPIC_ENABLE 0x100 /* APIC Enabled */
#define LOAPIC_FOCUS_DISABLE 0x200 /* Focus Processor Checking */
#if CONFIG_LOAPIC_SPURIOUS_VECTOR_ID == -1
#define LOAPIC_SPURIOUS_VECTOR_ID (CONFIG_IDT_NUM_VECTORS - 1)
#else
#define LOAPIC_SPURIOUS_VECTOR_ID CONFIG_LOAPIC_SPURIOUS_VECTOR_ID
#endif
#define LOAPIC_SSPND_BITS_PER_IRQ 1 /* Just the one for enable disable*/
#define LOAPIC_SUSPEND_BITS_REQD (ROUND_UP((LOAPIC_IRQ_COUNT * LOAPIC_SSPND_BITS_PER_IRQ), 32))
#ifdef CONFIG_PM_DEVICE
__pinned_bss
uint32_t loapic_suspend_buf[LOAPIC_SUSPEND_BITS_REQD / 32] = {0};
#endif
DEVICE_MMIO_TOPLEVEL(LOAPIC_REGS_STR, DT_DRV_INST(0));
/* Signal end-of-interrupt by writing 0 to the local APIC EOI register. */
__pinned_func
void send_eoi(void)
{
	x86_write_xapic(LOAPIC_EOI, 0);
}
/**
 * @brief Enable and initialize the local APIC.
 *
 * Called from early assembly layer (e.g., crt0.S). Maps the local APIC
 * MMIO region, sets the logical APIC ID (xAPIC only), enables the APIC,
 * optionally switches to x2APIC mode, resets DFR/TPR/timer registers,
 * programs LINT0/LINT1 for Virtual Wire Mode and masks all other LVT
 * entries.
 *
 * @param cpu_number Logical CPU index: 0 for the BSP, the x86_cpuboot[]
 *                   index for secondary CPUs (used as the logical APIC
 *                   ID in xAPIC flat model).
 */
__pinned_func
void z_loapic_enable(unsigned char cpu_number)
{
	int32_t loApicMaxLvt; /* local APIC Max LVT */
	DEVICE_MMIO_TOPLEVEL_MAP(LOAPIC_REGS_STR, K_MEM_CACHE_NONE);
#ifndef CONFIG_X2APIC
	/*
	 * in xAPIC and flat model, bits 24-31 in LDR (Logical APIC ID) are
	 * bitmap of target logical APIC ID and it supports maximum 8 local
	 * APICs.
	 *
	 * The logical APIC ID could be arbitrarily selected by system software
	 * and is different from local APIC ID in local APIC ID register.
	 *
	 * We choose 0 for BSP, and the index to x86_cpuboot[] for secondary
	 * CPUs.
	 *
	 * in X2APIC, LDR is read-only.
	 */
	x86_write_xapic(LOAPIC_LDR, 1 << (cpu_number + 24));
#endif
	/*
	 * enable the local APIC. note that we use xAPIC mode here, since
	 * x2APIC access is not enabled until the next step (if at all).
	 */
	x86_write_xapic(LOAPIC_SVR,
			x86_read_xapic(LOAPIC_SVR) | LOAPIC_ENABLE);
#ifdef CONFIG_X2APIC
	/*
	 * turn on x2APIC mode. we trust the config option, so
	 * we don't check CPUID to see if x2APIC is supported.
	 */
	uint64_t msr = z_x86_msr_read(X86_APIC_BASE_MSR);
	msr |= X86_APIC_BASE_MSR_X2APIC;
	z_x86_msr_write(X86_APIC_BASE_MSR, msr);
#endif
	loApicMaxLvt = (x86_read_loapic(LOAPIC_VER) & LOAPIC_MAXLVT_MASK) >> 16;
	/* reset the DFR, TPR, TIMER_CONFIG, and TIMER_ICR */
#ifndef CONFIG_X2APIC
	/* Flat model */
	x86_write_loapic(LOAPIC_DFR, 0xffffffff);  /* no DFR in x2APIC mode */
#endif
	x86_write_loapic(LOAPIC_TPR, 0x0);
	x86_write_loapic(LOAPIC_TIMER_CONFIG, 0x0);
	x86_write_loapic(LOAPIC_TIMER_ICR, 0x0);
	/* program Local Vector Table for the Virtual Wire Mode */
	/* set LINT0: extInt, high-polarity, edge-trigger, not-masked */
	x86_write_loapic(LOAPIC_LINT0, (x86_read_loapic(LOAPIC_LINT0) &
		~(LOAPIC_MODE | LOAPIC_LOW |
		  LOAPIC_LEVEL | LOAPIC_LVT_MASKED)) |
		(LOAPIC_EXT | LOAPIC_HIGH | LOAPIC_EDGE));
	/* set LINT1: NMI, high-polarity, edge-trigger, not-masked */
	x86_write_loapic(LOAPIC_LINT1, (x86_read_loapic(LOAPIC_LINT1) &
		~(LOAPIC_MODE | LOAPIC_LOW |
		  LOAPIC_LEVEL | LOAPIC_LVT_MASKED)) |
		(LOAPIC_NMI | LOAPIC_HIGH | LOAPIC_EDGE));
	/* lock the Local APIC interrupts */
	x86_write_loapic(LOAPIC_TIMER, LOAPIC_LVT_MASKED);
	x86_write_loapic(LOAPIC_ERROR, LOAPIC_LVT_MASKED);
	/* PMC LVT exists only on P6 and later; thermal LVT on Pentium4+ */
	if (loApicMaxLvt >= LOAPIC_LVT_P6) {
		x86_write_loapic(LOAPIC_PMC, LOAPIC_LVT_MASKED);
	}
	if (loApicMaxLvt >= LOAPIC_LVT_PENTIUM4) {
		x86_write_loapic(LOAPIC_THERMAL, LOAPIC_LVT_MASKED);
	}
#if CONFIG_LOAPIC_SPURIOUS_VECTOR
	x86_write_loapic(LOAPIC_SVR, (x86_read_loapic(LOAPIC_SVR) & 0xFFFFFF00) |
			 (LOAPIC_SPURIOUS_VECTOR_ID & 0xFF));
#endif
	/* discard a pending interrupt if any */
	x86_write_loapic(LOAPIC_EOI, 0);
}
/**
 * @brief Dummy initialization function.
 *
 * The local APIC is initialized via z_loapic_enable() long before the
 * kernel runs through its device initializations, so this is unneeded.
 * It is kept as the DEVICE_DT_INST_DEFINE init hook and is also reused
 * by loapic_resume().
 */
__boot_func
static int loapic_init(const struct device *unused)
{
	ARG_UNUSED(unused);
	return 0;
}
/*
 * Local APIC IRQs are numbered after all IO APIC RTEs, so the first
 * local APIC IRQ equals the number of RTEs.
 */
__pinned_func
uint32_t z_loapic_irq_base(void)
{
	return z_ioapic_num_rtes();
}
/**
 * @brief Set the vector field in the specified LVT entry
 *
 * This associates a local APIC IRQ with the desired vector in the IDT.
 *
 * @param irq Local APIC IRQ number (see mapping below)
 * @param vector IDT vector to write into the LVT
 */
__pinned_func
void z_loapic_int_vec_set(unsigned int irq, /* IRQ number of the interrupt */
			  unsigned int vector /* vector to copy into the LVT */
			  )
{
	unsigned int oldLevel;   /* previous interrupt lock level */
	/*
	 * The following mappings are used:
	 *
	 * IRQ0 -> LOAPIC_TIMER
	 * IRQ1 -> LOAPIC_THERMAL
	 * IRQ2 -> LOAPIC_PMC
	 * IRQ3 -> LOAPIC_LINT0
	 * IRQ4 -> LOAPIC_LINT1
	 * IRQ5 -> LOAPIC_ERROR
	 *
	 * It's assumed that LVTs are spaced by 0x10 bytes
	 */
	/* update the 'vector' bits in the LVT (read-modify-write, locked) */
	oldLevel = irq_lock();
	x86_write_loapic(LOAPIC_TIMER + (irq * 0x10),
		(x86_read_loapic(LOAPIC_TIMER + (irq * 0x10)) &
		~LOAPIC_VECTOR) | vector);
	irq_unlock(oldLevel);
}
/**
 * @brief Enable an individual LOAPIC interrupt (IRQ)
 *
 * @param irq the IRQ number of the interrupt
 *
 * This routine clears the interrupt mask bit in the LVT for the specified IRQ
 */
__pinned_func
void z_loapic_irq_enable(unsigned int irq)
{
	unsigned int oldLevel;   /* previous interrupt lock level */
	/*
	 * See the comments in _LoApicLvtVecSet() regarding IRQ to LVT mappings
	 * and the assumption concerning LVT spacing.
	 */
	/* clear the mask bit in the LVT (read-modify-write, locked) */
	oldLevel = irq_lock();
	x86_write_loapic(LOAPIC_TIMER + (irq * 0x10),
		x86_read_loapic(LOAPIC_TIMER + (irq * 0x10)) &
		~LOAPIC_LVT_MASKED);
	irq_unlock(oldLevel);
}
/**
 * @brief Disable an individual LOAPIC interrupt (IRQ)
 *
 * @param irq the IRQ number of the interrupt
 *
 * This routine sets the interrupt mask bit in the LVT for the specified IRQ
 */
__pinned_func
void z_loapic_irq_disable(unsigned int irq)
{
	unsigned int oldLevel;   /* previous interrupt lock level */
	/*
	 * See the comments in _LoApicLvtVecSet() regarding IRQ to LVT mappings
	 * and the assumption concerning LVT spacing.
	 */
	/* set the mask bit in the LVT (read-modify-write, locked) */
	oldLevel = irq_lock();
	x86_write_loapic(LOAPIC_TIMER + (irq * 0x10),
		x86_read_loapic(LOAPIC_TIMER + (irq * 0x10)) |
		LOAPIC_LVT_MASKED);
	irq_unlock(oldLevel);
}
/**
 * @brief Find the currently executing interrupt vector, if any
 *
 * Scans the local APIC ISR (In-Service Register) blocks for the highest
 * set bit; its position is the vector currently being serviced.
 *
 * Must be called with interrupts locked, in interrupt context.
 *
 * ISR block layout: eight 32-bit registers spaced 0x10 apart starting at
 * LOAPIC_ISR; block k covers vectors [32*k, 32*k + 31].
 *
 * @return The vector of the interrupt that is currently being processed,
 * or -1 if no IRQ is being serviced.
 */
__pinned_func
int z_irq_controller_isr_vector_get(void)
{
	/*
	 * Scan from the highest block (vectors 224-255) downward; block 0
	 * never lights up since those are exception/reserved vectors.
	 */
	for (int blk = 7; likely(blk > 0); blk--) {
		int bits = x86_read_loapic(LOAPIC_ISR + (blk * 0x10));

		if (bits != 0) {
			return (blk * 32) + (find_msb_set(bits) - 1);
		}
	}
	return -1;
}
#ifdef CONFIG_PM_DEVICE
/*
 * PM suspend hook: record, for every registered local APIC IRQ, whether
 * its LVT entry is currently unmasked. Vector numbers themselves live in
 * _irq_to_interrupt_vector and need no saving.
 *
 * @param port Device instance (not used).
 * @retval 0 always.
 */
__pinned_func
static int loapic_suspend(const struct device *port)
{
	volatile uint32_t lvt; /* local vector table entry value */
	int loapic_irq;
	ARG_UNUSED(port);
	(void)memset(loapic_suspend_buf, 0, (LOAPIC_SUSPEND_BITS_REQD >> 3));
	for (loapic_irq = 0; loapic_irq < LOAPIC_IRQ_COUNT; loapic_irq++) {
		if (_irq_to_interrupt_vector[z_loapic_irq_base() + loapic_irq]) {
			/* Since vector numbers are already present in RAM/ROM,
			 * We save only the mask bits here.
			 */
			lvt = x86_read_loapic(LOAPIC_TIMER + (loapic_irq * 0x10));
			if ((lvt & LOAPIC_LVT_MASKED) == 0U) {
				sys_bitfield_set_bit((mem_addr_t)loapic_suspend_buf,
					loapic_irq);
			}
		}
	}
	return 0;
}
/*
 * PM resume hook: reinitialize the local APIC, reprogram the vector of
 * every registered IRQ and unmask those recorded as enabled at suspend.
 *
 * @param port Device instance (not used).
 * @retval 0 always.
 */
__pinned_func
int loapic_resume(const struct device *port)
{
	int loapic_irq;
	ARG_UNUSED(port);
	/* Assuming all loapic device registers lose their state, the call to
	 * z_loapic_init(), should bring all the registers to a sane state.
	 */
	loapic_init(NULL);
	for (loapic_irq = 0; loapic_irq < LOAPIC_IRQ_COUNT; loapic_irq++) {
		if (_irq_to_interrupt_vector[z_loapic_irq_base() + loapic_irq]) {
			/* Configure vector and enable the required ones*/
			z_loapic_int_vec_set(loapic_irq,
				_irq_to_interrupt_vector[z_loapic_irq_base() +
							 loapic_irq]);
			if (sys_bitfield_test_bit((mem_addr_t) loapic_suspend_buf,
							loapic_irq)) {
				z_loapic_irq_enable(loapic_irq);
			}
		}
	}
	return 0;
}
/*
 * PM action dispatcher: routes suspend/resume requests to the matching
 * handler; anything else is unsupported.
 */
__pinned_func
static int loapic_pm_action(const struct device *dev,
			    enum pm_device_action action)
{
	switch (action) {
	case PM_DEVICE_ACTION_SUSPEND:
		return loapic_suspend(dev);
	case PM_DEVICE_ACTION_RESUME:
		return loapic_resume(dev);
	default:
		return -ENOTSUP;
	}
}
#endif /* CONFIG_PM_DEVICE */
PM_DEVICE_DT_INST_DEFINE(0, loapic_pm_action);
DEVICE_DT_INST_DEFINE(0, loapic_init, PM_DEVICE_DT_INST_GET(0), NULL, NULL,
PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY, NULL);
#if CONFIG_LOAPIC_SPURIOUS_VECTOR
extern void z_loapic_spurious_handler(void);
NANO_CPU_INT_REGISTER(z_loapic_spurious_handler, NANO_SOFT_IRQ,
LOAPIC_SPURIOUS_VECTOR_ID >> 4,
LOAPIC_SPURIOUS_VECTOR_ID, 0);
#endif
``` | /content/code_sandbox/drivers/interrupt_controller/intc_loapic.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,593 |
```unknown
/*
*/
#include <zephyr/toolchain.h>
/* Exports */
GTEXT(__soc_handle_irq)
/*
 * SOC-level IRQ acknowledge hook called by the RISC-V interrupt entry
 * code. No need to clear anything, pending bit is cleared by HW, so
 * this is a bare return.
 */
SECTION_FUNC(exception.other, __soc_handle_irq)
	ret
``` | /content/code_sandbox/drivers/interrupt_controller/intc_nrfx_clic.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 51 |
```unknown
# STM32 EXTI configuration
if SOC_FAMILY_STM32
config EXTI_STM32
bool "External Interrupt/Event Controller (EXTI) Driver for STM32 family of MCUs"
default y
depends on DT_HAS_ST_STM32_EXTI_ENABLED
help
Enable EXTI driver for STM32 line of MCUs
endif # SOC_FAMILY_STM32
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.stm32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 78 |
```unknown
# x86 local APIC / IO-APIC interrupt controller options.
menuconfig LOAPIC
	bool "LOAPIC"
	depends on X86
	help
	  This option selects local APIC as the interrupt controller.

if LOAPIC

config X2APIC
	bool "Access local APIC in x2APIC mode"
	help
	  If your local APIC supports x2APIC mode, turn this on.

config LOAPIC_SPURIOUS_VECTOR
	bool "Handle LOAPIC spurious interrupts"
	help
	  A special situation may occur when a processor raises its task
	  priority to be greater than or equal to the level of the
	  interrupt for which the processor INTR signal is currently being
	  asserted. If at the time the INTA cycle is issued, the
	  interrupt that was to be dispensed has become masked (programmed
	  by software), the local APIC will deliver a spurious-interrupt
	  vector. Dispensing the spurious-interrupt vector does not affect
	  the ISR, so the handler for this vector should return without an EOI.
	  From x86 manual Volume 3 Section 10.9.

config LOAPIC_SPURIOUS_VECTOR_ID
	int "LOAPIC spurious vector ID"
	default -1
	depends on LOAPIC_SPURIOUS_VECTOR
	help
	  IDT vector to use for spurious LOAPIC interrupts. Note that some
	  arches (P6, Pentium) ignore the low 4 bits and fix them at 0xF.
	  If this value is left at -1 the last entry in the IDT will be used.

config IOAPIC
	bool "IO-APIC"
	default y
	depends on DT_HAS_INTEL_IOAPIC_ENABLED
	help
	  This option signifies that the target has an IO-APIC device. This
	  capability allows IO-APIC-dependent code to be included.

config IOAPIC_MASK_RTE
	bool "Mask out RTE entries on boot"
	default y
	depends on IOAPIC
	help
	  At boot, mask all IOAPIC RTEs if they may be in an undefined state.
	  You don't need this if the RTEs are either all guaranteed to be masked
	  when the OS starts up, or a previous boot stage has done some IOAPIC
	  configuration that needs to be preserved.

endif # LOAPIC
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.loapic | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 489 |
```c
/*
*
*/
#define DT_DRV_COMPAT snps_designware_intc
/* This implementation supports only the regular irqs
* No support for priority filtering
* No support for vectored interrupts
* Firqs are also not supported
* This implementation works only when sw_isr_table is enabled in zephyr
*/
#include <zephyr/device.h>
#include <zephyr/devicetree/interrupt_controller.h>
#include <zephyr/irq_nextlevel.h>
#include <zephyr/sw_isr_table.h>
#include "intc_dw.h"
#include <soc.h>
#include <zephyr/irq.h>
/* Invoke the registered second-level ISR for every bit set in
 * @intr_status; @isr_base_offset is the index of this controller's
 * first entry in _sw_isr_table.
 */
static ALWAYS_INLINE void dw_ictl_dispatch_child_isrs(uint32_t intr_status,
						      uint32_t isr_base_offset)
{
	while (intr_status != 0U) {
		uint32_t bit = find_lsb_set(intr_status) - 1;
		const struct _isr_table_entry *entry =
			&_sw_isr_table[isr_base_offset + bit];

		intr_status &= ~(1 << bit);
		entry->isr(entry->arg);
	}
}
/* Boot-time init: mask every interrupt line on the controller. */
static int dw_ictl_initialize(const struct device *dev)
{
	const struct dw_ictl_config *cfg = dev->config;
	volatile struct dw_ictl_registers * const ictl =
		(struct dw_ictl_registers *)cfg->base_addr;

	ictl->irq_inten_l = 0U;
	ictl->irq_inten_h = 0U;

	return 0;
}
/* Aggregated ISR: dispatch lines 0..31, then 32..63 when present. */
static void dw_ictl_isr(const struct device *dev)
{
	const struct dw_ictl_config *cfg = dev->config;
	volatile struct dw_ictl_registers * const ictl =
		(struct dw_ictl_registers *)cfg->base_addr;

	dw_ictl_dispatch_child_isrs(ictl->irq_finalstatus_l,
				    cfg->isr_table_offset);

	if (cfg->numirqs > 32) {
		dw_ictl_dispatch_child_isrs(ictl->irq_finalstatus_h,
					    cfg->isr_table_offset + 32);
	}
}
/* Unmask one line: lines 0..31 live in inten_l, 32..63 in inten_h. */
static inline void dw_ictl_intr_enable(const struct device *dev,
				       unsigned int irq)
{
	const struct dw_ictl_config *cfg = dev->config;
	volatile struct dw_ictl_registers * const ictl =
		(struct dw_ictl_registers *)cfg->base_addr;

	if (irq >= 32) {
		ictl->irq_inten_h |= (1 << (irq - 32));
	} else {
		ictl->irq_inten_l |= (1 << irq);
	}
}
/* Mask one line; bank selection mirrors dw_ictl_intr_enable(). */
static inline void dw_ictl_intr_disable(const struct device *dev,
					unsigned int irq)
{
	const struct dw_ictl_config *cfg = dev->config;
	volatile struct dw_ictl_registers * const ictl =
		(struct dw_ictl_registers *)cfg->base_addr;

	if (irq >= 32) {
		ictl->irq_inten_h &= ~(1 << (irq - 32));
	} else {
		ictl->irq_inten_l &= ~(1 << irq);
	}
}
/* Return 1 when at least one line on the controller is unmasked. */
static inline unsigned int dw_ictl_intr_get_state(const struct device *dev)
{
	const struct dw_ictl_config *cfg = dev->config;
	volatile struct dw_ictl_registers * const ictl =
		(struct dw_ictl_registers *)cfg->base_addr;

	if (ictl->irq_inten_l != 0U) {
		return 1;
	}

	/* The high bank only exists on controllers with more than 32 lines. */
	if ((cfg->numirqs > 32) && (ictl->irq_inten_h != 0U)) {
		return 1;
	}

	return 0;
}
/*
 * Return 1 when interrupt line @irq is unmasked, 0 otherwise.
 *
 * Fix: the register bank must be selected by the line number, not by the
 * controller's total line count.  The previous code evaluated
 * BIT(irq - 32) for *every* line whenever numirqs > 32, which is a
 * negative shift (undefined behavior, C11 6.5.7) for lines 0..31 and
 * reported the state of the wrong bank.
 */
static int dw_ictl_intr_get_line_state(const struct device *dev,
				       unsigned int irq)
{
	const struct dw_ictl_config *config = dev->config;
	volatile struct dw_ictl_registers * const regs =
		(struct dw_ictl_registers *)config->base_addr;

	if (irq < 32) {
		if ((regs->irq_inten_l & BIT(irq)) != 0) {
			return 1;
		}
	} else if (config->numirqs > 32) {
		/* High bank covers lines 32..63 on wide controllers only. */
		if ((regs->irq_inten_h & BIT(irq - 32)) != 0) {
			return 1;
		}
	}

	return 0;
}
static void dw_ictl_config_irq(const struct device *dev);

/* Static controller configuration taken from devicetree instance 0. */
static const struct dw_ictl_config dw_config = {
	.base_addr = DT_INST_REG_ADDR(0),
	.numirqs = DT_INST_PROP(0, num_irqs),
	.isr_table_offset = CONFIG_DW_ISR_TBL_OFFSET,
	.config_func = dw_ictl_config_irq,
};

/* Second-level interrupt controller API (irq_nextlevel). */
static const struct irq_next_level_api dw_ictl_apis = {
	.intr_enable = dw_ictl_intr_enable,
	.intr_disable = dw_ictl_intr_disable,
	.intr_get_state = dw_ictl_intr_get_state,
	.intr_get_line_state = dw_ictl_intr_get_line_state,
};

DEVICE_DT_INST_DEFINE(0, dw_ictl_initialize, NULL,
		      NULL, &dw_config, PRE_KERNEL_1,
		      CONFIG_DW_ICTL_INIT_PRIORITY, &dw_ictl_apis);
/* Attach dw_ictl_isr to the parent interrupt line described in DT. */
static void dw_ictl_config_irq(const struct device *port)
{
	IRQ_CONNECT(DT_INST_IRQN(0),
		    DT_INST_IRQ(0, priority),
		    dw_ictl_isr,
		    DEVICE_DT_INST_GET(0),
		    DT_INST_IRQ(0, sense));
}

/* Register this controller in the multi-level interrupt hierarchy. */
IRQ_PARENT_ENTRY_DEFINE(intc_dw, DEVICE_DT_INST_GET(0), DT_INST_IRQN(0),
			INTC_INST_ISR_TBL_OFFSET(0), DT_INST_INTC_GET_AGGREGATOR_LEVEL(0));
``` | /content/code_sandbox/drivers/interrupt_controller/intc_dw.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,230 |
```c
/*
*
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <soc/periph_defs.h>
#include <limits.h>
#include <assert.h>
#include "soc/soc.h"
#include <soc.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/interrupt_controller/intc_esp32c3.h>
#include <zephyr/sw_isr_table.h>
#include <riscv/interrupt.h>
#define ESP32C3_INTC_DEFAULT_PRIO 15
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(intc_esp32c3, CONFIG_LOG_DEFAULT_LEVEL);
/*
* Define this to debug the choices made when allocating the interrupt. This leads to much debugging
* output within a critical region, which can lead to weird effects like e.g. the interrupt watchdog
* being triggered, that is why it is separate from the normal LOG* scheme.
*/
#ifdef CONFIG_INTC_ESP32C3_DECISIONS_LOG
# define INTC_LOG(...) LOG_INF(__VA_ARGS__)
#else
# define INTC_LOG(...) do {} while (0)
#endif
/* NOTE(review): same value as ESP32C3_INTC_DEFAULT_PRIO defined above --
 * the two macros look redundant; consider consolidating.
 */
#define ESP32C3_INTC_DEFAULT_PRIORITY 15
#define ESP32C3_INTC_DEFAULT_THRESHOLD 1
/* CPU IRQ line reserved for sources that are currently disabled. */
#define ESP32C3_INTC_DISABLED_SLOT 31
#define ESP32C3_INTC_SRCS_PER_IRQ 2
#define ESP32C3_INTC_AVAILABLE_IRQS 30

#if defined(CONFIG_SOC_SERIES_ESP32C6)

#define IRQ_NA 0xFF /* IRQ not available */
#define IRQ_FREE 0xFE

#define ESP32C6_INTC_SRCS_PER_IRQ 2
#define ESP32C6_INTC_AVAILABLE_IRQS 31

/* For ESP32C6 only CPU peripheral interrupts number
 * 1, 2, 5, 6, 8 ~ 31 are available.
 * IRQ 31 is reserved for disabled interrupts
 */
/* Dynamic allocation table: esp_intr_irq_alloc[line][slot] holds the
 * source number bound to that slot, IRQ_FREE when unused, IRQ_NA when
 * the line is reserved by hardware.
 */
static uint8_t esp_intr_irq_alloc[ESP32C6_INTC_AVAILABLE_IRQS][ESP32C6_INTC_SRCS_PER_IRQ] = {
	[0] = {IRQ_NA, IRQ_NA},
	[3] = {IRQ_NA, IRQ_NA},
	[4] = {IRQ_NA, IRQ_NA},
	[7] = {IRQ_NA, IRQ_NA},
	[1 ... 2] = {IRQ_FREE, IRQ_FREE},
	[5 ... 6] = {IRQ_FREE, IRQ_FREE},
	[8 ... 30] = {IRQ_FREE, IRQ_FREE}
};
#endif

/* One enabled-source bitmask per 32 interrupt sources (3 x 32 = 96). */
#define STATUS_MASK_NUM 3
static uint32_t esp_intr_enabled_mask[STATUS_MASK_NUM] = {0, 0, 0};
#if defined(CONFIG_SOC_SERIES_ESP32C2) || defined(CONFIG_SOC_SERIES_ESP32C3)
/* Map an interrupt source to its fixed CPU IRQ line: two consecutive
 * sources share one line, line 0 is never used, and any source past the
 * last usable line is folded onto that last line.
 */
static uint32_t esp_intr_find_irq_for_source(uint32_t source)
{
	uint32_t irq = source / ESP32C3_INTC_SRCS_PER_IRQ;

	if (irq == 0) {
		irq = 1;
	} else if (irq > ESP32C3_INTC_AVAILABLE_IRQS) {
		INTC_LOG("Clamping the source: %d no more IRQs available", source);
		irq = ESP32C3_INTC_AVAILABLE_IRQS;
	}

	INTC_LOG("Found IRQ: %d for source: %d", irq, source);

	return irq;
}
#elif defined(CONFIG_SOC_SERIES_ESP32C6)
/*
 * Find (or claim) a CPU IRQ line for @source on the ESP32-C6.
 *
 * The scan walks slot column 0 for every line before column 1, so each
 * line only receives a second source once every line has one.  If the
 * source is already bound to a line, that line is returned; otherwise
 * the first IRQ_FREE slot encountered is claimed.  Returns IRQ_NA when
 * no slot is free.
 */
static uint32_t esp_intr_find_irq_for_source(uint32_t source)
{
	uint32_t irq = IRQ_NA;
	uint32_t irq_free = IRQ_NA;
	uint8_t *irq_ptr = NULL;

	/* First allocate one source per IRQ, then two
	 * if there are more sources than free IRQs
	 */
	for (int j = 0; j < ESP32C6_INTC_SRCS_PER_IRQ; j++) {
		for (int i = 0; i < ESP32C6_INTC_AVAILABLE_IRQS; i++) {
			/* Find first free slot but keep searching to see
			 * if source is already associated to an IRQ
			 */
			if (esp_intr_irq_alloc[i][j] == source) {
				/* Source is already associated to an IRQ */
				irq = i;
				goto found;
			} else if ((irq_free == IRQ_NA) && (esp_intr_irq_alloc[i][j] == IRQ_FREE)) {
				/* Remember the first free slot seen. */
				irq_free = i;
				irq_ptr = &esp_intr_irq_alloc[i][j];
			}
		}
	}

	if (irq_ptr != NULL) {
		/* Claim the remembered free slot for this source. */
		*irq_ptr = (uint8_t)source;
		irq = irq_free;
	} else {
		return IRQ_NA;
	}

found:
	INTC_LOG("Found IRQ: %d for source: %d", irq, source);

	return irq;
}
#endif
/*
 * Reset the interrupt matrix: mask all CPU IRQ lines, route every
 * peripheral source to the reserved "disabled" slot, release any
 * dynamic allocations (ESP32-C6 only) and program the default
 * masking threshold.
 */
void esp_intr_initialize(void)
{
	/* IRQ 31 is reserved for disabled interrupts,
	 * so route all sources to it
	 */
	/* Disables lines 0..31 (30 usable lines + line 0 + disabled slot). */
	for (int i = 0 ; i < ESP32C3_INTC_AVAILABLE_IRQS + 2; i++) {
		irq_disable(i);
	}

	for (int i = 0; i < ETS_MAX_INTR_SOURCE; i++) {
		esp_rom_intr_matrix_set(0, i, ESP32C3_INTC_DISABLED_SLOT);
	}

#if defined(CONFIG_SOC_SERIES_ESP32C6)
	/* Clear up IRQ allocation */
	for (int j = 0; j < ESP32C6_INTC_SRCS_PER_IRQ; j++) {
		for (int i = 0; i < ESP32C6_INTC_AVAILABLE_IRQS; i++) {
			/* screen out reserved IRQs */
			if (esp_intr_irq_alloc[i][j] != IRQ_NA) {
				esp_intr_irq_alloc[i][j] = IRQ_FREE;
			}
		}
	}
#endif

	/* set global esp32c3's INTC masking level */
	esprv_intc_int_set_threshold(ESP32C3_INTC_DEFAULT_THRESHOLD);
}
/*
 * Connect @handler/@arg to interrupt @source and enable it.
 *
 * @param source     peripheral source, 0 .. ETS_MAX_INTR_SOURCE-1
 * @param flags      unused in this implementation
 * @param handler    ISR to invoke; must not be NULL
 * @param arg        argument passed to @handler
 * @param ret_handle unused in this implementation
 * @return 0 on success, -EINVAL on bad argument, otherwise the result
 *         of esp_intr_enable().
 */
int esp_intr_alloc(int source,
		int flags,
		isr_handler_t handler,
		void *arg,
		void **ret_handle)
{
	ARG_UNUSED(flags);
	ARG_UNUSED(ret_handle);

	if (handler == NULL) {
		return -EINVAL;
	}

	if (source < 0 || source >= ETS_MAX_INTR_SOURCE) {
		return -EINVAL;
	}

	uint32_t key = irq_lock();

	irq_connect_dynamic(source,
		ESP32C3_INTC_DEFAULT_PRIORITY,
		handler,
		arg,
		0);

	/* Record the source as enabled; esp_intr_enable() below sets the
	 * same bits again after routing the source.
	 */
	if (source < 32) {
		esp_intr_enabled_mask[0] |= (1 << source);
	} else if (source < 64) {
		esp_intr_enabled_mask[1] |= (1 << (source - 32));
	} else if (source < 96) {
		esp_intr_enabled_mask[2] |= (1 << (source - 64));
	}

	INTC_LOG("Enabled ISRs -- 0: 0x%X -- 1: 0x%X -- 2: 0x%X",
		esp_intr_enabled_mask[0], esp_intr_enabled_mask[1], esp_intr_enabled_mask[2]);

	irq_unlock(key);

	int ret = esp_intr_enable(source);

	return ret;
}
/*
 * Disable interrupt @source: route it to the reserved "disabled" slot,
 * release its dynamic line allocation (ESP32-C6) and clear its bit in
 * the enabled-source bookkeeping masks.
 *
 * @return 0 on success, -EINVAL when @source is out of range.
 */
int esp_intr_disable(int source)
{
	if (source < 0 || source >= ETS_MAX_INTR_SOURCE) {
		return -EINVAL;
	}

	uint32_t key = irq_lock();

	esp_rom_intr_matrix_set(0,
		source,
		ESP32C3_INTC_DISABLED_SLOT);

#if defined(CONFIG_SOC_SERIES_ESP32C6)
	/* Return the line slot bound to this source, if any. */
	for (int j = 0; j < ESP32C6_INTC_SRCS_PER_IRQ; j++) {
		for (int i = 0; i < ESP32C6_INTC_AVAILABLE_IRQS; i++) {
			if (esp_intr_irq_alloc[i][j] == source) {
				esp_intr_irq_alloc[i][j] = IRQ_FREE;
				goto freed;
			}
		}
	}
freed:
#endif

	if (source < 32) {
		esp_intr_enabled_mask[0] &= ~(1 << source);
	} else if (source < 64) {
		esp_intr_enabled_mask[1] &= ~(1 << (source - 32));
	} else if (source < 96) {
		esp_intr_enabled_mask[2] &= ~(1 << (source - 64));
	}

	INTC_LOG("Enabled ISRs -- 0: 0x%X -- 1: 0x%X -- 2: 0x%X",
		esp_intr_enabled_mask[0], esp_intr_enabled_mask[1], esp_intr_enabled_mask[2]);

	irq_unlock(key);

	return 0;
}
/*
 * Enable interrupt @source: pick its CPU IRQ line, route the source to
 * that line, record it in the enabled masks, then program the line's
 * priority/trigger type and unmask it.
 *
 * @return 0 on success, -EINVAL when @source is out of range,
 *         -ENOMEM (ESP32-C6) when no line is free.
 */
int esp_intr_enable(int source)
{
	if (source < 0 || source >= ETS_MAX_INTR_SOURCE) {
		return -EINVAL;
	}

	uint32_t key = irq_lock();
	uint32_t irq = esp_intr_find_irq_for_source(source);

#if defined(CONFIG_SOC_SERIES_ESP32C6)
	if (irq == IRQ_NA) {
		irq_unlock(key);
		return -ENOMEM;
	}
#endif

	esp_rom_intr_matrix_set(0, source, irq);

	if (source < 32) {
		esp_intr_enabled_mask[0] |= (1 << source);
	} else if (source < 64) {
		esp_intr_enabled_mask[1] |= (1 << (source - 32));
	} else if (source < 96) {
		esp_intr_enabled_mask[2] |= (1 << (source - 64));
	}

	INTC_LOG("Enabled ISRs -- 0: 0x%X -- 1: 0x%X -- 2: 0x%X",
		esp_intr_enabled_mask[0], esp_intr_enabled_mask[1], esp_intr_enabled_mask[2]);

	/* Level-triggered at the driver's default priority. */
	esprv_intc_int_set_priority(irq, ESP32C3_INTC_DEFAULT_PRIO);
	esprv_intc_int_set_type(irq, INTR_TYPE_LEVEL);
	esprv_intc_int_enable(1 << irq);

	irq_unlock(key);

	return 0;
}
/**
 * Return the mask of enabled interrupt sources for one 32-bit bank.
 *
 * @param status_mask_number bank index: 0 for sources 0..31,
 *        1 for 32..63, 2 for 64..95.
 * @return the enabled-source bitmask, or 0 for an out-of-range bank.
 *
 * Fix: the previous check only rejected indexes >= STATUS_MASK_NUM, so a
 * negative index read outside the array (out-of-bounds, CERT ARR30-C).
 */
uint32_t esp_intr_get_enabled_intmask(int status_mask_number)
{
	INTC_LOG("Enabled ISRs -- 0: 0x%X -- 1: 0x%X -- 2: 0x%X",
		esp_intr_enabled_mask[0], esp_intr_enabled_mask[1], esp_intr_enabled_mask[2]);

	if (status_mask_number >= 0 && status_mask_number < STATUS_MASK_NUM) {
		return esp_intr_enabled_mask[status_mask_number];
	}

	return 0; /* error */
}
``` | /content/code_sandbox/drivers/interrupt_controller/intc_esp32c3.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,328 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_INTC_GIC_COMMON_PRIV_H_
#define ZEPHYR_INCLUDE_DRIVERS_INTC_GIC_COMMON_PRIV_H_

/* Offsets from GICD base or GICR(n) SGI_base */
#define GIC_DIST_IGROUPR	0x0080
#define GIC_DIST_ISENABLER	0x0100
#define GIC_DIST_ICENABLER	0x0180
#define GIC_DIST_ISPENDR	0x0200
#define GIC_DIST_ICPENDR	0x0280
#define GIC_DIST_ISACTIVER	0x0300
#define GIC_DIST_ICACTIVER	0x0380
#define GIC_DIST_IPRIORITYR	0x0400
#define GIC_DIST_ITARGETSR	0x0800
#define GIC_DIST_ICFGR		0x0c00
#define GIC_DIST_IGROUPMODR	0x0d00
#define GIC_DIST_SGIR		0x0f00

/* GICD GICR common access macros.
 * Each takes a frame base address and a register index n; IPRIORITYR is
 * byte-indexed, the others are arrays of 32-bit registers.
 * Hygiene fix: both arguments are fully parenthesized so expressions
 * such as IPRIORITYR(base, x << 2) expand with the intended precedence.
 */
#define IGROUPR(base, n)	((base) + GIC_DIST_IGROUPR + (n) * 4)
#define ISENABLER(base, n)	((base) + GIC_DIST_ISENABLER + (n) * 4)
#define ICENABLER(base, n)	((base) + GIC_DIST_ICENABLER + (n) * 4)
#define ISPENDR(base, n)	((base) + GIC_DIST_ISPENDR + (n) * 4)
#define ICPENDR(base, n)	((base) + GIC_DIST_ICPENDR + (n) * 4)
#define IPRIORITYR(base, n)	((base) + GIC_DIST_IPRIORITYR + (n))
#define ITARGETSR(base, n)	((base) + GIC_DIST_ITARGETSR + (n) * 4)
#define ICFGR(base, n)		((base) + GIC_DIST_ICFGR + (n) * 4)
#define IGROUPMODR(base, n)	((base) + GIC_DIST_IGROUPMODR + (n) * 4)

/*
 * selects redistributor SGI_base for current core for PPI and SGI
 * selects distributor base for SPI
 * The macro translates to distributor base for GICv2 and GICv1
 */
#if CONFIG_GIC_VER <= 2
#define GET_DIST_BASE(intid)	GIC_DIST_BASE
#else
#define GET_DIST_BASE(intid)	((intid < GIC_SPI_INT_BASE) ? \
				(gic_get_rdist() + GICR_SGI_BASE_OFF) \
				: GIC_DIST_BASE)
#endif

#endif /* ZEPHYR_INCLUDE_DRIVERS_INTC_GIC_COMMON_PRIV_H_ */
``` | /content/code_sandbox/drivers/interrupt_controller/intc_gic_common_priv.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 611 |
```c
/*
*
*/
#define DT_DRV_COMPAT renesas_ra_interrupt_controller_unit
#include <zephyr/device.h>
#include <zephyr/irq.h>
#include <soc.h>
#include <zephyr/drivers/interrupt_controller/intc_ra_icu.h>
#include <zephyr/sw_isr_table.h>
#include <errno.h>
/* ICU register accessors: IELSRn selects the event routed to NVIC line n
 * (32-bit registers); IRQCRi holds the detection mode for external pin
 * IRQ i (byte registers).
 * Hygiene fix: the index argument is parenthesized before scaling, so
 * IELSRn_REG(a + b) no longer expands to `a + b * 4`.
 */
#define IELSRn_REG(n) (DT_INST_REG_ADDR(0) + IELSRn_OFFSET + ((n) * 4))
#define IRQCRi_REG(i) (DT_INST_REG_ADDR(0) + IRQCRi_OFFSET + (i))

/* IRQCRi.IRQMD: edge/level detection mode field. */
#define IRQCRi_IRQMD_POS 0
#define IRQCRi_IRQMD_MASK BIT_MASK(2)

/* IELSRn.IR: interrupt status flag. */
#define IELSRn_IR_POS 16
#define IELSRn_IR_MASK BIT_MASK(1)

enum {
	IRQCRi_OFFSET = 0x0,
	IELSRn_OFFSET = 0x300,
};
/* Return the NVIC line currently routed to @event, or -EINVAL when the
 * event is not routed anywhere.
 */
int ra_icu_query_exists_irq(uint32_t event)
{
	for (uint32_t line = 0; line < CONFIG_NUM_IRQS; line++) {
		if ((sys_read32(IELSRn_REG(line)) & UINT8_MAX) == event) {
			return line;
		}
	}

	return -EINVAL;
}
/*
 * Find a free NVIC line for @event.
 *
 * @return a line whose handler is still the spurious handler, or
 *         -EINVAL when the event is already routed or no line is free.
 *
 * Fix: line 0 is a valid result of ra_icu_query_exists_irq(), but the
 * old check used `> 0`, so an event already routed to line 0 was not
 * rejected as a duplicate.
 */
int ra_icu_query_available_irq(uint32_t event)
{
	int irq = -EINVAL;

	if (ra_icu_query_exists_irq(event) >= 0) {
		return -EINVAL;
	}

	/* A line still bound to the spurious handler is unused. */
	for (uint32_t i = 0; i < CONFIG_NUM_IRQS; i++) {
		if (_sw_isr_table[i].isr == z_irq_spurious) {
			irq = i;
			break;
		}
	}

	return irq;
}
/* Acknowledge a pending interrupt by clearing IELSRn.IR for line @irqn. */
void ra_icu_clear_int_flag(unsigned int irqn)
{
	uint32_t ielsr = sys_read32(IELSRn_REG(irqn));

	sys_write32(ielsr & ~BIT(IELSRn_IR_POS), IELSRn_REG(irqn));
}
/* Report the current routing of line @irq: the raw IELSRn value plus the
 * handler/argument registered in _sw_isr_table.
 */
void ra_icu_query_irq_config(unsigned int irq, uint32_t *intcfg, ra_isr_handler *cb,
			     const void **cbarg)
{
	*intcfg = sys_read32(IELSRn_REG(irq));
	*cb = _sw_isr_table[irq].isr;
	*cbarg = (void *)_sw_isr_table[irq].arg;
}
/* Program the IRQMD detection-mode bits of IRQCRi, leaving the other
 * bits of the register untouched.
 */
static void ra_icu_irq_configure(unsigned int irqn, uint32_t intcfg)
{
	uint8_t irqcr = sys_read8(IRQCRi_REG(irqn));

	irqcr &= (uint8_t)~IRQCRi_IRQMD_MASK;
	irqcr |= (uint8_t)(intcfg & IRQCRi_IRQMD_MASK);
	sys_write8(irqcr, IRQCRi_REG(irqn));
}
/*
 * Bind @routine to an ICU event, optionally auto-selecting a free NVIC
 * line.
 *
 * @param irq    NVIC line to use, or RA_ICU_IRQ_UNSPECIFIED to pick a
 *               free one for the event
 * @param flags  packs the event number and the IRQCR detection mode
 *               (RA_ICU_FLAG_EVENT_* / RA_ICU_FLAG_INTCFG_* fields)
 * @return the line actually used, or a negative errno.
 */
int ra_icu_irq_connect_dynamic(unsigned int irq, unsigned int priority,
			       void (*routine)(const void *parameter), const void *parameter,
			       uint32_t flags)
{
	uint32_t event = ((flags & RA_ICU_FLAG_EVENT_MASK) >> RA_ICU_FLAG_EVENT_OFFSET);
	uint32_t intcfg = ((flags & RA_ICU_FLAG_INTCFG_MASK) >> RA_ICU_FLAG_INTCFG_OFFSET);
	int irqn = irq;

	if (irq == RA_ICU_IRQ_UNSPECIFIED) {
		irqn = ra_icu_query_available_irq(event);
		if (irqn < 0) {
			return irqn;
		}
	}

	/* Route the event to the line while it is masked. */
	irq_disable(irqn);
	sys_write32(event, IELSRn_REG(irqn));
	z_isr_install(irqn, routine, parameter);
	z_arm_irq_priority_set(irqn, priority, flags);
	/* NOTE(review): this passes `event`, not `irqn`, as the IRQCRi
	 * index -- looks suspicious; confirm against the RA ICU register
	 * map before changing.
	 */
	ra_icu_irq_configure(event, intcfg);

	return irqn;
}
/*
 * Undo ra_icu_irq_connect_dynamic(): mask the line, clear its event
 * routing, restore the spurious handler and reset the priority.
 *
 * @param priority, routine, parameter, flags: ignored by this
 *        implementation (kept for interface symmetry).
 * @return 0 on success, -EINVAL when @irq is RA_ICU_IRQ_UNSPECIFIED.
 */
int ra_icu_irq_disconnect_dynamic(unsigned int irq, unsigned int priority,
				  void (*routine)(const void *parameter), const void *parameter,
				  uint32_t flags)
{
	int irqn = irq;

	if (irq == RA_ICU_IRQ_UNSPECIFIED) {
		return -EINVAL;
	}

	irq_disable(irqn);
	/* Event 0 = no event routed to this line. */
	sys_write32(0, IELSRn_REG(irqn));
	z_isr_install(irqn, z_irq_spurious, NULL);
	z_arm_irq_priority_set(irqn, 0, 0);

	return 0;
}
DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, NULL, PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY, NULL);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_renesas_ra_icu.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 918 |
```unknown
/*
*
*/
/**
* @file
* @brief LOAPIC spurious interrupt handler
*/
#include <zephyr/kernel_structs.h>
#include <zephyr/arch/x86/ia32/asm.h>

GTEXT(z_loapic_spurious_handler)

SECTION_FUNC(PINNED_TEXT, z_loapic_spurious_handler)
	/* A spurious LOAPIC vector must not be acknowledged with an EOI
	 * (see Kconfig.loapic); just return from the interrupt. */
	iret
``` | /content/code_sandbox/drivers/interrupt_controller/intc_loapic_spurious.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 76 |
```unknown
# Configuration for NXP S32 external interrupt controller
# Two controllers are covered here: SIUL2 EIRQ (pin interrupts) and the
# WKPU wake-up unit.

config NXP_S32_EIRQ
	bool "External interrupt controller driver for NXP S32 MCUs"
	default y
	depends on DT_HAS_NXP_S32_SIUL2_EIRQ_ENABLED
	select NOCACHE_MEMORY if ARCH_HAS_NOCACHE_MEMORY_SUPPORT
	help
	  External interrupt controller driver for NXP S32 MCUs

if NXP_S32_EIRQ

config NXP_S32_EIRQ_EXT_INTERRUPTS_MAX
	int
	default 8 if SOC_SERIES_S32ZE
	default 32 if SOC_SERIES_S32K3
	help
	  Number of SIUL2 external interrupts per controller. This is a SoC
	  integration option.

config NXP_S32_EIRQ_EXT_INTERRUPTS_GROUP
	int
	default 8
	help
	  Number of SIUL2 external interrupts grouped into a single core
	  interrupt line. This is a SoC integration option.

endif # NXP_S32_EIRQ

config NXP_S32_WKPU
	bool "Wake-up Unit interrupt controller driver for NXP S32 MCUs"
	default y
	depends on DT_HAS_NXP_S32_WKPU_ENABLED
	help
	  Wake-up Unit interrupt controller driver for NXP S32 MCUs

if NXP_S32_WKPU

config NXP_S32_WKPU_SOURCES_MAX
	int
	range 32 64
	default 64 if SOC_SERIES_S32K3
	help
	  Number of WKPU external and internal sources per controller. This is
	  a SoC integration option.

endif # NXP_S32_WKPU
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.nxp_s32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 338 |
```unknown
# Espressif's Interrupt Allocator driver for Xtensa SoCs
# RISC-V parts (ESP32-C2/C3/C6) use a different allocator and are
# excluded explicitly.

config INTC_ESP32
	bool "Interrupt allocator for Xtensa-based Espressif SoCs"
	default y
	depends on SOC_FAMILY_ESPRESSIF_ESP32
	depends on !SOC_SERIES_ESP32C2 && !SOC_SERIES_ESP32C3 && !SOC_SERIES_ESP32C6
	help
	  Enable custom interrupt allocator for Espressif SoCs based on Xtensa
	  architecture.

config INTC_ESP32_DECISIONS_LOG
	bool "Espressif's interrupt allocator logging"
	depends on INTC_ESP32
	select LOG
	help
	  Enable this option to visualize information on decisions made by the
	  interrupt allocator. This has no impact on the interrupt allocator usage
	  but may be valuable for debugging purposes. When enabled, messages are
	  print to the serial console.
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.esp32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 189 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_INTC_GICV3_PRIV_H_
#define ZEPHYR_INCLUDE_DRIVERS_INTC_GICV3_PRIV_H_

#include <zephyr/types.h>
#include <zephyr/device.h>
#include <zephyr/sys/atomic.h>

/* Cache and Share ability for ITS & Redistributor LPI state tables */
#define GIC_BASER_CACHE_NGNRNE		0x0UL /* Device-nGnRnE */
#define GIC_BASER_CACHE_INNERLIKE	0x0UL /* Same as Inner Cacheability. */
#define GIC_BASER_CACHE_NCACHEABLE	0x1UL /* Non-cacheable */
#define GIC_BASER_CACHE_RAWT		0x2UL /* Cacheable R-allocate, W-through */
#define GIC_BASER_CACHE_RAWB		0x3UL /* Cacheable R-allocate, W-back */
#define GIC_BASER_CACHE_WAWT		0x4UL /* Cacheable W-allocate, W-through */
#define GIC_BASER_CACHE_WAWB		0x5UL /* Cacheable W-allocate, W-back */
#define GIC_BASER_CACHE_RAWAWT		0x6UL /* Cacheable R-allocate, W-allocate, W-through */
#define GIC_BASER_CACHE_RAWAWB		0x7UL /* Cacheable R-allocate, W-allocate, W-back */
#define GIC_BASER_SHARE_NO		0x0UL /* Non-shareable */
#define GIC_BASER_SHARE_INNER		0x1UL /* Inner Shareable */
#define GIC_BASER_SHARE_OUTER		0x2UL /* Outer Shareable */

/*
 * GIC Register Interface Base Addresses
 */

/* Redistributor frame: second `reg` entry of the arm,gic DT node. */
#define GIC_RDIST_BASE	DT_REG_ADDR_BY_IDX(DT_INST(0, arm_gic), 1)
#define GIC_RDIST_SIZE	DT_REG_SIZE_BY_IDX(DT_INST(0, arm_gic), 1)

/* SGI base is at 64K offset from Redistributor */
#define GICR_SGI_BASE_OFF	0x10000

/* GICR registers offset from RD_base(n) */
#define GICR_CTLR		0x0000
#define GICR_IIDR		0x0004
#define GICR_TYPER		0x0008
#define GICR_STATUSR		0x0010
#define GICR_WAKER		0x0014
/* NOTE(review): GICR_PWRR is not part of the generic GICv3 register map
 * (implementation-defined power control, e.g. GIC-600 style) -- confirm
 * for the target interconnect. */
#define GICR_PWRR		0x0024
#define GICR_PROPBASER		0x0070
#define GICR_PENDBASER		0x0078

/* Register bit definitions */

/* GICD_CTLR Interrupt group definitions */
#define GICD_CTLR_ENABLE_G0	0
#define GICD_CTLR_ENABLE_G1NS	1
#define GICD_CTLR_ENABLE_G1S	2
#define GICD_CTRL_ARE_S		4
#define GICD_CTRL_ARE_NS	5
#define GICD_CTRL_NS		6
#define GICD_CGRL_E1NWF		7

/* GICD_CTLR Register write progress bit */
#define GICD_CTLR_RWP		31

/* GICR_CTLR */
#define GICR_CTLR_ENABLE_LPIS	BIT(0)
#define GICR_CTLR_RWP		3

/* GICR_IIDR */
#define GICR_IIDR_PRODUCT_ID_SHIFT	24
#define GICR_IIDR_PRODUCT_ID_MASK	0xFFUL
#define GICR_IIDR_PRODUCT_ID_GET(_val)	MASK_GET(_val, GICR_IIDR_PRODUCT_ID)

/* GICR_TYPER */
#define GICR_TYPER_AFFINITY_VALUE_SHIFT		32
#define GICR_TYPER_AFFINITY_VALUE_MASK		0xFFFFFFFFUL
#define GICR_TYPER_AFFINITY_VALUE_GET(_val)	MASK_GET(_val, GICR_TYPER_AFFINITY_VALUE)
#define GICR_TYPER_LAST_SHIFT			4
#define GICR_TYPER_LAST_MASK			0x1UL
#define GICR_TYPER_LAST_GET(_val)		MASK_GET(_val, GICR_TYPER_LAST)
#define GICR_TYPER_PROCESSOR_NUMBER_SHIFT	8
#define GICR_TYPER_PROCESSOR_NUMBER_MASK	0xFFFFUL
#define GICR_TYPER_PROCESSOR_NUMBER_GET(_val)	MASK_GET(_val, GICR_TYPER_PROCESSOR_NUMBER)

/* GICR_WAKER */
#define GICR_WAKER_PS		1
#define GICR_WAKER_CA		2

/* GICR_PWRR */
#define GICR_PWRR_RDPD		0
#define GICR_PWRR_RDAG		1
#define GICR_PWRR_RDGPO		3

/* GICR_PROPBASER */
#define GITR_PROPBASER_ID_BITS_MASK		0x1fUL
#define GITR_PROPBASER_INNER_CACHE_SHIFT	7
#define GITR_PROPBASER_INNER_CACHE_MASK		0x7UL
#define GITR_PROPBASER_SHAREABILITY_SHIFT	10
#define GITR_PROPBASER_SHAREABILITY_MASK	0x3UL
#define GITR_PROPBASER_ADDR_SHIFT		12
#define GITR_PROPBASER_ADDR_MASK		0xFFFFFFFFFFUL
#define GITR_PROPBASER_OUTER_CACHE_SHIFT	56
#define GITR_PROPBASER_OUTER_CACHE_MASK		0x7UL

/* GICR_PENDBASER */
#define GITR_PENDBASER_INNER_CACHE_SHIFT	7
#define GITR_PENDBASER_INNER_CACHE_MASK		0x7UL
#define GITR_PENDBASER_SHAREABILITY_SHIFT	10
#define GITR_PENDBASER_SHAREABILITY_MASK	0x3UL
#define GITR_PENDBASER_ADDR_SHIFT		16
#define GITR_PENDBASER_ADDR_MASK		0xFFFFFFFFFUL
#define GITR_PENDBASER_OUTER_CACHE_SHIFT	56
#define GITR_PENDBASER_OUTER_CACHE_MASK		0x7UL
#define GITR_PENDBASER_PTZ			BIT64(62)

/* GITCD_IROUTER */
#define GIC_DIST_IROUTER	0x6000
#define IROUTER(base, n)	(base + GIC_DIST_IROUTER + (n) * 8)

/*
 * ITS registers, offsets from ITS_base
 */
#define GITS_CTLR		0x0000
#define GITS_IIDR		0x0004
#define GITS_TYPER		0x0008
#define GITS_STATUSR		0x0040
#define GITS_UMSIR		0x0048
#define GITS_CBASER		0x0080
#define GITS_CWRITER		0x0088
#define GITS_CREADR		0x0090
#define GITS_BASER(n)		(0x0100 + ((n) * 8))
#define GITS_TRANSLATER		0x10040

/* ITS CTLR register */
#define GITS_CTLR_ENABLED_SHIFT		0
#define GITS_CTLR_ENABLED_MASK		0x1UL
#define GITS_CTLR_ITS_NUMBER_SHIFT	4
#define GITS_CTLR_ITS_NUMBER_MASK	0xfUL
#define GITS_CTLR_QUIESCENT_SHIFT	31
#define GITS_CTLR_QUIESCENT_MASK	0x1UL

#define GITS_CTLR_ENABLED_GET(_val)	MASK_GET(_val, GITS_CTLR_ENABLED)
#define GITS_CTLR_QUIESCENT_GET(_val)	MASK_GET(_val, GITS_CTLR_QUIESCENT)

/* ITS TYPER register */
#define GITS_TYPER_PHY_SHIFT		0
#define GITS_TYPER_PHY_MASK		0x1UL
#define GITS_TYPER_VIRT_SHIFT		1
#define GITS_TYPER_VIRT_MASK		0x1UL
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT	4
#define GITS_TYPER_ITT_ENTRY_SIZE_MASK	0xfUL
#define GITS_TYPER_IDBITS_SHIFT		8
#define GITS_TYPER_IDBITS_MASK		0x1fUL
#define GITS_TYPER_DEVBITS_SHIFT	13
#define GITS_TYPER_DEVBITS_MASK		0x1fUL
#define GITS_TYPER_SEIS_SHIFT		18
#define GITS_TYPER_SEIS_MASK		0x1UL
#define GITS_TYPER_PTA_SHIFT		19
#define GITS_TYPER_PTA_MASK		0x1UL
#define GITS_TYPER_HCC_SHIFT		24
#define GITS_TYPER_HCC_MASK		0xffUL
#define GITS_TYPER_CIDBITS_SHIFT	32
#define GITS_TYPER_CIDBITS_MASK		0xfUL
#define GITS_TYPER_CIL_SHIFT		36
#define GITS_TYPER_CIL_MASK		0x1UL

#define GITS_TYPER_ITT_ENTRY_SIZE_GET(_val)	MASK_GET(_val, GITS_TYPER_ITT_ENTRY_SIZE)
#define GITS_TYPER_PTA_GET(_val)		MASK_GET(_val, GITS_TYPER_PTA)
#define GITS_TYPER_HCC_GET(_val)		MASK_GET(_val, GITS_TYPER_HCC)
#define GITS_TYPER_DEVBITS_GET(_val)		MASK_GET(_val, GITS_TYPER_DEVBITS)

/* ITS COMMON BASER / CBASER register */

/* ITS CBASER register */
#define GITS_CBASER_SIZE_SHIFT		0
#define GITS_CBASER_SIZE_MASK		0xffUL
#define GITS_CBASER_SHAREABILITY_SHIFT	10
#define GITS_CBASER_SHAREABILITY_MASK	0x3UL
#define GITS_CBASER_ADDR_SHIFT		12
#define GITS_CBASER_ADDR_MASK		0xfffffffffUL
#define GITS_CBASER_OUTER_CACHE_SHIFT	53
#define GITS_CBASER_OUTER_CACHE_MASK	0x7UL
#define GITS_CBASER_INNER_CACHE_SHIFT	59
#define GITS_CBASER_INNER_CACHE_MASK	0x7UL
#define GITS_CBASER_VALID_SHIFT		63
#define GITS_CBASER_VALID_MASK		0x1UL

/* ITS BASER<n> register */
#define GITS_BASER_SIZE_SHIFT		0
#define GITS_BASER_SIZE_MASK		0xffUL
#define GITS_BASER_PAGE_SIZE_SHIFT	8
#define GITS_BASER_PAGE_SIZE_MASK	0x3UL
#define GITS_BASER_PAGE_SIZE_4K		0
#define GITS_BASER_PAGE_SIZE_16K	1
#define GITS_BASER_PAGE_SIZE_64K	2
#define GITS_BASER_SHAREABILITY_SHIFT	10
#define GITS_BASER_SHAREABILITY_MASK	0x3UL
#define GITS_BASER_ADDR_SHIFT		12
#define GITS_BASER_ADDR_MASK		0xfffffffff
#define GITS_BASER_ENTRY_SIZE_SHIFT	48
#define GITS_BASER_ENTRY_SIZE_MASK	0x1fUL
#define GITS_BASER_OUTER_CACHE_SHIFT	53
#define GITS_BASER_OUTER_CACHE_MASK	0x7UL
#define GITS_BASER_TYPE_SHIFT		56
#define GITS_BASER_TYPE_MASK		0x7UL
#define GITS_BASER_INNER_CACHE_SHIFT	59
#define GITS_BASER_INNER_CACHE_MASK	0x7UL
#define GITS_BASER_INDIRECT_SHIFT	62
#define GITS_BASER_INDIRECT_MASK	0x1UL
#define GITS_BASER_VALID_SHIFT		63
#define GITS_BASER_VALID_MASK		0x1UL

#define GITS_BASER_TYPE_NONE		0
#define GITS_BASER_TYPE_DEVICE		1
#define GITS_BASER_TYPE_COLLECTION	4

#define GITS_BASER_TYPE_GET(_val)	MASK_GET(_val, GITS_BASER_TYPE)
#define GITS_BASER_PAGE_SIZE_GET(_val)	MASK_GET(_val, GITS_BASER_PAGE_SIZE)
#define GITS_BASER_ENTRY_SIZE_GET(_val)	MASK_GET(_val, GITS_BASER_ENTRY_SIZE)
#define GITS_BASER_INDIRECT_GET(_val)	MASK_GET(_val, GITS_BASER_INDIRECT)

#define GITS_BASER_NR_REGS		8

/* ITS Commands */
#define GITS_CMD_ID_MOVI		0x01
#define GITS_CMD_ID_INT			0x03
#define GITS_CMD_ID_CLEAR		0x04
#define GITS_CMD_ID_SYNC		0x05
#define GITS_CMD_ID_MAPD		0x08
#define GITS_CMD_ID_MAPC		0x09
#define GITS_CMD_ID_MAPTI		0x0a
#define GITS_CMD_ID_MAPI		0x0b
#define GITS_CMD_ID_INV			0x0c
#define GITS_CMD_ID_INVALL		0x0d
#define GITS_CMD_ID_MOVALL		0x0e
#define GITS_CMD_ID_DISCARD		0x0f

/* Command-queue field layout: *_OFFSET is the 64-bit word index inside a
 * 32-byte command, *_SHIFT/*_MASK locate the field within that word.
 */
#define GITS_CMD_ID_OFFSET		0
#define GITS_CMD_ID_SHIFT		0
#define GITS_CMD_ID_MASK		0xffUL

#define GITS_CMD_DEVICEID_OFFSET	0
#define GITS_CMD_DEVICEID_SHIFT		32
#define GITS_CMD_DEVICEID_MASK		0xffffffffUL

#define GITS_CMD_SIZE_OFFSET		1
#define GITS_CMD_SIZE_SHIFT		0
#define GITS_CMD_SIZE_MASK		0x1fUL

#define GITS_CMD_EVENTID_OFFSET		1
#define GITS_CMD_EVENTID_SHIFT		0
#define GITS_CMD_EVENTID_MASK		0xffffffffUL

#define GITS_CMD_PINTID_OFFSET		1
#define GITS_CMD_PINTID_SHIFT		32
#define GITS_CMD_PINTID_MASK		0xffffffffUL

#define GITS_CMD_ICID_OFFSET		2
#define GITS_CMD_ICID_SHIFT		0
#define GITS_CMD_ICID_MASK		0xffffUL

#define GITS_CMD_ITTADDR_OFFSET		2
#define GITS_CMD_ITTADDR_SHIFT		8
#define GITS_CMD_ITTADDR_MASK		0xffffffffffUL
#define GITS_CMD_ITTADDR_ALIGN		GITS_CMD_ITTADDR_SHIFT
#define GITS_CMD_ITTADDR_ALIGN_SZ	(BIT(0) << GITS_CMD_ITTADDR_ALIGN)

#define GITS_CMD_RDBASE_OFFSET		2
#define GITS_CMD_RDBASE_SHIFT		16
#define GITS_CMD_RDBASE_MASK		0xffffffffUL
#define GITS_CMD_RDBASE_ALIGN		GITS_CMD_RDBASE_SHIFT

#define GITS_CMD_VALID_OFFSET		2
#define GITS_CMD_VALID_SHIFT		63
#define GITS_CMD_VALID_MASK		0x1UL

/* Generic helpers combining the *_SHIFT/*_MASK pairs above. */
#define MASK(__basename)		(__basename##_MASK << __basename##_SHIFT)
#define MASK_SET(__val, __basename)	(((__val) & __basename##_MASK) << __basename##_SHIFT)
#define MASK_GET(__reg, __basename)	(((__reg) >> __basename##_SHIFT) & __basename##_MASK)

#ifdef CONFIG_GIC_V3_ITS
void its_rdist_map(void);
void its_rdist_invall(void);

extern atomic_t nlpi_intid;
#endif

#endif /* ZEPHYR_INCLUDE_DRIVERS_INTC_GICV3_PRIV_H_ */
``` | /content/code_sandbox/drivers/interrupt_controller/intc_gicv3_priv.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,470 |
```unknown
# shared_irq configuration options
# Lets several devices register ISRs on a single interrupt line.

menuconfig SHARED_IRQ
	bool "Shared interrupt driver"
	default y
	depends on DT_HAS_SHARED_IRQ_ENABLED
	help
	  Include shared interrupt support in system. Shared interrupt
	  support is NOT required in most systems. If in doubt answer no.

config SHARED_IRQ_INIT_PRIORITY
	int "Shared IRQ init priority"
	depends on SHARED_IRQ
	default 45
	help
	  Shared IRQ are initialized on POST_KERNEL init level. They
	  have to be initialized before any device that uses them.
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.shared_irq | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 111 |
```unknown
# Renesas RA ICU: routes ELC events onto Cortex-M NVIC lines.
config RENESAS_RA_ICU
	bool "Renesas RA series interrupt controller unit"
	default y
	depends on DT_HAS_RENESAS_RA_INTERRUPT_CONTROLLER_UNIT_ENABLED
	select GEN_ISR_TABLES
	help
	  Renesas RA series interrupt controller unit
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.renesas_ra | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 57 |
```c
/*
*
*/
#define DT_DRV_COMPAT litex_vexriscv_intc0
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/irq.h>
#include <zephyr/device.h>
#include <zephyr/types.h>
#include <zephyr/arch/riscv/irq.h>
#define IRQ_MASK DT_INST_REG_ADDR_BY_NAME(0, irq_mask)
#define IRQ_PENDING DT_INST_REG_ADDR_BY_NAME(0, irq_pending)
/* Write the per-line interrupt mask CSR (custom VexRiscv CSR whose
 * address comes from the DT `irq_mask` reg entry).
 */
static inline void vexriscv_litex_irq_setmask(uint32_t mask)
{
	__asm__ volatile ("csrw %0, %1" :: "i"(IRQ_MASK), "r"(mask));
}
/* Read back the per-line interrupt mask CSR. */
static inline uint32_t vexriscv_litex_irq_getmask(void)
{
	uint32_t mask;

	__asm__ volatile ("csrr %0, %1" : "=r"(mask) : "i"(IRQ_MASK));
	return mask;
}
/* Read the pending-interrupt CSR (DT `irq_pending` reg entry); a set bit
 * means that line is asserting, regardless of masking.
 */
static inline uint32_t vexriscv_litex_irq_pending(void)
{
	uint32_t pending;

	__asm__ volatile ("csrr %0, %1" : "=r"(pending) : "i"(IRQ_PENDING));
	return pending;
}
/* Globally enable (@ie != 0) or disable machine-mode interrupts by
 * setting/clearing the mstatus interrupt-enable bit.
 */
static inline void vexriscv_litex_irq_setie(uint32_t ie)
{
	if (ie) {
		__asm__ volatile ("csrrs x0, mstatus, %0"
				:: "r"(MSTATUS_IEN));
	} else {
		__asm__ volatile ("csrrc x0, mstatus, %0"
				:: "r"(MSTATUS_IEN));
	}
}
/* If device node n's line is pending and unmasked, invoke its entry
 * from _sw_isr_table.  Relies on `irqs` and `ite` in the caller's scope.
 */
#define LITEX_IRQ_ADD_HELPER(n)                                    \
	if (irqs & (1 << DT_IRQN(n))) {                            \
		ite = &_sw_isr_table[DT_IRQN(n)];                  \
		ite->isr(ite->arg);                                \
	}

/* Expand the helper only for DT nodes that carry an interrupt. */
#define LITEX_IRQ_ADD(n) IF_ENABLED(DT_IRQ_HAS_IDX(n, 0), (LITEX_IRQ_ADD_HELPER(n)))

/* Machine external interrupt handler: dispatch every pending, unmasked
 * LiteX line to its registered ISR.
 */
static void vexriscv_litex_irq_handler(const void *device)
{
	struct _isr_table_entry *ite;
	uint32_t pending, mask, irqs;

	pending = vexriscv_litex_irq_pending();
	mask = vexriscv_litex_irq_getmask();
	irqs = pending & mask;

	DT_FOREACH_STATUS_OKAY_NODE(LITEX_IRQ_ADD);
}
/*
 * Unmask IRQ line @irq in the LiteX mask CSR.
 *
 * Fix: shift an unsigned constant -- `1 << 31` on a 32-bit int is
 * signed-overflow undefined behavior (C11 6.5.7).
 */
void arch_irq_enable(unsigned int irq)
{
	vexriscv_litex_irq_setmask(vexriscv_litex_irq_getmask() | (1U << irq));
}
/*
 * Mask IRQ line @irq in the LiteX mask CSR.
 *
 * Fix: shift an unsigned constant to avoid `1 << 31` signed-overflow
 * undefined behavior (C11 6.5.7).
 */
void arch_irq_disable(unsigned int irq)
{
	vexriscv_litex_irq_setmask(vexriscv_litex_irq_getmask() & ~(1U << irq));
}
/*
 * Return non-zero (1) when IRQ line @irq is unmasked.
 *
 * Fix: use an unsigned shift (`1 << 31` is UB) and normalize to 0/1
 * instead of returning the raw mask bit, whose conversion of bit 31 to
 * int is implementation-defined.  Callers treating the result as a
 * boolean are unaffected.
 */
int arch_irq_is_enabled(unsigned int irq)
{
	return (vexriscv_litex_irq_getmask() & (1U << irq)) != 0;
}
/* Enable the machine external interrupt in mie, turn on global
 * interrupts and attach the LiteX dispatcher to the RISC-V MEXT line.
 */
static int vexriscv_litex_irq_init(const struct device *dev)
{
	/* Set mie.MEIE (machine external interrupt enable). */
	__asm__ volatile ("csrrs x0, mie, %0"
			:: "r"(1 << RISCV_IRQ_MEXT));
	vexriscv_litex_irq_setie(1);
	IRQ_CONNECT(RISCV_IRQ_MEXT, 0, vexriscv_litex_irq_handler,
		    NULL, 0);

	return 0;
}

DEVICE_DT_INST_DEFINE(0, vexriscv_litex_irq_init, NULL, NULL, NULL,
		      PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY, NULL);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_vexriscv_litex.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 749 |
```c
/*
*/
#define DT_DRV_COMPAT intel_vt_d
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <soc.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <string.h>
#include <zephyr/cache.h>
#include <zephyr/arch/x86/intel_vtd.h>
#include <zephyr/drivers/interrupt_controller/intel_vtd.h>
#include <zephyr/drivers/interrupt_controller/ioapic.h>
#include <zephyr/drivers/interrupt_controller/loapic.h>
#include <zephyr/drivers/pcie/msi.h>
#include <kernel_arch_func.h>
#include "intc_intel_vtd.h"
/* Hint to the CPU that we are spin-waiting (x86 PAUSE). */
static inline void vtd_pause_cpu(void)
{
	__asm__ volatile("pause" ::: "memory");
}
/* 32-bit write to a VT-D MMIO register at offset @reg. */
static void vtd_write_reg32(const struct device *dev,
			    uint16_t reg, uint32_t value)
{
	uintptr_t base_address = DEVICE_MMIO_GET(dev);

	sys_write32(value, (base_address + reg));
}
/* 32-bit read of a VT-D MMIO register at offset @reg. */
static uint32_t vtd_read_reg32(const struct device *dev, uint16_t reg)
{
	uintptr_t base_address = DEVICE_MMIO_GET(dev);

	return sys_read32(base_address + reg);
}
/* 64-bit write to a VT-D MMIO register at offset @reg. */
static void vtd_write_reg64(const struct device *dev,
			    uint16_t reg, uint64_t value)
{
	uintptr_t base_address = DEVICE_MMIO_GET(dev);

	sys_write64(value, (base_address + reg));
}
/* 64-bit read of a VT-D MMIO register at offset @reg. */
static uint64_t vtd_read_reg64(const struct device *dev, uint16_t reg)
{
	uintptr_t base_address = DEVICE_MMIO_GET(dev);

	return sys_read64(base_address + reg);
}
/*
 * Issue a global command: set @cmd_bit in GCMD (preserving the current
 * GSTS state) and busy-wait until the hardware reflects @status_bit in
 * GSTS. Called at init time, so a blocking poll is acceptable.
 */
static void vtd_send_cmd(const struct device *dev,
			 uint16_t cmd_bit, uint16_t status_bit)
{
	uintptr_t base_address = DEVICE_MMIO_GET(dev);
	uint32_t value;

	value = vtd_read_reg32(dev, VTD_GSTS_REG);
	value |= BIT(cmd_bit);

	vtd_write_reg32(dev, VTD_GCMD_REG, value);

	while (!sys_test_bit((base_address + VTD_GSTS_REG),
			     status_bit)) {
		/* Do nothing */
	}
}
/*
 * Flush one IRTE from the data cache so the hardware sees the update.
 * Skipped when the unit reported coherent page-walks (data->pwc, set
 * from ECAP at init), in which case no explicit flush is needed.
 */
static void vtd_flush_irte_from_cache(const struct device *dev,
				      uint8_t irte_idx)
{
	struct vtd_ictl_data *data = dev->data;

	if (!data->pwc) {
		cache_data_flush_range(&data->irte[irte_idx],
				       sizeof(union vtd_irte));
	}
}
/*
 * Initialize the queued-invalidation (QI) interface: reset the tail,
 * program IQA with the queue's address/descriptor width/size, and
 * enable QI via the global command register.
 */
static void vtd_qi_init(const struct device *dev)
{
	struct vtd_ictl_data *data = dev->data;
	uint64_t value;

	vtd_write_reg64(dev, VTD_IQT_REG, 0);
	data->qi_tail = 0;

	value = VTD_IQA_REG_GEN_CONTENT((uintptr_t)data->qi,
					VTD_IQA_WIDTH_128_BIT, QI_SIZE);

	vtd_write_reg64(dev, VTD_IQA_REG, value);

	vtd_send_cmd(dev, VTD_GCMD_QIE, VTD_GSTS_QIES);
}
static inline void vtd_qi_tail_inc(const struct device *dev)
{
struct vtd_ictl_data *data = dev->data;
data->qi_tail += sizeof(struct qi_descriptor);
data->qi_tail %= (QI_NUM * sizeof(struct qi_descriptor));
}
/*
 * Enqueue @descriptor followed by a wait descriptor, kick the hardware
 * by publishing the new tail, and poll until the wait descriptor's
 * status word is written back by the unit.
 *
 * Returns 0 on success, -ETIME on poll timeout, -EIO on a reported
 * invalidation queue error.
 */
static int vtd_qi_send(const struct device *dev,
		       struct qi_descriptor *descriptor)
{
	struct vtd_ictl_data *data = dev->data;
	union qi_wait_descriptor wait_desc = { 0 };
	struct qi_descriptor *desc;
	uint32_t wait_status;
	uint32_t wait_count;

	desc = (struct qi_descriptor *)((uintptr_t)data->qi + data->qi_tail);

	desc->low = descriptor->low;
	desc->high = descriptor->high;

	vtd_qi_tail_inc(dev);

	desc++;

	wait_status = QI_WAIT_STATUS_INCOMPLETE;

	wait_desc.wait.type = QI_TYPE_WAIT;
	wait_desc.wait.status_write = 1;
	wait_desc.wait.status_data = QI_WAIT_STATUS_COMPLETE;
	/* Hardware stores the status address shifted right by 2 */
	wait_desc.wait.address = ((uintptr_t)&wait_status) >> 2;

	desc->low = wait_desc.desc.low;
	desc->high = wait_desc.desc.high;

	vtd_qi_tail_inc(dev);

	/* Publishing the tail makes the new descriptors visible to HW */
	vtd_write_reg64(dev, VTD_IQT_REG, data->qi_tail);

	wait_count = 0;

	while (wait_status != QI_WAIT_STATUS_COMPLETE) {
		/* We cannot use timeout here, this function being called
		 * at init time, it might result that the system clock
		 * is not initialized yet since VT-D init comes first.
		 */
		if (wait_count > QI_WAIT_COUNT_LIMIT) {
			printk("QI timeout\n");
			return -ETIME;
		}

		if (vtd_read_reg32(dev, VTD_FSTS_REG) & VTD_FSTS_IQE) {
			printk("QI error\n");
			return -EIO;
		}

		vtd_pause_cpu();
		wait_count++;
	}

	return 0;
}
/* Globally invalidate the context-cache via a QI ICC descriptor. */
static int vtd_global_cc_invalidate(const struct device *dev)
{
	union qi_icc_descriptor iec_desc = { 0 };

	iec_desc.icc.type = QI_TYPE_ICC;
	iec_desc.icc.granularity = 1; /* Global Invalidation requested */

	return vtd_qi_send(dev, &iec_desc.desc);
}
/* Globally invalidate the interrupt-entry cache via a QI IEC descriptor. */
static int vtd_global_iec_invalidate(const struct device *dev)
{
	union qi_iec_descriptor iec_desc = { 0 };

	iec_desc.iec.type = QI_TYPE_IEC;
	iec_desc.iec.granularity = 0; /* Global Invalidation requested */

	return vtd_qi_send(dev, &iec_desc.desc);
}
/* Invalidate a single IRTE (index @irte_idx) from the interrupt-entry cache. */
static int vtd_index_iec_invalidate(const struct device *dev, uint8_t irte_idx)
{
	union qi_iec_descriptor iec_desc = { 0 };

	iec_desc.iec.type = QI_TYPE_IEC;
	iec_desc.iec.granularity = 1; /* Index based invalidation requested */

	iec_desc.iec.interrupt_index = irte_idx;
	iec_desc.iec.index_mask = 0;

	return vtd_qi_send(dev, &iec_desc.desc);
}
/* Print a human-readable line for each bit set in the FSTS register. */
static void fault_status_description(uint32_t status)
{
	if (status & VTD_FSTS_PFO) {
		printk("Primary Fault Overflow (PFO)\n");
	}

	if (status & VTD_FSTS_AFO) {
		printk("Advanced Fault Overflow (AFO)\n");
	}

	if (status & VTD_FSTS_APF) {
		printk("Advanced Primary Fault (APF)\n");
	}

	if (status & VTD_FSTS_IQE) {
		printk("Invalidation Queue Error (IQE)\n");
	}

	if (status & VTD_FSTS_ICE) {
		printk("Invalidation Completion Error (ICE)\n");
	}

	if (status & VTD_FSTS_ITE) {
		printk("Invalidation Timeout Error\n");
	}

	if (status & VTD_FSTS_PPF) {
		/* FRI gives the index of the first pending fault record */
		printk("Primary Pending Fault (PPF) %u\n",
		       VTD_FSTS_FRI(status));
	}
}
/* Decode and print one 128-bit fault record (low/high halves). */
static void fault_record_description(uint64_t low, uint64_t high)
{
	printk("Fault %s request: Reason 0x%x info 0x%llx src 0x%x\n",
	       (high & VTD_FRCD_T) ? "Read/Atomic" : "Write/Page",
	       VTD_FRCD_FR(high), VTD_FRCD_FI(low), VTD_FRCD_SID(high));
}
/*
 * Fault-event interrupt handler: dump the fault status, walk all fault
 * record registers starting at the index reported by FSTS.FRI, print and
 * clear each valid record, then clear the sticky status bits.
 */
static void fault_event_isr(const void *arg)
{
	const struct device *dev = arg;
	struct vtd_ictl_data *data = dev->data;
	uint32_t status;
	uint8_t f_idx;

	status = vtd_read_reg32(dev, VTD_FSTS_REG);
	fault_status_description(status);

	if (!(status & VTD_FSTS_PPF)) {
		goto out;
	}

	f_idx = VTD_FSTS_FRI(status);
	while (f_idx < data->fault_record_num) {
		uint64_t fault_l, fault_h;

		/* Reading fault's 64 lowest bits */
		fault_l = vtd_read_reg64(dev, data->fault_record_reg +
					 (VTD_FRCD_REG_SIZE * f_idx));
		/* Reading fault's 64 highest bits */
		fault_h = vtd_read_reg64(dev, data->fault_record_reg +
					 (VTD_FRCD_REG_SIZE * f_idx) + 8);

		if (fault_h & VTD_FRCD_F) {
			fault_record_description(fault_l, fault_h);
		}

		/* Clearing the fault (write-back clears the record) */
		vtd_write_reg64(dev, data->fault_record_reg +
				(VTD_FRCD_REG_SIZE * f_idx), fault_l);
		vtd_write_reg64(dev, data->fault_record_reg +
				(VTD_FRCD_REG_SIZE * f_idx) + 8, fault_h);

		f_idx++;
	}
out:
	/* Clearing fault status */
	vtd_write_reg32(dev, VTD_FSTS_REG, VTD_FSTS_CLEAR(status));
}
/*
 * Set up fault-event reporting: discover the fault record registers from
 * CAP, allocate an IRQ/vector outside of remapping, program the fault
 * event data/address registers, connect the ISR, clear any stale status
 * and finally unmask the fault interrupt.
 */
static void vtd_fault_event_init(const struct device *dev)
{
	struct vtd_ictl_data *data = dev->data;
	uint64_t value;
	uint32_t reg;

	value = vtd_read_reg64(dev, VTD_CAP_REG);
	/* CAP.NFR is the number of fault records minus one */
	data->fault_record_num = VTD_CAP_NFR(value) + 1;
	/* CAP.FRO is a 16-byte-unit offset from the register base */
	data->fault_record_reg = DEVICE_MMIO_GET(dev) +
		(uintptr_t)(16 * VTD_CAP_FRO(value));

	/* Allocating IRQ & vector and connecting the ISR handler,
	 * by-passing remapping by using x86 functions directly.
	 */
	data->fault_irq = arch_irq_allocate();
	data->fault_vector = z_x86_allocate_vector(0, -1);

	vtd_write_reg32(dev, VTD_FEDATA_REG, data->fault_vector);
	vtd_write_reg32(dev, VTD_FEADDR_REG,
			pcie_msi_map(data->fault_irq, NULL, 0));
	vtd_write_reg32(dev, VTD_FEUADDR_REG, 0);

	z_x86_irq_connect_on_vector(data->fault_irq, data->fault_vector,
				    fault_event_isr, dev);

	vtd_write_reg32(dev, VTD_FSTS_REG,
			VTD_FSTS_CLEAR(vtd_read_reg32(dev, VTD_FSTS_REG)));

	/* Unmasking interrupts */
	reg = vtd_read_reg32(dev, VTD_FECTL_REG);
	reg &= ~BIT(VTD_FECTL_REG_IM);
	vtd_write_reg32(dev, VTD_FECTL_REG, reg);
}
/*
 * Reserve @n_entries consecutive IRTE slots. Returns the index of the
 * first reserved slot, or -EBUSY when the table cannot fit the request.
 */
static int vtd_ictl_allocate_entries(const struct device *dev,
				     uint8_t n_entries)
{
	struct vtd_ictl_data *data = dev->data;
	int first_free = data->irte_num_used;

	if ((first_free + n_entries) > IRTE_NUM) {
		return -EBUSY;
	}

	data->irte_num_used = first_free + n_entries;

	return first_free;
}
/*
 * Build the remappable-format MSI address for @vector. The sub-handle
 * valid bit is set only for multi-vector allocations.
 */
static uint32_t vtd_ictl_remap_msi(const struct device *dev,
				   msi_vector_t *vector,
				   uint8_t n_vector)
{
	uint32_t shv = 0;

	if (n_vector > 1) {
		shv = VTD_INT_SHV;
	}

	return VTD_MSI_MAP(vector->arch.irte, shv);
}
/*
 * Program the IRTE at @irte_idx with the given vector, delivery flags
 * and (optionally) source-id validation, then invalidate the entry in
 * the interrupt-entry cache and flush it from the data cache.
 *
 * Returns 0 (always succeeds once the slot index is valid).
 */
static int vtd_ictl_remap(const struct device *dev,
			  uint8_t irte_idx,
			  uint16_t vector,
			  uint32_t flags,
			  uint16_t src_id)
{
	struct vtd_ictl_data *data = dev->data;
	union vtd_irte irte = { 0 };
	uint32_t delivery_mode;

	irte.bits.vector = vector;

	if (IS_ENABLED(CONFIG_X2APIC)) {
		/* Getting the logical APIC ID */
		irte.bits.dst_id = x86_read_loapic(LOAPIC_LDR);
	} else {
		/* As for IOAPIC: let's mask all possible IDs */
		irte.bits.dst_id = 0xFF << 8;
	}

	if (src_id != USHRT_MAX &&
	    !IS_ENABLED(CONFIG_INTEL_VTD_ICTL_NO_SRC_ID_CHECK)) {
		irte.bits.src_validation_type = 1;
		irte.bits.src_id = src_id;
	}

	delivery_mode = (flags & IOAPIC_DELIVERY_MODE_MASK);
	/* Fix: the original condition used ||, which is a tautology and
	 * forced every interrupt to low-priority delivery. Only coerce
	 * modes that are neither FIXED nor LOW.
	 */
	if ((delivery_mode != IOAPIC_FIXED) &&
	    (delivery_mode != IOAPIC_LOW)) {
		delivery_mode = IOAPIC_LOW;
	}

	irte.bits.trigger_mode = (flags & IOAPIC_TRIGGER_MASK) >> 15;
	irte.bits.delivery_mode = delivery_mode >> 8;
	irte.bits.redirection_hint = 1;
	irte.bits.dst_mode = 1; /* Always logical */
	irte.bits.present = 1;

	data->irte[irte_idx].parts.low = irte.parts.low;
	data->irte[irte_idx].parts.high = irte.parts.high;

	vtd_index_iec_invalidate(dev, irte_idx);
	vtd_flush_irte_from_cache(dev, irte_idx);

	return 0;
}
/* Record which vector is routed through IRTE slot @irte_idx. */
static int vtd_ictl_set_irte_vector(const struct device *dev,
				    uint8_t irte_idx,
				    uint16_t vector)
{
	struct vtd_ictl_data *data = dev->data;

	data->vectors[irte_idx] = vector;
	return 0;
}
/*
 * Find the IRTE slot that carries @vector.
 * Returns the slot index, or -EINVAL if no slot matches. Linear scan is
 * fine: the table holds only IRTE_NUM entries.
 */
static int vtd_ictl_get_irte_by_vector(const struct device *dev,
				       uint16_t vector)
{
	struct vtd_ictl_data *data = dev->data;

	for (int idx = 0; idx < IRTE_NUM; idx++) {
		if (data->vectors[idx] == vector) {
			return idx;
		}
	}

	return -EINVAL;
}
/* Return the vector currently recorded for IRTE slot @irte_idx. */
static uint16_t vtd_ictl_get_irte_vector(const struct device *dev,
					 uint8_t irte_idx)
{
	const struct vtd_ictl_data *data = dev->data;

	return data->vectors[irte_idx];
}
/* Record which IRQ number is associated with IRTE slot @irte_idx. */
static int vtd_ictl_set_irte_irq(const struct device *dev,
				 uint8_t irte_idx,
				 unsigned int irq)
{
	struct vtd_ictl_data *data = dev->data;

	data->irqs[irte_idx] = irq;
	return 0;
}
/*
 * Find the IRTE slot associated with @irq.
 * Returns the slot index, or -EINVAL if none matches.
 */
static int vtd_ictl_get_irte_by_irq(const struct device *dev,
				    unsigned int irq)
{
	struct vtd_ictl_data *data = dev->data;

	for (int idx = 0; idx < IRTE_NUM; idx++) {
		if (data->irqs[idx] == irq) {
			return idx;
		}
	}

	return -EINVAL;
}
/* Mark whether IRTE slot @irte_idx is used for MSI delivery. */
static void vtd_ictl_set_irte_msi(const struct device *dev,
				  uint8_t irte_idx, bool msi)
{
	struct vtd_ictl_data *data = dev->data;

	data->msi[irte_idx] = msi;
}
/* Return true when IRTE slot @irte_idx is marked as an MSI entry. */
static bool vtd_ictl_irte_is_msi(const struct device *dev,
				 uint8_t irte_idx)
{
	const struct vtd_ictl_data *data = dev->data;

	return data->msi[irte_idx];
}
/*
 * Driver init sequence: map the MMIO window, probe coherency support,
 * bring up fault reporting and queued invalidation, program the
 * interrupt-remapping table address, invalidate caches, optionally
 * enable xAPIC compatibility pass-through, and finally turn interrupt
 * remapping on. Runs with interrupts locked.
 */
static int vtd_ictl_init(const struct device *dev)
{
	struct vtd_ictl_data *data = dev->data;
	unsigned int key = irq_lock();
	uint64_t eime = 0;
	uint64_t value;
	int ret = 0;

	DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);

	if (vtd_read_reg64(dev, VTD_ECAP_REG) & VTD_ECAP_C) {
		printk("Page walk coherency supported\n");
		data->pwc = true;
	}

	vtd_fault_event_init(dev);

	vtd_qi_init(dev);

	if (vtd_global_cc_invalidate(dev) != 0) {
		printk("Could not perform ICC invalidation\n");
		ret = -EIO;
		goto out;
	}

	if (IS_ENABLED(CONFIG_X2APIC)) {
		/* Extended interrupt mode for 32-bit APIC IDs */
		eime = VTD_IRTA_EIME;
	}

	value = VTD_IRTA_REG_GEN_CONTENT((uintptr_t)data->irte,
					 IRTA_SIZE, eime);

	vtd_write_reg64(dev, VTD_IRTA_REG, value);

	if (vtd_global_iec_invalidate(dev) != 0) {
		printk("Could not perform IEC invalidation\n");
		ret = -EIO;
		goto out;
	}

	if (!IS_ENABLED(CONFIG_X2APIC) &&
	    IS_ENABLED(CONFIG_INTEL_VTD_ICTL_XAPIC_PASSTHROUGH)) {
		vtd_send_cmd(dev, VTD_GCMD_CFI, VTD_GSTS_CFIS);
	}

	vtd_send_cmd(dev, VTD_GCMD_SIRTP, VTD_GSTS_SIRTPS);
	vtd_send_cmd(dev, VTD_GCMD_IRE, VTD_GSTS_IRES);

	printk("Intel VT-D up and running (status 0x%x)\n",
	       vtd_read_reg32(dev, VTD_GSTS_REG));

out:
	irq_unlock(key);

	return ret;
}
/* VT-D interrupt-remapping driver API vtable */
static const struct vtd_driver_api vtd_api = {
	.allocate_entries = vtd_ictl_allocate_entries,
	.remap_msi = vtd_ictl_remap_msi,
	.remap = vtd_ictl_remap,
	.set_irte_vector = vtd_ictl_set_irte_vector,
	.get_irte_by_vector = vtd_ictl_get_irte_by_vector,
	.get_irte_vector = vtd_ictl_get_irte_vector,
	.set_irte_irq = vtd_ictl_set_irte_irq,
	.get_irte_by_irq = vtd_ictl_get_irte_by_irq,
	.set_irte_msi = vtd_ictl_set_irte_msi,
	.irte_is_msi = vtd_ictl_irte_is_msi
};
/* Note: only index 0 of irqs/vectors is initialized to -EINVAL here;
 * the remaining elements are zero-initialized per C aggregate rules.
 */
static struct vtd_ictl_data vtd_ictl_data_0 = {
	.irqs = { -EINVAL },
	.vectors = { -EINVAL },
};
static const struct vtd_ictl_cfg vtd_ictl_cfg_0 = {
	DEVICE_MMIO_ROM_INIT(DT_DRV_INST(0)),
};
DEVICE_DT_INST_DEFINE(0,
		      vtd_ictl_init, NULL,
		      &vtd_ictl_data_0, &vtd_ictl_cfg_0,
		      PRE_KERNEL_1, CONFIG_INTEL_VTD_ICTL_INIT_PRIORITY, &vtd_api);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_intel_vtd.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,046 |
```c
*/
#include <zephyr/arch/cpu.h>
#include <zephyr/device.h>
#define DT_DRV_COMPAT mediatek_adsp_intc
/* Per-instance configuration for the MediaTek ADSP interrupt controller */
struct intc_mtk_cfg {
	/* Parent Xtensa interrupt this controller cascades into */
	uint32_t xtensa_irq;
	/* Bitmask of lines handled by this instance */
	uint32_t irq_mask;
	/* Base offset of this instance's lines in _sw_isr_table */
	uint32_t sw_isr_off;
	/* MMIO enable register */
	volatile uint32_t *enable_reg;
	/* MMIO status (pending) register */
	volatile uint32_t *status_reg;
};
/*
 * Return true when interrupt line @irq is currently enabled on this
 * controller instance.
 *
 * Fix: the original used bitwise OR against the enable register, which
 * reported "enabled" whenever any line was enabled; the enable register
 * must be ANDed with the line's (masked) bit.
 */
bool intc_mtk_adsp_get_enable(const struct device *dev, int irq)
{
	const struct intc_mtk_cfg *cfg = dev->config;

	return (*cfg->enable_reg & (BIT(irq) & cfg->irq_mask)) != 0;
}
/*
 * Enable (val=true) or disable (val=false) line @irq in this instance's
 * enable register; lines outside irq_mask are ignored.
 *
 * NOTE(review): the parent Xtensa interrupt is enabled unconditionally,
 * even on the disable path — confirm this is intentional.
 */
void intc_mtk_adsp_set_enable(const struct device *dev, int irq, bool val)
{
	const struct intc_mtk_cfg *cfg = dev->config;

	irq_enable(cfg->xtensa_irq);
	if ((BIT(irq) & cfg->irq_mask) != 0) {
		if (val) {
			*cfg->enable_reg |= BIT(irq);
		} else {
			*cfg->enable_reg &= ~BIT(irq);
		}
	}
}
/*
 * Cascade ISR: dispatch every pending line within irq_mask to its
 * _sw_isr_table entry (offset by sw_isr_off), highest bit first.
 * @arg is the controller's struct device.
 */
static void intc_isr(const void *arg)
{
	const struct intc_mtk_cfg *cfg = ((struct device *)arg)->config;
	uint32_t irqs = *cfg->status_reg & cfg->irq_mask;

	while (irqs != 0) {
		uint32_t irq = find_msb_set(irqs) - 1;
		uint32_t off = cfg->sw_isr_off + irq;

		_sw_isr_table[off].isr(_sw_isr_table[off].arg);
		irqs &= ~BIT(irq);
	}
}
/* Reset an instance: mask all its lines, then enable its parent IRQ. */
static void dev_init(const struct device *dev)
{
	const struct intc_mtk_cfg *cfg = dev->config;

	*cfg->enable_reg = 0;
	irq_enable(cfg->xtensa_irq);
}
/* Connect the cascade ISR for instance N and reset the instance. */
#define DEV_INIT(N) \
	IRQ_CONNECT(DT_INST_IRQN(N), 0, intc_isr, DEVICE_DT_INST_GET(N), 0); \
	dev_init(DEVICE_DT_INST_GET(N));
/* One-shot system init: wire up every enabled controller instance. */
static int intc_init(void)
{
	DT_INST_FOREACH_STATUS_OKAY(DEV_INIT);
	return 0;
}
SYS_INIT(intc_init, PRE_KERNEL_1, 0);
/* Instantiate config + device object for controller instance N.
 * Lines of instance N occupy slots [(N+1)*32, (N+2)*32) of _sw_isr_table.
 */
#define DEF_DEV(N) \
	static const struct intc_mtk_cfg dev_cfg##N = { \
		.xtensa_irq = DT_INST_IRQN(N), \
		.irq_mask = DT_INST_PROP(N, mask), \
		.sw_isr_off = (N + 1) * 32, \
		.enable_reg = (void *)DT_INST_REG_ADDR(N), \
		.status_reg = (void *)DT_INST_PROP(N, status_reg) }; \
	DEVICE_DT_INST_DEFINE(N, NULL, NULL, NULL, &dev_cfg##N, PRE_KERNEL_1, 0, NULL);
DT_INST_FOREACH_STATUS_OKAY(DEF_DEV);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_mtk_adsp.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 625 |
```unknown
# CAVS interrupt controller configuration
config CAVS_ICTL
bool "CAVS Interrupt Logic"
default y
depends on DT_HAS_INTEL_CAVS_INTC_ENABLED
depends on MULTI_LEVEL_INTERRUPTS
help
These are 4 in number supporting a max of 32 interrupts each.
if CAVS_ICTL
config CAVS_ISR_TBL_OFFSET
int "Offset in the SW ISR Table"
default 0
help
This indicates the offset in the SW_ISR_TABLE beginning from where
the ISRs for CAVS Interrupt Controller are assigned.
config CAVS_ICTL_0_OFFSET
int "Parent interrupt number to which CAVS_0 maps"
default 0
config CAVS_ICTL_1_OFFSET
int "Parent interrupt number to which CAVS_1 maps"
default 0
config CAVS_ICTL_2_OFFSET
int "Parent interrupt number to which CAVS_2 maps"
default 0
config CAVS_ICTL_3_OFFSET
int "Parent interrupt number to which CAVS_3 maps"
default 0
config CAVS_ICTL_INIT_PRIORITY
int "CAVS ICTL Init priority"
default 45
help
	  CAVS Interrupt Logic initialization priority.
endif # CAVS_ICTL
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.cavs | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 274 |
```c
/*
*
*/
#define DT_DRV_COMPAT swerv_pic
/**
* @brief SweRV EH1 PIC driver
*/
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/device.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/irq.h>
#include <zephyr/arch/riscv/irq.h>
#define SWERV_PIC_MAX_NUM CONFIG_NUM_IRQS
#define SWERV_PIC_MAX_ID (SWERV_PIC_MAX_NUM + RISCV_MAX_GENERIC_IRQ)
#define SWERV_PIC_MAX_PRIO 16
#define SWERV_PIC_mpiccfg 0x3000
#define SWERV_PIC_meipl(s) (0x0 + (s)*4)
#define SWERV_PIC_meip(x) (0x1000 + (x)*4)
#define SWERV_PIC_meie(s) (0x2000 + (s)*4)
#define SWERV_PIC_meigwctrl(s) (0x4000 + (s)*4)
#define SWERV_PIC_meigwclr(s) (0x5000 + (s)*4)
#define SWERV_PIC_meivt "0xBC8"
#define SWERV_PIC_meipt "0xBC9"
#define SWERV_PIC_meicpct "0xBCA"
#define SWERV_PIC_meicidpl "0xBCB"
#define SWERV_PIC_meicurpl "0xBCC"
#define SWERV_PIC_meihap "0xFC8"
#define swerv_piccsr(csr) SWERV_PIC_##csr
#define swerv_pic_readcsr(csr, value) \
volatile("csrr %0, "swerv_piccsr(csr) : "=r" (value))
#define swerv_pic_writecsr(csr, value) \
volatile("csrw "swerv_piccsr(csr)", %0" :: "rK" (value))
static int save_irq;
/* Read the 32-bit PIC register at byte offset @reg. */
static uint32_t swerv_pic_read(uint32_t reg)
{
	volatile uint32_t *addr =
		(volatile uint32_t *)(DT_INST_REG_ADDR(0) + reg);

	return *addr;
}
/* Write @val to the 32-bit PIC register at byte offset @reg. */
static void swerv_pic_write(uint32_t reg, uint32_t val)
{
	volatile uint32_t *addr =
		(volatile uint32_t *)(DT_INST_REG_ADDR(0) + reg);

	*addr = val;
}
/* Enable a PIC-routed interrupt line; IRQs outside the PIC range are ignored. */
void swerv_pic_irq_enable(uint32_t irq)
{
	uint32_t key;

	if (irq < RISCV_MAX_GENERIC_IRQ || irq >= SWERV_PIC_MAX_ID) {
		return;
	}

	key = irq_lock();
	swerv_pic_write(SWERV_PIC_meie(irq - RISCV_MAX_GENERIC_IRQ), 1);
	irq_unlock(key);
}
/* Disable a PIC-routed interrupt line; IRQs outside the PIC range are ignored. */
void swerv_pic_irq_disable(uint32_t irq)
{
	uint32_t key;

	if (irq < RISCV_MAX_GENERIC_IRQ || irq >= SWERV_PIC_MAX_ID) {
		return;
	}

	key = irq_lock();
	swerv_pic_write(SWERV_PIC_meie(irq - RISCV_MAX_GENERIC_IRQ), 0);
	irq_unlock(key);
}
/*
 * Return 1 when the PIC line is enabled, 0 when disabled,
 * -1 for IRQs outside the PIC range.
 */
int swerv_pic_irq_is_enabled(uint32_t irq)
{
	uint32_t line;

	if (irq < RISCV_MAX_GENERIC_IRQ || irq >= SWERV_PIC_MAX_ID) {
		return -1;
	}

	line = irq - RISCV_MAX_GENERIC_IRQ;

	return swerv_pic_read(SWERV_PIC_meie(line)) & 0x1;
}
/*
 * Set the priority (0=lowest .. 15=highest) of a PIC-routed line.
 * Out-of-range IRQs or priorities are silently ignored.
 *
 * Cleanup: the original performed the range check twice (an
 * `irq <= RISCV_MAX_GENERIC_IRQ` guard followed by a second guard whose
 * `irq < RISCV_MAX_GENERIC_IRQ` half was already dead); the checks are
 * merged into one equivalent guard.
 */
void swerv_pic_set_priority(uint32_t irq, uint32_t priority)
{
	uint32_t key;

	if ((irq <= RISCV_MAX_GENERIC_IRQ) || (irq >= SWERV_PIC_MAX_ID)) {
		return;
	}

	if (priority >= SWERV_PIC_MAX_PRIO) {
		return;
	}

	key = irq_lock();
	swerv_pic_write(SWERV_PIC_meipl(irq - RISCV_MAX_GENERIC_IRQ), priority);
	irq_unlock(key);
}
/* Return the source ID captured by the most recent PIC interrupt. */
int swerv_pic_get_irq(void)
{
	return save_irq;
}
/*
 * External-interrupt ISR: capture the claimed source ID from the meihap
 * CSR, dispatch the matching _sw_isr_table entry, then clear the line's
 * gateway.
 */
static void swerv_pic_irq_handler(const void *arg)
{
	uint32_t tmp;
	uint32_t irq;
	struct _isr_table_entry *ite;

	/* trigger the capture of the interrupt source ID */
	__asm__ swerv_pic_writecsr(meicpct, 0);
	__asm__ swerv_pic_readcsr(meihap, tmp);
	/* meihap bits [9:2] hold the claimed source ID */
	irq = (tmp >> 2) & 0xff;
	save_irq = irq;
	/* z_irq_spurious() does not return */
	if (irq == 0U || irq >= 64) {
		z_irq_spurious(NULL);
	}
	irq += RISCV_MAX_GENERIC_IRQ;
	/* Call the corresponding IRQ handler in _sw_isr_table */
	ite = (struct _isr_table_entry *)&_sw_isr_table[irq];
	if (ite->isr) {
		ite->isr(ite->arg);
	}
	/* NOTE(review): every other register access in this file subtracts
	 * RISCV_MAX_GENERIC_IRQ before indexing PIC registers, but `irq`
	 * has already been offset here — confirm whether meigwclr() should
	 * use (irq - RISCV_MAX_GENERIC_IRQ) instead.
	 */
	swerv_pic_write(SWERV_PIC_meigwclr(irq), 0);
}
/*
 * One-time PIC bring-up: standard priority order, all lines disabled at
 * priority 0 with level-triggered/active-high gateways and no pending
 * state, PIC CSR thresholds cleared, and the external-interrupt cascade
 * connected and enabled.
 *
 * Fix: the per-line priority was initialized to 15 (highest) while the
 * comment — and the mpiccfg "0=lowest" ordering — call for 0. The four
 * identical loops are also merged into one.
 */
static int swerv_pic_init(const struct device *dev)
{
	int i;

	/* Init priority order to 0, 0=lowest to 15=highest */
	swerv_pic_write(SWERV_PIC_mpiccfg, 0);

	for (i = 1; i < SWERV_PIC_MAX_ID; i++) {
		/* Ensure the interrupt is disabled initially */
		swerv_pic_write(SWERV_PIC_meie(i), 0);
		/* Set priority of the interrupt line to 0 initially */
		swerv_pic_write(SWERV_PIC_meipl(i), 0);
		/* Level-triggered / active-high gateway */
		swerv_pic_write(SWERV_PIC_meigwctrl(i), (0<<1)|(0<<0));
		/* Clear any stale pending state */
		swerv_pic_write(SWERV_PIC_meigwclr(i), 0);
	}

	/* No interrupts masked */
	__asm__ swerv_pic_writecsr(meipt, 0);
	__asm__ swerv_pic_writecsr(meicidpl, 0);
	__asm__ swerv_pic_writecsr(meicurpl, 0);

	/* Setup IRQ handler for SweRV PIC driver */
	IRQ_CONNECT(RISCV_IRQ_MEXT,
		    0,
		    swerv_pic_irq_handler,
		    NULL,
		    0);

	/* Enable IRQ for SweRV PIC driver */
	irq_enable(RISCV_IRQ_MEXT);

	return 0;
}
/*
 * Enable @irq: PIC-routed lines go through the PIC driver, generic
 * RISC-V lines set their bit in the mie CSR.
 */
void arch_irq_enable(unsigned int irq)
{
	uint32_t mie;

	if (irq > RISCV_MAX_GENERIC_IRQ) {
		swerv_pic_irq_enable(irq);
		return;
	}

	/*
	 * CSR mie register is updated using atomic instruction csrrs
	 * (atomic read and set bits in CSR register)
	 */
	__asm__ volatile ("csrrs %0, mie, %1\n"
			  : "=r" (mie)
			  : "r" (1 << irq));
}
/*
 * Disable @irq: PIC-routed lines go through the PIC driver, generic
 * RISC-V lines clear their bit in the mie CSR.
 *
 * Cleanup: removed the stray semicolon that followed the function body.
 */
void arch_irq_disable(unsigned int irq)
{
	uint32_t mie;

	if (irq > RISCV_MAX_GENERIC_IRQ) {
		swerv_pic_irq_disable(irq);
		return;
	}

	/*
	 * Use atomic instruction csrrc to disable device interrupt in mie CSR.
	 * (atomic read and clear bits in CSR register)
	 */
	__asm__ volatile ("csrrc %0, mie, %1\n"
			  : "=r" (mie)
			  : "r" (1 << irq));
}
/*
 * Return non-zero iff @irq is enabled: PIC lines are queried via the
 * PIC driver, generic lines via the mie CSR.
 */
int arch_irq_is_enabled(unsigned int irq)
{
	uint32_t mie;

	if (irq > RISCV_MAX_GENERIC_IRQ) {
		return swerv_pic_irq_is_enabled(irq);
	}

	__asm__ volatile ("csrr %0, mie" : "=r" (mie));

	return !!(mie & (1 << irq));
}
/* Register the single PIC instance; initialized before the kernel runs. */
DEVICE_DT_INST_DEFINE(0, swerv_pic_init, NULL, NULL, NULL,
		      PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY, NULL);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_swerv_pic.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,772 |
```c
/*
*
*/
#define DT_DRV_COMPAT nuvoton_npcx_miwu
/**
* @file
* @brief Nuvoton NPCX MIWU driver
*
* The device Multi-Input Wake-Up Unit (MIWU) supports the Nuvoton embedded
* controller (EC) to exit 'Sleep' or 'Deep Sleep' power state which allows chip
* has better power consumption. Also, it provides signal conditioning such as
* 'Level' and 'Edge' trigger type and grouping of external interrupt sources
* of NVIC. The NPCX series has three identical MIWU modules: MIWU0, MIWU1,
* MIWU2. Together, they support a total of over 140 internal and/or external
* wake-up input (WUI) sources.
*
* This driver uses device tree files to present the relationship between
* MIWU and the other devices in different npcx series. For npcx7 series,
* it include:
* 1. npcxn-miwus-wui-map.dtsi: it presents relationship between wake-up inputs
* (WUI) and its source device such as gpio, timer, eSPI VWs and so on.
* 2. npcxn-miwus-int-map.dtsi: it presents relationship between MIWU group
* and NVIC interrupt in npcx series. Please notice it isn't 1-to-1 mapping.
* For example, here is the mapping between miwu0's group a & d and IRQ7:
*
* map_miwu0_groups: {
* parent = <&miwu0>;
* group_ad0: group_ad0_map {
* irq = <7>;
* group_mask = <0x09>;
* };
* ...
* };
*
* It will connect IRQ 7 and intc_miwu_isr0() with the argument, group_mask,
* by IRQ_CONNECT() during driver initialization function. With group_mask,
* 0x09, the driver checks the pending bits of group a and group d in ISR.
* Then it will execute related callback functions if they have been
* registered properly.
*
* INCLUDE FILES: soc_miwu.h
*
*/
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/irq_nextlevel.h>
#include <zephyr/drivers/gpio.h>
#include "soc_miwu.h"
#include "soc_gpio.h"
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(intc_miwu, LOG_LEVEL_ERR);
/* MIWU module instances */
#define NPCX_MIWU_DEV(inst) DEVICE_DT_INST_GET(inst),
static const struct device *const miwu_devs[] = {
DT_INST_FOREACH_STATUS_OKAY(NPCX_MIWU_DEV)
};
BUILD_ASSERT(ARRAY_SIZE(miwu_devs) == NPCX_MIWU_TABLE_COUNT,
"Size of miwu_devs array must equal to NPCX_MIWU_TABLE_COUNT");
/* Driver config */
struct intc_miwu_config {
/* miwu controller base address */
uintptr_t base;
/* index of miwu controller */
uint8_t index;
};
/* Driver data */
struct intc_miwu_data {
	/* Callback functions list for each MIWU group */
	sys_slist_t cb_list_grp[8];
#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	/* Per-group bitmap of pins emulating both-edge triggering */
	uint8_t both_edge_pins[8];
	/* Serializes edge-register updates for the workaround */
	struct k_spinlock lock;
#endif
};
BUILD_ASSERT(sizeof(struct miwu_io_params) == sizeof(gpio_port_pins_t),
"Size of struct miwu_io_params must equal to struct gpio_port_pins_t");
BUILD_ASSERT(offsetof(struct miwu_callback, io_cb.params) +
sizeof(struct miwu_io_params) == sizeof(struct gpio_callback),
"Failed in size check of miwu_callback and gpio_callback structures!");
BUILD_ASSERT(offsetof(struct miwu_callback, io_cb.params.cb_type) ==
offsetof(struct miwu_callback, dev_cb.params.cb_type),
"Failed in offset check of cb_type field of miwu_callback structure");
/* MIWU local functions */
/*
 * Walk @cb_list and invoke every registered callback whose WUI bit is
 * set in @mask. GPIO callbacks are called with gpio_callback semantics;
 * device callbacks receive the source device and WUI.
 */
static void intc_miwu_dispatch_isr(sys_slist_t *cb_list, uint8_t mask)
{
	struct miwu_callback *cb, *tmp;

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(cb_list, cb, tmp, node) {
		if (cb->io_cb.params.cb_type == NPCX_MIWU_CALLBACK_GPIO) {
			if (BIT(cb->io_cb.params.wui.bit) & mask) {
				__ASSERT(cb->io_cb.handler, "No GPIO callback handler!");
				cb->io_cb.handler(
					npcx_get_gpio_dev(cb->io_cb.params.gpio_port),
					(struct gpio_callback *)cb,
					cb->io_cb.params.pin_mask);
			}
		} else {
			if (BIT(cb->dev_cb.params.wui.bit) & mask) {
				__ASSERT(cb->dev_cb.handler, "No device callback handler!");
				cb->dev_cb.handler(cb->dev_cb.params.source,
						   &cb->dev_cb.params.wui);
			}
		}
	}
}
#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
/*
 * Emulate both-edge triggering: sample the current input level from the
 * status register and arm the opposite single-edge trigger so the next
 * transition fires.
 */
static void npcx_miwu_set_pseudo_both_edge(uint8_t table, uint8_t group, uint8_t bit)
{
	const struct intc_miwu_config *config = miwu_devs[table]->config;
	const uint32_t base = config->base;
	uint8_t pmask = BIT(bit);

	if (IS_BIT_SET(NPCX_WKST(base, group), bit)) {
		/* Current signal level is high, set falling edge trigger. */
		NPCX_WKEDG(base, group) |= pmask;
	} else {
		/* Current signal level is low, set rising edge trigger. */
		NPCX_WKEDG(base, group) &= ~pmask;
	}
}
#endif
/*
 * Service one MIWU group: compute the pending-and-enabled mask, clear
 * the pending bits (re-arming pseudo-both-edge pins as they are
 * cleared, under the workaround), then dispatch the callbacks.
 */
static void intc_miwu_isr_pri(int wui_table, int wui_group)
{
	const struct intc_miwu_config *config = miwu_devs[wui_table]->config;
	struct intc_miwu_data *data = miwu_devs[wui_table]->data;
	const uint32_t base = config->base;
	uint8_t mask = NPCX_WKPND(base, wui_group) & NPCX_WKEN(base, wui_group);

#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	uint8_t new_mask = mask;

	/* Clear pending bits one at a time, lowest bit first */
	while (new_mask != 0) {
		uint8_t pending_bit = find_lsb_set(new_mask) - 1;
		uint8_t pending_mask = BIT(pending_bit);

		NPCX_WKPCL(base, wui_group) = pending_mask;
		if ((data->both_edge_pins[wui_group] & pending_mask) != 0) {
			npcx_miwu_set_pseudo_both_edge(wui_table, wui_group, pending_bit);
		}
		new_mask &= ~pending_mask;
	};
#else
	/* Clear pending bits before dispatch ISR */
	if (mask) {
		NPCX_WKPCL(base, wui_group) = mask;
	}
#endif

	/* Dispatch registered gpio isrs */
	intc_miwu_dispatch_isr(&data->cb_list_grp[wui_group], mask);
}
/* Platform specific MIWU functions */
/*
 * Enable the wake-up interrupt of @wui. Under the both-edge workaround
 * the pin's edge trigger is re-armed against the current input level,
 * with the update serialized by the driver spinlock.
 */
void npcx_miwu_irq_enable(const struct npcx_wui *wui)
{
	const struct intc_miwu_config *config = miwu_devs[wui->table]->config;
	const uint32_t base = config->base;

#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	k_spinlock_key_t key;
	struct intc_miwu_data *data = miwu_devs[wui->table]->data;

	key = k_spin_lock(&data->lock);
#endif

	NPCX_WKEN(base, wui->group) |= BIT(wui->bit);

#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	if ((data->both_edge_pins[wui->group] & BIT(wui->bit)) != 0) {
		npcx_miwu_set_pseudo_both_edge(wui->table, wui->group, wui->bit);
	}
	k_spin_unlock(&data->lock, key);
#endif
}
/* Disable the wake-up interrupt of @wui by clearing its WKEN bit. */
void npcx_miwu_irq_disable(const struct npcx_wui *wui)
{
	const struct intc_miwu_config *cfg = miwu_devs[wui->table]->config;

	NPCX_WKEN(cfg->base, wui->group) &= ~BIT(wui->bit);
}
/* Connect the wake-up input source of @wui by setting its WKINEN bit. */
void npcx_miwu_io_enable(const struct npcx_wui *wui)
{
	const struct intc_miwu_config *cfg = miwu_devs[wui->table]->config;

	NPCX_WKINEN(cfg->base, wui->group) |= BIT(wui->bit);
}
/* Disconnect the wake-up input source of @wui by clearing its WKINEN bit. */
void npcx_miwu_io_disable(const struct npcx_wui *wui)
{
	const struct intc_miwu_config *cfg = miwu_devs[wui->table]->config;

	NPCX_WKINEN(cfg->base, wui->group) &= ~BIT(wui->bit);
}
/* Return true when the wake-up interrupt of @wui is currently enabled. */
bool npcx_miwu_irq_get_state(const struct npcx_wui *wui)
{
	const struct intc_miwu_config *cfg = miwu_devs[wui->table]->config;

	return IS_BIT_SET(NPCX_WKEN(cfg->base, wui->group), wui->bit);
}
/*
 * Read and clear the pending state of @wui. Returns true when the input
 * was pending. Under the both-edge workaround the edge trigger is
 * re-armed after clearing, serialized by the driver spinlock.
 */
bool npcx_miwu_irq_get_and_clear_pending(const struct npcx_wui *wui)
{
	const struct intc_miwu_config *config = miwu_devs[wui->table]->config;
	const uint32_t base = config->base;
#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	k_spinlock_key_t key;
	struct intc_miwu_data *data = miwu_devs[wui->table]->data;
#endif
	bool pending = IS_BIT_SET(NPCX_WKPND(base, wui->group), wui->bit);

	if (pending) {
#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
		key = k_spin_lock(&data->lock);
		NPCX_WKPCL(base, wui->group) = BIT(wui->bit);
		if ((data->both_edge_pins[wui->group] & BIT(wui->bit)) != 0) {
			npcx_miwu_set_pseudo_both_edge(wui->table, wui->group, wui->bit);
		}
		k_spin_unlock(&data->lock, key);
#else
		NPCX_WKPCL(base, wui->group) = BIT(wui->bit);
#endif
	}
	return pending;
}
/*
 * Configure detection mode (level/edge) and trigger polarity for @wui.
 * The input's interrupt is disabled while reconfiguring and its pending
 * state is cleared afterwards; callers must re-enable the interrupt.
 * Returns 0 on success or -EINVAL for an unsupported mode/trig combo.
 */
int npcx_miwu_interrupt_configure(const struct npcx_wui *wui,
				  enum miwu_int_mode mode, enum miwu_int_trig trig)
{
	const struct intc_miwu_config *config = miwu_devs[wui->table]->config;
	const uint32_t base = config->base;
	uint8_t pmask = BIT(wui->bit);
	int ret = 0;
#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	struct intc_miwu_data *data = miwu_devs[wui->table]->data;
	k_spinlock_key_t key;
#endif

	/* Disable interrupt of wake-up input source before configuring it */
	npcx_miwu_irq_disable(wui);

#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	key = k_spin_lock(&data->lock);
	/* Drop any previous pseudo both-edge marking for this pin */
	data->both_edge_pins[wui->group] &= ~BIT(wui->bit);
#endif
	/* Handle interrupt for level trigger */
	if (mode == NPCX_MIWU_MODE_LEVEL) {
		/* Set detection mode to level */
		NPCX_WKMOD(base, wui->group) |= pmask;
		switch (trig) {
		/* Enable interrupting on level high */
		case NPCX_MIWU_TRIG_HIGH:
			NPCX_WKEDG(base, wui->group) &= ~pmask;
			break;
		/* Enable interrupting on level low */
		case NPCX_MIWU_TRIG_LOW:
			NPCX_WKEDG(base, wui->group) |= pmask;
			break;
		default:
			ret = -EINVAL;
			goto early_exit;
		}
	/* Handle interrupt for edge trigger */
	} else {
		/* Set detection mode to edge */
		NPCX_WKMOD(base, wui->group) &= ~pmask;
		switch (trig) {
		/* Handle interrupting on falling edge */
		case NPCX_MIWU_TRIG_LOW:
			NPCX_WKAEDG(base, wui->group) &= ~pmask;
			NPCX_WKEDG(base, wui->group) |= pmask;
			break;
		/* Handle interrupting on rising edge */
		case NPCX_MIWU_TRIG_HIGH:
			NPCX_WKAEDG(base, wui->group) &= ~pmask;
			NPCX_WKEDG(base, wui->group) &= ~pmask;
			break;
		/* Handle interrupting on both edges */
		case NPCX_MIWU_TRIG_BOTH:
#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
			/* Emulate both-edge via alternating single edges */
			NPCX_WKAEDG(base, wui->group) &= ~pmask;
			data->both_edge_pins[wui->group] |= BIT(wui->bit);
#else
			/* Enable any edge */
			NPCX_WKAEDG(base, wui->group) |= pmask;
#endif
			break;
		default:
			ret = -EINVAL;
			goto early_exit;
		}
	}

	/* Enable wake-up input sources */
	NPCX_WKINEN(base, wui->group) |= pmask;

	/*
	 * Clear pending bit since it might be set if WKINEN bit is
	 * changed.
	 */
	NPCX_WKPCL(base, wui->group) |= pmask;

#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	if ((data->both_edge_pins[wui->group] & BIT(wui->bit)) != 0) {
		npcx_miwu_set_pseudo_both_edge(wui->table, wui->group, wui->bit);
	}
#endif

early_exit:
#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	k_spin_unlock(&data->lock, key);
#endif
	return ret;
}
void npcx_miwu_init_gpio_callback(struct miwu_callback *callback,
const struct npcx_wui *io_wui, int port)
{
/* Initialize WUI and GPIO settings in unused bits field */
callback->io_cb.params.wui.table = io_wui->table;
callback->io_cb.params.wui.bit = io_wui->bit;
callback->io_cb.params.gpio_port = port;
callback->io_cb.params.cb_type = NPCX_MIWU_CALLBACK_GPIO;
callback->io_cb.params.wui.group = io_wui->group;
}
/* Initialize a device-type MIWU callback with its WUI, handler and source. */
void npcx_miwu_init_dev_callback(struct miwu_callback *callback,
				 const struct npcx_wui *dev_wui,
				 miwu_dev_callback_handler_t handler,
				 const struct device *source)
{
	struct miwu_dev_params *params = &callback->dev_cb.params;

	params->cb_type = NPCX_MIWU_CALLBACK_DEV;
	params->wui.table = dev_wui->table;
	params->wui.group = dev_wui->group;
	params->wui.bit = dev_wui->bit;
	params->source = source;
	callback->dev_cb.handler = handler;
}
/*
 * Register (set=true) or unregister (set=false) @cb on the callback
 * list of its WUI group. Returns -EINVAL when asked to remove a
 * callback that was never registered.
 */
int npcx_miwu_manage_callback(struct miwu_callback *cb, bool set)
{
	struct npcx_wui *wui;
	struct intc_miwu_data *data;
	sys_slist_t *cb_list;

	/* Both callback variants store the WUI at the same offset (see
	 * the BUILD_ASSERTs above); pick the right view by cb_type.
	 */
	if (cb->io_cb.params.cb_type == NPCX_MIWU_CALLBACK_GPIO) {
		wui = &cb->io_cb.params.wui;
	} else {
		wui = &cb->dev_cb.params.wui;
	}

	data = miwu_devs[wui->table]->data;
	cb_list = &data->cb_list_grp[wui->group];
	if (!sys_slist_is_empty(cb_list)) {
		/* Remove any existing registration first (idempotent add) */
		if (!sys_slist_find_and_remove(cb_list, &cb->node)) {
			if (!set) {
				return -EINVAL;
			}
		}
	}

	if (set) {
		sys_slist_prepend(cb_list, &cb->node);
	}

	return 0;
}
/* MIWU driver registration */
#define NPCX_MIWU_ISR_FUNC(index) _CONCAT(intc_miwu_isr, index)
#define NPCX_MIWU_INIT_FUNC(inst) _CONCAT(intc_miwu_init, inst)
#define NPCX_MIWU_INIT_FUNC_DECL(inst) \
static int intc_miwu_init##inst(const struct device *dev)
/* MIWU ISR implementation */
#define NPCX_MIWU_ISR_FUNC_IMPL(inst) \
static void intc_miwu_isr##inst(void *arg) \
{ \
uint8_t grp_mask = (uint32_t)arg; \
int group = 0; \
\
/* Check all MIWU groups belong to the same irq */ \
do { \
if (grp_mask & 0x01) \
intc_miwu_isr_pri(inst, group); \
group++; \
grp_mask = grp_mask >> 1; \
\
} while (grp_mask != 0); \
}
/* MIWU init function implementation */
#define NPCX_MIWU_INIT_FUNC_IMPL(inst) \
static int intc_miwu_init##inst(const struct device *dev) \
{ \
int i; \
const struct intc_miwu_config *config = dev->config; \
const uint32_t base = config->base; \
\
/* Clear all MIWUs' pending and enable bits of MIWU device */ \
for (i = 0; i < NPCX_MIWU_GROUP_COUNT; i++) { \
NPCX_WKEN(base, i) = 0; \
NPCX_WKPCL(base, i) = 0xFF; \
} \
\
/* Config IRQ and MWIU group directly */ \
DT_FOREACH_CHILD(NPCX_DT_NODE_FROM_MIWU_MAP(inst), \
NPCX_DT_MIWU_IRQ_CONNECT_IMPL_CHILD_FUNC) \
return 0; \
} \
#define NPCX_MIWU_INIT(inst) \
NPCX_MIWU_INIT_FUNC_DECL(inst); \
\
static const struct intc_miwu_config miwu_config_##inst = { \
.base = DT_REG_ADDR(DT_NODELABEL(miwu##inst)), \
.index = DT_PROP(DT_NODELABEL(miwu##inst), index), \
}; \
struct intc_miwu_data miwu_data_##inst; \
\
DEVICE_DT_INST_DEFINE(inst, \
NPCX_MIWU_INIT_FUNC(inst), \
NULL, \
&miwu_data_##inst, &miwu_config_##inst, \
PRE_KERNEL_1, \
CONFIG_INTC_INIT_PRIORITY, NULL); \
\
NPCX_MIWU_ISR_FUNC_IMPL(inst) \
\
NPCX_MIWU_INIT_FUNC_IMPL(inst)
DT_INST_FOREACH_STATUS_OKAY(NPCX_MIWU_INIT)
``` | /content/code_sandbox/drivers/interrupt_controller/intc_miwu.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,323 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/init.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(intc_it8xxx2, LOG_LEVEL_DBG);
#include <zephyr/sys/printk.h>
#include <zephyr/sw_isr_table.h>
#include "intc_ite_it8xxx2.h"
#define MAX_REGISR_IRQ_NUM 8
#define IVECT_OFFSET_WITH_IRQ 0x10
/* Interrupt number of INTC module */
static uint8_t intc_irq;
static volatile uint8_t *const reg_status[] = {
&ISR0, &ISR1, &ISR2, &ISR3,
&ISR4, &ISR5, &ISR6, &ISR7,
&ISR8, &ISR9, &ISR10, &ISR11,
&ISR12, &ISR13, &ISR14, &ISR15,
&ISR16, &ISR17, &ISR18, &ISR19,
&ISR20, &ISR21, &ISR22, &ISR23
};
static volatile uint8_t *const reg_enable[] = {
&IER0, &IER1, &IER2, &IER3,
&IER4, &IER5, &IER6, &IER7,
&IER8, &IER9, &IER10, &IER11,
&IER12, &IER13, &IER14, &IER15,
&IER16, &IER17, &IER18, &IER19,
&IER20, &IER21, &IER22, &IER23
};
/* edge/level trigger register */
static volatile uint8_t *const reg_ielmr[] = {
&IELMR0, &IELMR1, &IELMR2, &IELMR3,
&IELMR4, &IELMR5, &IELMR6, &IELMR7,
&IELMR8, &IELMR9, &IELMR10, &IELMR11,
&IELMR12, &IELMR13, &IELMR14, &IELMR15,
&IELMR16, &IELMR17, &IELMR18, &IELMR19,
&IELMR20, &IELMR21, &IELMR22, &IELMR23,
};
/* high/low trigger register */
static volatile uint8_t *const reg_ipolr[] = {
&IPOLR0, &IPOLR1, &IPOLR2, &IPOLR3,
&IPOLR4, &IPOLR5, &IPOLR6, &IPOLR7,
&IPOLR8, &IPOLR9, &IPOLR10, &IPOLR11,
&IPOLR12, &IPOLR13, &IPOLR14, &IPOLR15,
&IPOLR16, &IPOLR17, &IPOLR18, &IPOLR19,
&IPOLR20, &IPOLR21, &IPOLR22, &IPOLR23
};
#define IT8XXX2_IER_COUNT ARRAY_SIZE(reg_enable)
static uint8_t ier_setting[IT8XXX2_IER_COUNT];
/*
 * Snapshot every IER group into ier_setting[] and mask all SoC
 * interrupts. Pair with ite_intc_restore_interrupts().
 */
void ite_intc_save_and_disable_interrupts(void)
{
	volatile uint8_t _ier __unused;
	/* Disable global interrupt for critical section */
	unsigned int key = irq_lock();
	/* Save and disable interrupts */
	for (int i = 0; i < IT8XXX2_IER_COUNT; i++) {
		ier_setting[i] = *reg_enable[i];
		*reg_enable[i] = 0;
	}
	/*
	 * This load operation will guarantee the above modification of
	 * SOC's register can be seen by any following instructions.
	 * Note: Barrier instruction can not synchronize chip register,
	 * so we introduce workaround here.
	 */
	_ier = *reg_enable[IT8XXX2_IER_COUNT - 1];
	irq_unlock(key);
}
void ite_intc_restore_interrupts(void)
{
	/*
	 * Re-enable every interrupt group from the snapshot taken by
	 * ite_intc_save_and_disable_interrupts(). Done under irq_lock so
	 * the highest-priority pending interrupt is the first one taken
	 * once interrupts are unlocked.
	 */
	unsigned int key = irq_lock();
	int idx = 0;

	while (idx < IT8XXX2_IER_COUNT) {
		*reg_enable[idx] = ier_setting[idx];
		idx++;
	}
	irq_unlock(key);
}
/*
 * Acknowledge (clear) the status flag of one interrupt. ISR registers
 * are write-1-to-clear.
 */
void ite_intc_isr_clear(unsigned int irq)
{
	uint32_t g, i;
	volatile uint8_t *isr;
	/*
	 * Valid IRQ numbers are 0..CONFIG_NUM_IRQS-1. The previous
	 * `irq > CONFIG_NUM_IRQS` test let irq == CONFIG_NUM_IRQS through
	 * and index one element past the end of reg_status[].
	 */
	if (irq >= CONFIG_NUM_IRQS) {
		return;
	}
	g = irq / MAX_REGISR_IRQ_NUM;
	i = irq % MAX_REGISR_IRQ_NUM;
	isr = reg_status[g];
	*isr = BIT(i);
}
/* Unmask one interrupt line in its group's IER register. */
void __soc_ram_code ite_intc_irq_enable(unsigned int irq)
{
	uint32_t g, i;
	volatile uint8_t *en;
	/*
	 * Valid IRQ numbers are 0..CONFIG_NUM_IRQS-1. The previous
	 * `irq > CONFIG_NUM_IRQS` test let irq == CONFIG_NUM_IRQS through
	 * and index one element past the end of reg_enable[].
	 */
	if (irq >= CONFIG_NUM_IRQS) {
		return;
	}
	g = irq / MAX_REGISR_IRQ_NUM;
	i = irq % MAX_REGISR_IRQ_NUM;
	en = reg_enable[g];
	/* critical section due to run a bit-wise OR operation */
	unsigned int key = irq_lock();
	SET_MASK(*en, BIT(i));
	irq_unlock(key);
}
/* Mask one interrupt line in its group's IER register. */
void __soc_ram_code ite_intc_irq_disable(unsigned int irq)
{
	uint32_t g, i;
	volatile uint8_t *en;
	volatile uint8_t _ier __unused;
	/*
	 * Valid IRQ numbers are 0..CONFIG_NUM_IRQS-1. The previous
	 * `irq > CONFIG_NUM_IRQS` test let irq == CONFIG_NUM_IRQS through
	 * and index one element past the end of reg_enable[].
	 */
	if (irq >= CONFIG_NUM_IRQS) {
		return;
	}
	g = irq / MAX_REGISR_IRQ_NUM;
	i = irq % MAX_REGISR_IRQ_NUM;
	en = reg_enable[g];
	/* critical section due to run a bit-wise OR operation */
	unsigned int key = irq_lock();
	CLEAR_MASK(*en, BIT(i));
	/*
	 * This load operation will guarantee the above modification of
	 * SOC's register can be seen by any following instructions.
	 */
	_ier = *en;
	irq_unlock(key);
}
/*
 * Program the trigger type and polarity of one interrupt line.
 * Both-edge triggering is rejected (not supported by this INTC).
 */
void ite_intc_irq_polarity_set(unsigned int irq, unsigned int flags)
{
	uint32_t g, i;
	volatile uint8_t *tri;
	/*
	 * Valid IRQ numbers are 0..CONFIG_NUM_IRQS-1: the previous `>`
	 * test allowed irq == CONFIG_NUM_IRQS to index one element past
	 * the end of the register tables.
	 */
	if ((irq >= CONFIG_NUM_IRQS) || ((flags&IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)) {
		return;
	}
	g = irq / MAX_REGISR_IRQ_NUM;
	i = irq % MAX_REGISR_IRQ_NUM;
	/* IPOLR bit: 0 = high/rising active, 1 = low/falling active */
	tri = reg_ipolr[g];
	if ((flags&IRQ_TYPE_LEVEL_HIGH) || (flags&IRQ_TYPE_EDGE_RISING)) {
		CLEAR_MASK(*tri, BIT(i));
	} else {
		SET_MASK(*tri, BIT(i));
	}
	/* IELMR bit: 0 = level-triggered, 1 = edge-triggered */
	tri = reg_ielmr[g];
	if ((flags&IRQ_TYPE_LEVEL_LOW) || (flags&IRQ_TYPE_LEVEL_HIGH)) {
		CLEAR_MASK(*tri, BIT(i));
	} else {
		SET_MASK(*tri, BIT(i));
	}
}
/*
 * Return non-zero when the interrupt line is unmasked.
 * Out-of-range IRQs report "disabled" (valid range: 0..NUM_IRQS-1;
 * the previous `>` test read one element past the end of reg_enable[]
 * for irq == CONFIG_NUM_IRQS).
 */
int __soc_ram_code ite_intc_irq_is_enable(unsigned int irq)
{
	uint32_t g, i;
	volatile uint8_t *en;
	if (irq >= CONFIG_NUM_IRQS) {
		return 0;
	}
	g = irq / MAX_REGISR_IRQ_NUM;
	i = irq % MAX_REGISR_IRQ_NUM;
	en = reg_enable[g];
	return IS_MASK_SET(*en, BIT(i));
}
/* Return the IRQ number latched by the most recent call to get_irq(). */
uint8_t __soc_ram_code ite_intc_get_irq_num(void)
{
	return intc_irq;
}
/* True when IVECT reports no pending interrupt (vector equals the base offset). */
bool __soc_ram_code ite_intc_no_irq(void)
{
	return (IVECT == IVECT_OFFSET_WITH_IRQ);
}
/*
 * Read and acknowledge the active interrupt number from the IVECT
 * register, caching it in intc_irq for ite_intc_get_irq_num().
 * Called from the SoC's low-level interrupt entry.
 */
uint8_t __soc_ram_code get_irq(void *arg)
{
	ARG_UNUSED(arg);
	/* wait until two equal interrupt values are read */
	do {
		/* Read interrupt number from interrupt vector register */
		intc_irq = IVECT;
		/*
		 * WORKAROUND: when the interrupt vector register (IVECT)
		 * isn't latched in a load operation, we read it again to make
		 * sure the value we got is the correct value.
		 */
	} while (intc_irq != IVECT);
	/* determine interrupt number */
	intc_irq -= IVECT_OFFSET_WITH_IRQ;
	/*
	 * Look for pending interrupt if there's interrupt number 0 from
	 * the AIVECT register.
	 */
	if (intc_irq == 0) {
		uint8_t int_pending;
		/* Scan groups high-to-low; take the highest pending line found */
		for (int i = (IT8XXX2_IER_COUNT - 1); i >= 0; i--) {
			int_pending = (*reg_status[i] & *reg_enable[i]);
			if (int_pending != 0) {
				intc_irq = (MAX_REGISR_IRQ_NUM * i) +
					   find_msb_set(int_pending) - 1;
				LOG_DBG("Pending interrupt found: %d",
					intc_irq);
				LOG_DBG("CPU mepc: 0x%lx", csr_read(mepc));
				break;
			}
		}
	}
	/* clear interrupt status */
	ite_intc_isr_clear(intc_irq);
	/* return interrupt number */
	return intc_irq;
}
/*
 * Placeholder handler for the reserved IRQ0 line; see the WORKAROUND
 * note in soc_interrupt_init(). Intentionally does nothing but log.
 */
static void intc_irq0_handler(const void *arg)
{
	ARG_UNUSED(arg);
	LOG_DBG("SOC it8xxx2 Interrupt 0 handler");
}
/*
 * SoC-level interrupt bring-up: mask every INTC line, register the
 * reserved-IRQ0 workaround handler and enable machine-mode external
 * interrupts. Under ZTEST it may first force a watchdog hard reset to
 * leave debug mode.
 */
void soc_interrupt_init(void)
{
#ifdef CONFIG_ZTEST
	/*
	 * After flashed EC image, we needed to manually press the reset button
	 * on it8xxx2_evb, then run the test. Now, without pressing the button,
	 * we can disable debug mode and trigger a watchdog hard reset then
	 * run tests.
	 */
	struct wdt_it8xxx2_regs *const wdt_regs = WDT_IT8XXX2_REGS_BASE;
	struct gctrl_it8xxx2_regs *const gctrl_regs = GCTRL_IT8XXX2_REGS_BASE;
	if (gctrl_regs->GCTRL_DBGROS & IT8XXX2_GCTRL_SMB_DBGR) {
		/* Disable debug mode through i2c */
		IT8XXX2_SMB_SLVISELR |= BIT(4);
		/* Enable ETWD reset */
		wdt_regs->ETWCFG = 0;
		wdt_regs->ET1PSR = IT8XXX2_WDT_ETPS_1P024_KHZ;
		wdt_regs->ETWCFG = (IT8XXX2_WDT_EWDKEYEN | IT8XXX2_WDT_EWDSRC);
		/* Enable ETWD hardware reset */
		gctrl_regs->GCTRL_ETWDUARTCR |= IT8XXX2_GCTRL_ETWD_HW_RST_EN;
		/* Trigger ETWD reset */
		wdt_regs->EWDKEYR = 0;
		/* Spin and wait for reboot */
		while (1)
			;
	} else {
		/* Disable ETWD hardware reset */
		gctrl_regs->GCTRL_ETWDUARTCR &= ~IT8XXX2_GCTRL_ETWD_HW_RST_EN;
	}
#endif
	/* Ensure interrupts of soc are disabled at default */
	for (int i = 0; i < ARRAY_SIZE(reg_enable); i++) {
		*reg_enable[i] = 0;
	}
	/*
	 * WORKAROUND: In the it8xxx2 chip, the interrupt for INT0 is reserved.
	 * However, in some stress tests, the unhandled IRQ0 issue occurs.
	 * To prevent the system from going directly into kernel panic, we
	 * implemented a workaround by registering interrupt number 0 and doing
	 * nothing in the IRQ0 handler. The side effect of this solution is
	 * that when IRQ0 is triggered, it will take some time to execute the
	 * routine. There is no need to worry about missing interrupts because
	 * each IRQ's ISR is write-clear, and if the status is not cleared, it
	 * will continue to trigger.
	 *
	 * NOTE: After this workaround is merged, we will then find out under
	 * what circumstances the situation can be reproduced and fix it, and
	 * then remove the workaround.
	 */
	IRQ_CONNECT(0, 0, intc_irq0_handler, 0, 0);
	/* Enable M-mode external interrupt */
	csr_set(mie, MIP_MEIP);
}
``` | /content/code_sandbox/drivers/interrupt_controller/intc_ite_it8xxx2.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,618 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/drivers/interrupt_controller/riscv_clic.h>
#include <hal/nrf_vpr_clic.h>
/* Enable the given interrupt line in the VPR CLIC. */
void riscv_clic_irq_enable(uint32_t irq)
{
	nrf_vpr_clic_int_enable_set(NRF_VPRCLIC, irq, true);
}
/* Disable the given interrupt line in the VPR CLIC. */
void riscv_clic_irq_disable(uint32_t irq)
{
	nrf_vpr_clic_int_enable_set(NRF_VPRCLIC, irq, false);
}
/* Return non-zero when the interrupt line is enabled. */
int riscv_clic_irq_is_enabled(uint32_t irq)
{
	return nrf_vpr_clic_int_enable_check(NRF_VPRCLIC, irq);
}
/*
 * Set the priority of an interrupt line.
 * NOTE(review): `flags` is accepted for API compatibility but unused —
 * confirm the VPR CLIC has no per-IRQ trigger configuration to apply.
 */
void riscv_clic_irq_priority_set(uint32_t irq, uint32_t pri, uint32_t flags)
{
	nrf_vpr_clic_int_priority_set(NRF_VPRCLIC, irq, NRF_VPR_CLIC_INT_TO_PRIO(pri));
}
/* Software-trigger: mark the interrupt line pending. */
void riscv_clic_irq_set_pending(uint32_t irq)
{
	nrf_vpr_clic_int_pending_set(NRF_VPRCLIC, irq);
}
``` | /content/code_sandbox/drivers/interrupt_controller/intc_nrfx_clic.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 234 |
```c
/*
*
*/
#define DT_DRV_COMPAT openisa_rv32m1_intmux
/**
* @file
* @brief RV32M1 INTMUX (interrupt multiplexer) driver
*
* This driver provides support for level 2 interrupts on the RV32M1
* SoC using the INTMUX peripheral.
*
* Each of the RI5CY and ZERO-RISCY cores has an INTMUX peripheral;
* INTMUX0 is wired to the RI5CY event unit interrupt table, while
* INTMUX1 is used with ZERO-RISCY.
*
* For this reason, only a single intmux device is declared here. The
* dtsi for each core needs to set up the intmux device and any
* associated IRQ numbers to work with this driver.
*/
#include <zephyr/kernel.h>
#include <zephyr/devicetree/interrupt_controller.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/init.h>
#include <zephyr/irq.h>
#include <zephyr/irq_nextlevel.h>
#include <zephyr/sw_isr_table.h>
#include <soc.h>
#include <zephyr/dt-bindings/interrupt-controller/openisa-intmux.h>
/*
* CHn_VEC registers are offset by a value that is convenient if
* you're dealing with a Cortex-M NVIC vector table; we're not, so it
* needs to be subtracted out to get a useful value.
*/
#define VECN_OFFSET 48U
struct rv32m1_intmux_config {
INTMUX_Type *regs;
const struct device *clock_dev;
clock_control_subsys_t clock_subsys;
struct _isr_table_entry *isr_base;
};
#define DEV_REGS(dev) (((const struct rv32m1_intmux_config *)(dev->config))->regs)
/*
* <irq_nextlevel.h> API
*/
static void rv32m1_intmux_irq_enable(const struct device *dev, uint32_t irq)
{
INTMUX_Type *regs = DEV_REGS(dev);
uint32_t channel = rv32m1_intmux_channel(irq);
uint32_t line = rv32m1_intmux_line(irq);
regs->CHANNEL[channel].CHn_IER_31_0 |= BIT(line);
}
static void rv32m1_intmux_irq_disable(const struct device *dev, uint32_t irq)
{
INTMUX_Type *regs = DEV_REGS(dev);
uint32_t channel = rv32m1_intmux_channel(irq);
uint32_t line = rv32m1_intmux_line(irq);
regs->CHANNEL[channel].CHn_IER_31_0 &= ~BIT(line);
}
static uint32_t rv32m1_intmux_get_state(const struct device *dev)
{
	/* Report 1 if any line is unmasked on any channel, 0 otherwise. */
	INTMUX_Type *regs = DEV_REGS(dev);

	for (size_t ch = 0; ch < INTMUX_CHn_IER_31_0_COUNT; ch++) {
		if (regs->CHANNEL[ch].CHn_IER_31_0 != 0) {
			return 1;
		}
	}
	return 0;
}
static int rv32m1_intmux_get_line_state(const struct device *dev,
					unsigned int irq)
{
	INTMUX_Type *regs = DEV_REGS(dev);
	uint32_t mask = BIT(rv32m1_intmux_line(irq));

	/* 1 when the line's enable bit is set in its channel's IER. */
	return (regs->CHANNEL[rv32m1_intmux_channel(irq)].CHn_IER_31_0 & mask) != 0 ? 1 : 0;
}
/*
* IRQ handling.
*/
/* Index into the level-2 software ISR table for (channel, line). */
#define ISR_ENTRY(channel, line) \
	((channel) * CONFIG_MAX_IRQ_PER_AGGREGATOR + line)
/*
 * Level-1 ISR shared by all INTMUX channels; `arg` carries the channel
 * number. Reads CHn_VEC to identify the active level-2 line, then
 * dispatches to the corresponding software ISR table entry.
 */
static void rv32m1_intmux_isr(const void *arg)
{
	const struct device *const dev = DEVICE_DT_INST_GET(0);
	const struct rv32m1_intmux_config *config = dev->config;
	INTMUX_Type *regs = DEV_REGS(dev);
	uint32_t channel = POINTER_TO_UINT(arg);
	/* CHn_VEC holds (vector number << 2); shift back to a vector index */
	uint32_t line = (regs->CHANNEL[channel].CHn_VEC >> 2);
	struct _isr_table_entry *isr_base = config->isr_base;
	struct _isr_table_entry *entry;
	/*
	 * Make sure the vector is valid, there is a note of page 1243~1244
	 * of chapter 36 INTMUX of RV32M1 RM,
	 * Note: Unlike the NVIC, the INTMUX does not latch pending source
	 * interrupts. This means that the INTMUX output channel ISRs must
	 * check for and handle a 0 value of the CHn_VEC register to
	 * account for spurious interrupts.
	 */
	if (line < VECN_OFFSET) {
		return;
	}
	entry = &isr_base[ISR_ENTRY(channel, (line - VECN_OFFSET))];
	entry->isr(entry->arg);
}
/*
* Instance and initialization
*/
static const struct irq_next_level_api rv32m1_intmux_apis = {
.intr_enable = rv32m1_intmux_irq_enable,
.intr_disable = rv32m1_intmux_irq_disable,
.intr_get_state = rv32m1_intmux_get_state,
.intr_get_line_state = rv32m1_intmux_get_line_state,
};
static const struct rv32m1_intmux_config rv32m1_intmux_cfg = {
.regs = (INTMUX_Type *)DT_INST_REG_ADDR(0),
.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(0)),
.clock_subsys = UINT_TO_POINTER(DT_INST_CLOCKS_CELL(0, name)),
.isr_base = &_sw_isr_table[CONFIG_2ND_LVL_ISR_TBL_OFFSET],
};
/*
 * Device init: gate on the INTMUX clock, reset every channel (so stale
 * enables left by a bootloader cannot raise level-2 IRQs), then connect
 * and enable the level-1 channel interrupts selected by Kconfig.
 *
 * @return 0 on success, -ENODEV when the clock controller is not ready.
 */
static int rv32m1_intmux_init(const struct device *dev)
{
	const struct rv32m1_intmux_config *config = dev->config;
	INTMUX_Type *regs = DEV_REGS(dev);
	size_t i;
	if (!device_is_ready(config->clock_dev)) {
		return -ENODEV;
	}
	/* Enable INTMUX clock. */
	clock_control_on(config->clock_dev, config->clock_subsys);
	/*
	 * Reset all channels, not just the ones we're configured to
	 * support. We don't want to continue to take level 2 IRQs
	 * enabled by bootloaders, for example.
	 */
	for (i = 0; i < INTMUX_CHn_CSR_COUNT; i++) {
		regs->CHANNEL[i].CHn_CSR |= INTMUX_CHn_CSR_RST_MASK;
	}
	/* Connect and enable level 1 (channel) interrupts. */
#ifdef CONFIG_RV32M1_INTMUX_CHANNEL_0
	IRQ_CONNECT(INTMUX_CH0_IRQ, 0, rv32m1_intmux_isr,
		    UINT_TO_POINTER(0), 0);
	irq_enable(INTMUX_CH0_IRQ);
#endif
#ifdef CONFIG_RV32M1_INTMUX_CHANNEL_1
	IRQ_CONNECT(INTMUX_CH1_IRQ, 0, rv32m1_intmux_isr,
		    UINT_TO_POINTER(1), 0);
	irq_enable(INTMUX_CH1_IRQ);
#endif
#ifdef CONFIG_RV32M1_INTMUX_CHANNEL_2
	IRQ_CONNECT(INTMUX_CH2_IRQ, 0, rv32m1_intmux_isr,
		    UINT_TO_POINTER(2), 0);
	irq_enable(INTMUX_CH2_IRQ);
#endif
#ifdef CONFIG_RV32M1_INTMUX_CHANNEL_3
	IRQ_CONNECT(INTMUX_CH3_IRQ, 0, rv32m1_intmux_isr,
		    UINT_TO_POINTER(3), 0);
	irq_enable(INTMUX_CH3_IRQ);
#endif
#ifdef CONFIG_RV32M1_INTMUX_CHANNEL_4
	IRQ_CONNECT(INTMUX_CH4_IRQ, 0, rv32m1_intmux_isr,
		    UINT_TO_POINTER(4), 0);
	irq_enable(INTMUX_CH4_IRQ);
#endif
#ifdef CONFIG_RV32M1_INTMUX_CHANNEL_5
	IRQ_CONNECT(INTMUX_CH5_IRQ, 0, rv32m1_intmux_isr,
		    UINT_TO_POINTER(5), 0);
	irq_enable(INTMUX_CH5_IRQ);
#endif
#ifdef CONFIG_RV32M1_INTMUX_CHANNEL_6
	IRQ_CONNECT(INTMUX_CH6_IRQ, 0, rv32m1_intmux_isr,
		    UINT_TO_POINTER(6), 0);
	irq_enable(INTMUX_CH6_IRQ);
#endif
#ifdef CONFIG_RV32M1_INTMUX_CHANNEL_7
	IRQ_CONNECT(INTMUX_CH7_IRQ, 0, rv32m1_intmux_isr,
		    UINT_TO_POINTER(7), 0);
	irq_enable(INTMUX_CH7_IRQ);
#endif
	return 0;
}
DEVICE_DT_INST_DEFINE(0, &rv32m1_intmux_init, NULL, NULL,
&rv32m1_intmux_cfg, PRE_KERNEL_1,
CONFIG_RV32M1_INTMUX_INIT_PRIORITY, &rv32m1_intmux_apis);
#define INTC_CHILD_IRQ_ENTRY_DEF(node_id) \
IRQ_PARENT_ENTRY_DEFINE(CONCAT(DT_DRV_COMPAT, _child_, DT_NODE_CHILD_IDX(node_id)), NULL, \
DT_IRQN(node_id), INTC_CHILD_ISR_TBL_OFFSET(node_id), \
DT_INTC_GET_AGGREGATOR_LEVEL(node_id));
DT_INST_FOREACH_CHILD_STATUS_OKAY(0, INTC_CHILD_IRQ_ENTRY_DEF);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_rv32m1_intmux.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,972 |
```unknown
# Intel VT-D interrupt remapping controller configuration
menuconfig INTEL_VTD_ICTL
bool "Intel VT-D interrupt remapping controller"
default y
depends on DT_HAS_INTEL_VT_D_ENABLED
depends on !BOARD_QEMU_X86_64 && ACPI && X86 && 64BIT && PCIE_MSI_MULTI_VECTOR
select CACHE_MANAGEMENT
help
Such interrupt remapping hardware is provided through Intel VT-D
technology. It's being used, currently, only for MSI/MSI-X
multi-vector support. If you have such PCIe device requiring
multi-vector support, you will need to enable this.
if INTEL_VTD_ICTL
config INTEL_VTD_ICTL_XAPIC_PASSTHROUGH
bool "XAPIC mode pass-through"
depends on !X2APIC
help
If XAPIC mode is enabled, it will avoid remapping all interrupts.
config INTEL_VTD_ICTL_NO_SRC_ID_CHECK
bool "Never check the source id"
help
Disable the source id check in IRTE.
config INTEL_VTD_ICTL_INIT_PRIORITY
int "Initialization priority"
default 40
help
This device should be initialized as soon as possible, before any
other device that would require it for MSI/MSI-X multi-vector support.
endif # INTEL_VTD_ICTL
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.intel_vtd | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 290 |
```c
/*
*
*/
#define DT_DRV_COMPAT nxp_s32_wkpu
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/irq.h>
#include <zephyr/sys/sys_io.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/drivers/interrupt_controller/intc_wkpu_nxp_s32.h>
/* NMI Status Flag Register */
#define WKPU_NSR 0x0
/* NMI Configuration Register */
#define WKPU_NCR 0x8
/* Wakeup/Interrupt Status Flag Register */
#define WKPU_WISR(n) (0x14 + 0x40 * (n))
/* Interrupt Request Enable Register */
#define WKPU_IRER(n) (0x18 + 0x40 * (n))
/* Wakeup Request Enable Register */
#define WKPU_WRER(n) (0x1c + 0x40 * (n))
/* Wakeup/Interrupt Rising-Edge Event Enable Register */
#define WKPU_WIREER(n) (0x28 + 0x40 * (n))
/* Wakeup/Interrupt Falling-Edge Event Enable Register */
#define WKPU_WIFEER(n) (0x2c + 0x40 * (n))
/* Wakeup/Interrupt Filter Enable Register */
#define WKPU_WIFER(n) (0x30 + 0x40 * (n))
/* Handy accessors */
#define REG_READ(r) sys_read32(config->base + (r))
#define REG_WRITE(r, v) sys_write32((v), config->base + (r))
struct wkpu_nxp_s32_config {
mem_addr_t base;
uint64_t filter_enable;
};
struct wkpu_nxp_s32_cb {
wkpu_nxp_s32_callback_t cb;
uint8_t pin;
void *data;
};
struct wkpu_nxp_s32_data {
struct wkpu_nxp_s32_cb *cb;
};
/*
 * Shared ISR: service every pending-and-enabled WKPU source, lowest
 * line first, acknowledging each flag before running its callback.
 */
static void wkpu_nxp_s32_interrupt_handler(const struct device *dev)
{
	const struct wkpu_nxp_s32_config *config = dev->config;
	struct wkpu_nxp_s32_data *data = dev->data;
	uint64_t pending = wkpu_nxp_s32_get_pending(dev);
	uint64_t irq_mask;
	int irq;
	while (pending) {
		irq_mask = LSB_GET(pending);
		irq = u64_count_trailing_zeros(irq_mask);
		/*
		 * Clear only this source's status flag (WISR is
		 * write-1-to-clear). The previous write of
		 * `REG_READ(WISR) | irq_mask` had two defects:
		 *  - for irq >= 32 the 64-bit irq_mask was truncated by
		 *    the 32-bit register write, so the flag never cleared;
		 *  - OR-ing in the read-back value wrote 1 to every
		 *    pending bit, discarding events that arrived after
		 *    the `pending` snapshot above.
		 */
		REG_WRITE(WKPU_WISR(irq / 32U), BIT(irq % 32U));
		if (data->cb[irq].cb != NULL) {
			data->cb[irq].cb(data->cb[irq].pin, data->cb[irq].data);
		}
		pending ^= irq_mask;
	}
}
int wkpu_nxp_s32_set_callback(const struct device *dev, uint8_t irq, uint8_t pin,
			      wkpu_nxp_s32_callback_t cb, void *arg)
{
	struct wkpu_nxp_s32_data *data = dev->data;

	__ASSERT_NO_MSG(irq < CONFIG_NXP_S32_WKPU_SOURCES_MAX);

	struct wkpu_nxp_s32_cb *slot = &data->cb[irq];

	/* Re-registering the identical callback/arg pair is a no-op */
	if ((slot->cb == cb) && (slot->data == arg)) {
		return 0;
	}
	/* A different callback already owns this line */
	if (slot->cb != NULL) {
		return -EBUSY;
	}
	slot->cb = cb;
	slot->pin = pin;
	slot->data = arg;
	return 0;
}
void wkpu_nxp_s32_unset_callback(const struct device *dev, uint8_t irq)
{
struct wkpu_nxp_s32_data *data = dev->data;
__ASSERT_NO_MSG(irq < CONFIG_NXP_S32_WKPU_SOURCES_MAX);
data->cb[irq].cb = NULL;
data->cb[irq].pin = 0;
data->cb[irq].data = NULL;
}
/*
 * Configure the edge detectors for one WKPU source per `trigger`,
 * acknowledge any stale event, then unmask the interrupt request.
 */
void wkpu_nxp_s32_enable_interrupt(const struct device *dev, uint8_t irq,
				   enum wkpu_nxp_s32_trigger trigger)
{
	const struct wkpu_nxp_s32_config *config = dev->config;
	uint32_t mask = BIT(irq % 32U);
	uint8_t reg_idx = irq / 32U;
	uint32_t reg_val;
	__ASSERT_NO_MSG(irq < CONFIG_NXP_S32_WKPU_SOURCES_MAX);
	/* Rising-edge detector */
	reg_val = REG_READ(WKPU_WIREER(reg_idx));
	if ((trigger == WKPU_NXP_S32_RISING_EDGE) || (trigger == WKPU_NXP_S32_BOTH_EDGES)) {
		reg_val |= mask;
	} else {
		reg_val &= ~mask;
	}
	REG_WRITE(WKPU_WIREER(reg_idx), reg_val);
	/* Falling-edge detector */
	reg_val = REG_READ(WKPU_WIFEER(reg_idx));
	if ((trigger == WKPU_NXP_S32_FALLING_EDGE) || (trigger == WKPU_NXP_S32_BOTH_EDGES)) {
		reg_val |= mask;
	} else {
		reg_val &= ~mask;
	}
	REG_WRITE(WKPU_WIFEER(reg_idx), reg_val);
	/*
	 * WISR is write-1-to-clear: write only this line's bit. The
	 * previous `REG_READ(WISR) | mask` also cleared every other
	 * pending flag in the register, dropping unserviced events.
	 */
	REG_WRITE(WKPU_WISR(reg_idx), mask);
	/* Unmask the interrupt request */
	REG_WRITE(WKPU_IRER(reg_idx), REG_READ(WKPU_IRER(reg_idx)) | mask);
}
/*
 * Disable both edge detectors of one WKPU source, acknowledge any
 * latched event and mask its interrupt request.
 */
void wkpu_nxp_s32_disable_interrupt(const struct device *dev, uint8_t irq)
{
	const struct wkpu_nxp_s32_config *config = dev->config;
	uint32_t mask = BIT(irq % 32U);
	uint8_t reg_idx = irq / 32U;
	__ASSERT_NO_MSG(irq < CONFIG_NXP_S32_WKPU_SOURCES_MAX);
	/* Disable triggers */
	REG_WRITE(WKPU_WIREER(reg_idx), REG_READ(WKPU_WIREER(reg_idx)) & ~mask);
	REG_WRITE(WKPU_WIFEER(reg_idx), REG_READ(WKPU_WIFEER(reg_idx)) & ~mask);
	/*
	 * WISR is write-1-to-clear: write only this line's bit. The
	 * previous `REG_READ(WISR) | mask` also cleared every other
	 * pending flag in the register, dropping unserviced events.
	 */
	REG_WRITE(WKPU_WISR(reg_idx), mask);
	/* Mask the interrupt request */
	REG_WRITE(WKPU_IRER(reg_idx), REG_READ(WKPU_IRER(reg_idx)) & ~mask);
}
uint64_t wkpu_nxp_s32_get_pending(const struct device *dev)
{
	const struct wkpu_nxp_s32_config *config = dev->config;
	/* A source is "pending" when its status flag is set AND its
	 * interrupt request is enabled.
	 */
	uint64_t pending = REG_READ(WKPU_WISR(0U)) & REG_READ(WKPU_IRER(0U));

	if (CONFIG_NXP_S32_WKPU_SOURCES_MAX > 32U) {
		uint64_t high = REG_READ(WKPU_WISR(1U)) & REG_READ(WKPU_IRER(1U));

		pending |= high << 32U;
	}
	return pending;
}
/*
 * Common init: disable all edge detectors, clear every latched status
 * flag (W1C), mask all requests and program the devicetree-selected
 * glitch filters. Always returns 0.
 */
static int wkpu_nxp_s32_init(const struct device *dev)
{
	const struct wkpu_nxp_s32_config *config = dev->config;
	/* Disable triggers, clear status flags and mask all interrupts */
	REG_WRITE(WKPU_WIREER(0U), 0U);
	REG_WRITE(WKPU_WIFEER(0U), 0U);
	REG_WRITE(WKPU_WISR(0U), 0xffffffff);
	REG_WRITE(WKPU_IRER(0U), 0U);
	/* Config IRQ and glitch filters for sources 0..31 */
	REG_WRITE(WKPU_WIFER(0U), (uint32_t)config->filter_enable);
	if (CONFIG_NXP_S32_WKPU_SOURCES_MAX > 32U) {
		/* Second register bank covers sources 32..63 */
		REG_WRITE(WKPU_WIREER(1U), 0U);
		REG_WRITE(WKPU_WIFEER(1U), 0U);
		REG_WRITE(WKPU_WISR(1U), 0xffffffff);
		REG_WRITE(WKPU_IRER(1U), 0U);
		REG_WRITE(WKPU_WIFER(1U), (uint32_t)(config->filter_enable >> 32U));
	}
	return 0;
}
#define WKPU_NXP_S32_FILTER_CONFIG(idx, n) \
COND_CODE_1(DT_PROP(DT_INST_CHILD(n, irq_##idx), filter_enable), (BIT(idx)), (0U))
#define WKPU_NXP_S32_INIT_DEVICE(n) \
static const struct wkpu_nxp_s32_config wkpu_nxp_s32_conf_##n = { \
.base = DT_INST_REG_ADDR(n), \
.filter_enable = LISTIFY(CONFIG_NXP_S32_WKPU_SOURCES_MAX, \
WKPU_NXP_S32_FILTER_CONFIG, (|), n), \
}; \
static struct wkpu_nxp_s32_cb wkpu_nxp_s32_cb_##n[CONFIG_NXP_S32_WKPU_SOURCES_MAX]; \
static struct wkpu_nxp_s32_data wkpu_nxp_s32_data_##n = { \
.cb = wkpu_nxp_s32_cb_##n, \
}; \
static int wkpu_nxp_s32_init_##n(const struct device *dev) \
{ \
int err; \
\
err = wkpu_nxp_s32_init(dev); \
if (err) { \
return err; \
} \
\
IRQ_CONNECT(DT_INST_IRQ(n, irq), DT_INST_IRQ(n, priority), \
wkpu_nxp_s32_interrupt_handler, DEVICE_DT_INST_GET(n), \
COND_CODE_1(CONFIG_GIC, (DT_INST_IRQ(n, flags)), (0U))); \
irq_enable(DT_INST_IRQ(n, irq)); \
\
return 0; \
} \
DEVICE_DT_INST_DEFINE(n, wkpu_nxp_s32_init_##n, NULL, &wkpu_nxp_s32_data_##n, \
&wkpu_nxp_s32_conf_##n, PRE_KERNEL_2, CONFIG_INTC_INIT_PRIORITY, \
NULL);
DT_INST_FOREACH_STATUS_OKAY(WKPU_NXP_S32_INIT_DEVICE)
``` | /content/code_sandbox/drivers/interrupt_controller/intc_wkpu_nxp_s32.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,193 |
```unknown
config HAS_ITE_INTC
bool
help
This option is selected when ITE_IT8XXX2_INTC or
ITE_IT8XXX2_INTC_V2 is enabled.
config ITE_IT8XXX2_INTC
def_bool DT_HAS_ITE_IT8XXX2_INTC_ENABLED
depends on DT_HAS_ITE_IT8XXX2_INTC_ENABLED
select HAS_ITE_INTC
	help
	  This option enables the interrupt controller driver for the ITE
	  IT8XXX2 family. It is enabled by default when a matching
	  devicetree node is present and enabled.
config ITE_IT8XXX2_INTC_V2
def_bool DT_HAS_ITE_IT8XXX2_INTC_V2_ENABLED
depends on DT_HAS_ITE_IT8XXX2_INTC_V2_ENABLED
select HAS_ITE_INTC
help
This option enables the interrupt controller for IT82XX2 family.
config ITE_IT8XXX2_WUC
bool "ITE it8xxx2 Wakeup controller (WUC) interface"
default y
depends on DT_HAS_ITE_IT8XXX2_WUC_ENABLED
help
This option enables the wakeup controller interface for IT8XXX2
family.
This is required for KSCAN, UART, eSPI, GPIO etc., interrupt support.
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.it8xxx2 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 281 |
```objective-c
/* ioapic_priv.h - private IOAPIC APIs */
/*
*
*/
#ifndef ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_IOAPIC_PRIV_H_
#define ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_IOAPIC_PRIV_H_
/* IO APIC direct register offsets */
#define IOAPIC_IND 0x00 /* Index Register */
#define IOAPIC_DATA 0x10 /* IO window (data) - pc.h */
#define IOAPIC_IRQPA 0x20 /* IRQ Pin Assertion Register */
#define IOAPIC_EOI 0x40 /* EOI Register */
/* IO APIC indirect register offset */
#define IOAPIC_ID 0x00 /* IOAPIC ID */
#define IOAPIC_VERS 0x01 /* IOAPIC Version */
#define IOAPIC_ARB 0x02 /* IOAPIC Arbitration ID */
#define IOAPIC_BOOT 0x03 /* IOAPIC Boot Configuration */
#define IOAPIC_REDTBL 0x10 /* Redirection Table (24 * 64bit) */
/* Interrupt delivery type */
#define IOAPIC_DT_APIC 0x0 /* APIC serial bus */
#define IOAPIC_DT_FS 0x1 /* Front side bus message*/
/* Version register bits */
#define IOAPIC_MRE_MASK 0x00ff0000 /* Max Red. entry mask */
#define IOAPIC_MRE_POS 16
#define IOAPIC_PRQ 0x00008000 /* this has IRQ reg */
#define IOAPIC_VERSION 0x000000ff /* version number */
/* Redirection table entry bits: upper 32 bit */
#define IOAPIC_DESTINATION 0xff000000
/* Redirection table entry bits: lower 32 bit */
#define IOAPIC_VEC_MASK 0x000000ff
/* VTD related macros */
#define IOAPIC_VTD_REMAP_FORMAT BIT(16)
/* We care only about the first 14 bits.
 * The 15th bits is in the first 32bits of RTE but since
 * we don't go up to that value, let's ignore it.
 *
 * The argument is parenthesized so that expression arguments
 * (e.g. `base + i`) expand correctly under operator precedence.
 */
#define IOAPIC_VTD_INDEX(index) ((index) << 17)
#endif /* ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_IOAPIC_PRIV_H_ */
``` | /content/code_sandbox/drivers/interrupt_controller/intc_ioapic_priv.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 481 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_CAVS_H_
#define ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_CAVS_H_
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Hook used by each instance to connect and enable its parent IRQ. */
typedef void (*cavs_ictl_config_irq_t)(const struct device *port);
/* Static (ROM-able) per-instance configuration. */
struct cavs_ictl_config {
	uint32_t irq_num;          /* parent-level IRQ this controller feeds */
	uint32_t isr_table_offset; /* base index into the SW ISR table */
	cavs_ictl_config_irq_t config_func;
};
/* Mutable per-instance state: base address of the register block. */
struct cavs_ictl_runtime {
	uint32_t base_addr;
};
/* Register layout of one CAVS interrupt-level block. */
struct cavs_registers {
	uint32_t disable_il;	/* il_msd - offset 0x00 */
	uint32_t enable_il;	/* il_mcd - offset 0x04 */
	uint32_t disable_state_il;	/* il_md - offset 0x08 */
	uint32_t status_il;	/* il_sd - offset 0x0C */
};
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_CAVS_H_ */
``` | /content/code_sandbox/drivers/interrupt_controller/intc_cavs.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 226 |
```c
/*
*
*/
#define DT_DRV_COMPAT ite_it8xxx2_wuc
#include <zephyr/device.h>
#include <zephyr/drivers/interrupt_controller/wuc_ite_it8xxx2.h>
#include <zephyr/dt-bindings/interrupt-controller/it8xxx2-wuc.h>
#include <zephyr/kernel.h>
#include <soc.h>
#include <soc_common.h>
#include <stdlib.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(wuc_ite_it8xxx2, CONFIG_INTC_LOG_LEVEL);
/* Driver config */
struct it8xxx2_wuc_cfg {
/* WUC wakeup edge mode register */
uint8_t *reg_wuemr;
/* WUC wakeup edge sense register */
uint8_t *reg_wuesr;
/* WUC wakeup enable register */
uint8_t *reg_wuenr;
/* WUC wakeup both edge mode register */
uint8_t *reg_wubemr;
};
void it8xxx2_wuc_enable(const struct device *dev, uint8_t mask)
{
	const struct it8xxx2_wuc_cfg *config = dev->config;
	volatile uint8_t *reg_wuenr = config->reg_wuenr;

	/*
	 * Only WUC groups 1, 3 and 4 expose an enable/disable register;
	 * all other groups are always enabled in hardware.
	 */
	if (reg_wuenr == IT8XXX2_WUC_UNUSED_REG) {
		return;
	}
	/* Enable the wakeup interrupt for the requested pin(s) */
	*reg_wuenr = (uint8_t)(*reg_wuenr | mask);
}
void it8xxx2_wuc_disable(const struct device *dev, uint8_t mask)
{
	const struct it8xxx2_wuc_cfg *config = dev->config;
	volatile uint8_t *reg_wuenr = config->reg_wuenr;

	/*
	 * Only WUC groups 1, 3 and 4 expose an enable/disable register;
	 * all other groups are always enabled in hardware.
	 */
	if (reg_wuenr == IT8XXX2_WUC_UNUSED_REG) {
		return;
	}
	/* Disable the wakeup interrupt for the requested pin(s) */
	*reg_wuenr = (uint8_t)(*reg_wuenr & ~mask);
}
void it8xxx2_wuc_clear_status(const struct device *dev, uint8_t mask)
{
	const struct it8xxx2_wuc_cfg *config = dev->config;
	volatile uint8_t *reg_wuesr = config->reg_wuesr;

	if (reg_wuesr == IT8XXX2_WUC_UNUSED_REG) {
		return;
	}
	/* Status bits are write-1-to-clear: write only the target mask */
	*reg_wuesr = mask;
}
/*
 * Select the wakeup trigger edge(s) for the pin(s) in `mask`.
 * WUEMR picks rising (0) vs falling (1); WUBEMR, when set, overrides
 * WUEMR and enables both-edge detection.
 */
void it8xxx2_wuc_set_polarity(const struct device *dev, uint8_t mask, uint32_t flags)
{
	const struct it8xxx2_wuc_cfg *config = dev->config;
	volatile uint8_t *reg_wuemr = config->reg_wuemr;
	volatile uint8_t *reg_wubemr = config->reg_wubemr;
	if (reg_wuemr == IT8XXX2_WUC_UNUSED_REG) {
		return;
	}
	/* Set wakeup interrupt edge trigger mode of the pin */
	if ((flags & WUC_TYPE_EDGE_BOTH) == WUC_TYPE_EDGE_RISING) {
		*reg_wubemr &= ~mask;
		*reg_wuemr &= ~mask;
	} else if ((flags & WUC_TYPE_EDGE_BOTH) == WUC_TYPE_EDGE_FALLING) {
		*reg_wubemr &= ~mask;
		*reg_wuemr |= mask;
	} else {
		/* Both edge trigger mode */
		*reg_wubemr |= mask;
	}
}
#define IT8XXX2_WUC_INIT(inst) \
\
static const struct it8xxx2_wuc_cfg it8xxx2_wuc_cfg_##inst = { \
.reg_wuemr = (uint8_t *) DT_INST_REG_ADDR_BY_IDX(inst, 0), \
.reg_wuesr = (uint8_t *) DT_INST_REG_ADDR_BY_IDX(inst, 1), \
.reg_wuenr = (uint8_t *) DT_INST_REG_ADDR_BY_IDX(inst, 2), \
.reg_wubemr = (uint8_t *) DT_INST_REG_ADDR_BY_IDX(inst, 3), \
}; \
\
DEVICE_DT_INST_DEFINE(inst, \
NULL, \
NULL, \
NULL, \
&it8xxx2_wuc_cfg_##inst, \
PRE_KERNEL_1, \
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS, \
NULL);
DT_INST_FOREACH_STATUS_OKAY(IT8XXX2_WUC_INIT)
``` | /content/code_sandbox/drivers/interrupt_controller/wuc_ite_it8xxx2.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 997 |
```c
*
*/
#define DT_DRV_COMPAT ti_vim
#include <stdint.h>
#include <zephyr/arch/arm/irq.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/interrupt_controller/intc_vim.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/util_macro.h>
LOG_MODULE_REGISTER(vim);
/*
 * Read and acknowledge the currently active IRQ from the VIM.
 *
 * Returns the active interrupt number, or CONFIG_NUM_IRQS + 1 (treated by
 * the caller as a spurious interrupt) when no valid interrupt is pending
 * or the reported group is out of range.
 */
unsigned int z_vim_irq_get_active(void)
{
	uint32_t irq_group_num, irq_bit_num;
	uint32_t actirq, vec_addr;

	/* Reading IRQVEC register, ACTIRQ gets loaded with valid IRQ values */
	vec_addr = sys_read32(VIM_IRQVEC);
	/* ACTIRQ register should be read only after reading IRQVEC register */
	actirq = sys_read32(VIM_ACTIRQ);

	/* Check if the irq number is valid, else return invalid irq number.
	 * which will be considered as spurious interrupt
	 */
	if ((actirq & (VIM_ACTIRQ_VALID_MASK)) == 0) {
		return CONFIG_NUM_IRQS + 1;
	}

	irq_group_num = VIM_GET_IRQ_GROUP_NUM(actirq & VIM_PRIIRQ_NUM_MASK);
	irq_bit_num = VIM_GET_IRQ_BIT_NUM(actirq & VIM_PRIIRQ_NUM_MASK);

	/* Validate the group number BEFORE acknowledging: the previous code
	 * wrote VIM_IRQSTS(irq_group_num) first and range-checked afterwards,
	 * which could target an out-of-range status register address.
	 */
	if (irq_group_num > VIM_MAX_GROUP_NUM) {
		return (CONFIG_NUM_IRQS + 1);
	}

	/* Ack the interrupt in IRQSTS register */
	sys_write32(BIT(irq_bit_num), VIM_IRQSTS(irq_group_num));

	return (actirq & VIM_ACTIRQ_NUM_MASK);
}
/* Signal end-of-interrupt to the VIM; presumably any write to IRQVEC
 * completes the active interrupt, so @irq itself is unused -- TODO confirm
 * against the VIM register description.
 */
void z_vim_irq_eoi(unsigned int irq)
{
	sys_write32(0, VIM_IRQVEC);
}
/* One-time controller init: only logs the IRQ count reported by hardware. */
void z_vim_irq_init(void)
{
	uint32_t num_of_irqs = sys_read32(VIM_INFO_INTERRUPTS_MASK);
	LOG_DBG("VIM: Number of IRQs = %u\n", num_of_irqs);
}
/*
 * Set the priority and trigger type of interrupt @irq.
 *
 * @irq   interrupt number, 0 .. CONFIG_NUM_IRQS - 1
 * @prio  priority, 0 .. VIM_PRI_INT_MAX
 * @flags IRQ_TYPE_EDGE or IRQ_TYPE_LEVEL
 *
 * Invalid arguments are logged and ignored.
 */
void z_vim_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
{
	uint32_t irq_group_num, irq_bit_num, regval;

	/* Valid IRQ ids are 0 .. CONFIG_NUM_IRQS - 1; the previous '>'
	 * comparison let irq == CONFIG_NUM_IRQS through the bounds check.
	 */
	if (irq >= CONFIG_NUM_IRQS || prio > VIM_PRI_INT_MAX ||
	    (flags != IRQ_TYPE_EDGE && flags != IRQ_TYPE_LEVEL)) {
		LOG_ERR("%s: Invalid argument irq = %u prio = %u flags = %u\n",
			__func__, irq, prio, flags);
		return;
	}

	/* Per-interrupt priority register */
	sys_write8(prio, VIM_PRI_INT(irq));

	irq_group_num = VIM_GET_IRQ_GROUP_NUM(irq);
	irq_bit_num = VIM_GET_IRQ_BIT_NUM(irq);

	/* Read-modify-write the group's trigger-type register:
	 * bit set = edge triggered, bit clear = level triggered.
	 */
	regval = sys_read32(VIM_INTTYPE(irq_group_num));
	if (flags == IRQ_TYPE_EDGE) {
		regval |= (BIT(irq_bit_num));
	} else {
		regval &= ~(BIT(irq_bit_num));
	}
	sys_write32(regval, VIM_INTTYPE(irq_group_num));
}
/*
 * Enable (unmask) interrupt @irq by setting its bit in the per-group
 * interrupt-enable-set register. Invalid numbers are logged and ignored.
 */
void z_vim_irq_enable(unsigned int irq)
{
	uint32_t irq_group_num, irq_bit_num;

	/* Valid IRQ ids are 0 .. CONFIG_NUM_IRQS - 1; the previous '>'
	 * comparison let irq == CONFIG_NUM_IRQS through the bounds check.
	 */
	if (irq >= CONFIG_NUM_IRQS) {
		LOG_ERR("%s: Invalid irq number = %u\n", __func__, irq);
		return;
	}

	irq_group_num = VIM_GET_IRQ_GROUP_NUM(irq);
	irq_bit_num = VIM_GET_IRQ_BIT_NUM(irq);
	sys_write32(BIT(irq_bit_num), VIM_INTR_EN_SET(irq_group_num));
}
/*
 * Disable (mask) interrupt @irq by setting its bit in the per-group
 * interrupt-enable-clear register. Invalid numbers are logged and ignored.
 */
void z_vim_irq_disable(unsigned int irq)
{
	uint32_t irq_group_num, irq_bit_num;

	/* Valid IRQ ids are 0 .. CONFIG_NUM_IRQS - 1; the previous '>'
	 * comparison let irq == CONFIG_NUM_IRQS through the bounds check.
	 */
	if (irq >= CONFIG_NUM_IRQS) {
		LOG_ERR("%s: Invalid irq number = %u\n", __func__, irq);
		return;
	}

	irq_group_num = VIM_GET_IRQ_GROUP_NUM(irq);
	irq_bit_num = VIM_GET_IRQ_BIT_NUM(irq);
	sys_write32(BIT(irq_bit_num), VIM_INTR_EN_CLR(irq_group_num));
}
/*
 * Report whether interrupt @irq is currently enabled.
 *
 * Returns 1 if enabled, 0 if disabled, -EINVAL for an invalid number.
 */
int z_vim_irq_is_enabled(unsigned int irq)
{
	uint32_t irq_group_num, irq_bit_num, regval;

	/* Valid IRQ ids are 0 .. CONFIG_NUM_IRQS - 1; the previous '>'
	 * comparison let irq == CONFIG_NUM_IRQS through the bounds check.
	 */
	if (irq >= CONFIG_NUM_IRQS) {
		LOG_ERR("%s: Invalid irq number = %u\n", __func__, irq);
		return -EINVAL;
	}

	irq_group_num = VIM_GET_IRQ_GROUP_NUM(irq);
	irq_bit_num = VIM_GET_IRQ_BIT_NUM(irq);

	/* The enable-set register reads back the current enable state */
	regval = sys_read32(VIM_INTR_EN_SET(irq_group_num));
	return !!(regval & (BIT(irq_bit_num)));
}
/*
 * Software-trigger interrupt @irq by setting its bit in the per-group raw
 * status register. Invalid numbers are logged and ignored.
 */
void z_vim_arm_enter_irq(int irq)
{
	uint32_t irq_group_num, irq_bit_num;

	/* Valid IRQ ids are 0 .. CONFIG_NUM_IRQS - 1; the previous '>'
	 * comparison let irq == CONFIG_NUM_IRQS through the bounds check.
	 */
	if (irq >= CONFIG_NUM_IRQS) {
		LOG_ERR("%s: Invalid irq number = %u\n", __func__, irq);
		return;
	}

	irq_group_num = VIM_GET_IRQ_GROUP_NUM(irq);
	irq_bit_num = VIM_GET_IRQ_BIT_NUM(irq);
	sys_write32(BIT(irq_bit_num), VIM_RAW(irq_group_num));
}
``` | /content/code_sandbox/drivers/interrupt_controller/intc_vim.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,067 |
```c
/*
*
*/
/*
* This is a driver for the GRLIB IRQMP interrupt controller common in LEON
* systems.
*
* Interrupt level 1..15 are SPARC interrupts. Interrupt level 16..31, if
* implemented in the interrupt controller, are IRQMP "extended interrupts".
*
* For more information about IRQMP, see the GRLIB IP Core User's Manual.
*/
#define DT_DRV_COMPAT gaisler_irqmp
#include <zephyr/kernel.h>
#include <zephyr/device.h>
/*
* Register description for IRQMP and IRQAMP interrupt controllers
* IRQMP - Multiprocessor Interrupt Controller
* IRQ(A)MP - Multiprocessor Interrupt Controller with extended ASMP support
*/
/* Maximum number of CPUs supported by the IRQMP per-CPU register banks */
#define IRQMP_NCPU_MAX 16
/* Memory-mapped register layout of the IRQMP controller; hex comments give
 * the byte offset of each register from the controller base address.
 */
struct irqmp_regs {
	uint32_t ilevel;                        /* 0x00 */
	uint32_t ipend;                         /* 0x04 */
	uint32_t iforce0;                       /* 0x08 */
	uint32_t iclear;                        /* 0x0c */
	uint32_t mpstat;                        /* 0x10 */
	uint32_t brdlst;                        /* 0x14 */
	uint32_t errstat;                       /* 0x18 */
	uint32_t wdogctrl;                      /* 0x1c */
	uint32_t asmpctrl;                      /* 0x20 */
	uint32_t icselr[2];                     /* 0x24 */
	uint32_t reserved2c;                    /* 0x2c */
	uint32_t reserved30;                    /* 0x30 */
	uint32_t reserved34;                    /* 0x34 */
	uint32_t reserved38;                    /* 0x38 */
	uint32_t reserved3c;                    /* 0x3c */
	uint32_t pimask[IRQMP_NCPU_MAX];        /* 0x40 */
	uint32_t piforce[IRQMP_NCPU_MAX];       /* 0x80 */
	uint32_t pextack[IRQMP_NCPU_MAX];       /* 0xc0 */
};
/* Extended interrupt ID field in the per-CPU extended-ack register */
#define IRQMP_PEXTACK_EID       (0x1f << 0)
/* Return the controller's register block, mapped at the devicetree address. */
static volatile struct irqmp_regs *get_irqmp_regs(void)
{
	return (struct irqmp_regs *) DT_INST_REG_ADDR(0);
}
/* Return the extended-interrupt line number from devicetree (0 = none). */
static int get_irqmp_eirq(void)
{
	return DT_INST_PROP(0, eirq);
}
void arch_irq_enable(unsigned int source)
{
volatile struct irqmp_regs *regs = get_irqmp_regs();
volatile uint32_t *pimask = ®s->pimask[0];
const uint32_t setbit = (1U << source);
unsigned int key;
key = arch_irq_lock();
*pimask |= setbit;
arch_irq_unlock(key);
}
void arch_irq_disable(unsigned int source)
{
volatile struct irqmp_regs *regs = get_irqmp_regs();
volatile uint32_t *pimask = ®s->pimask[0];
const uint32_t keepbits = ~(1U << source);
unsigned int key;
key = arch_irq_lock();
*pimask &= keepbits;
arch_irq_unlock(key);
}
int arch_irq_is_enabled(unsigned int source)
{
volatile struct irqmp_regs *regs = get_irqmp_regs();
volatile uint32_t *pimask = ®s->pimask[0];
return !!(*pimask & (1U << source));
}
/*
 * Translate the SPARC interrupt request level @irl to an IRQMP interrupt
 * source number. If the controller implements extended interrupts and @irl
 * is the configured extended-interrupt line, the actual source (16..31) is
 * read from the CPU0 extended-interrupt acknowledge register; a zero read
 * falls back to @irl itself.
 * NOTE(review): reading pextack presumably also acknowledges the extended
 * interrupt in hardware -- confirm against the GRLIB IP Core User's Manual.
 */
int z_sparc_int_get_source(int irl)
{
	volatile struct irqmp_regs *regs = get_irqmp_regs();
	const int eirq = get_irqmp_eirq();
	int source;
	if ((eirq != 0) && (irl == eirq)) {
		source = regs->pextack[0] & IRQMP_PEXTACK_EID;
		if (source == 0) {
			source = irl;
		}
	} else {
		source = irl;
	}
	return source;
}
/*
 * Bring the controller to a known state: clear interrupt levels, pending
 * interrupts and software-forced interrupts, and mask everything for CPU0.
 */
static int irqmp_init(const struct device *dev)
{
	volatile struct irqmp_regs *regs = get_irqmp_regs();
	regs->ilevel = 0;
	regs->ipend = 0;
	regs->iforce0 = 0;
	regs->pimask[0] = 0;
	/* NOTE(review): the upper 15 bits of piforce presumably act as a
	 * force-clear mask for interrupts 17..31 -- confirm against the
	 * GRLIB IRQMP register description.
	 */
	regs->piforce[0] = 0xfffe0000;
	return 0;
}
DEVICE_DT_INST_DEFINE(0, irqmp_init, NULL, NULL, NULL,
PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY, NULL);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_irqmp.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 945 |
```unknown
config PLIC
bool "Platform Level Interrupt Controller (PLIC)"
default y
depends on DT_HAS_SIFIVE_PLIC_1_0_0_ENABLED
select MULTI_LEVEL_INTERRUPTS
select 2ND_LEVEL_INTERRUPTS
help
Platform Level Interrupt Controller provides support
for external interrupt lines defined by the RISC-V SoC.
if PLIC
config PLIC_SHELL
bool "PLIC shell commands"
depends on SHELL
help
Enable additional shell commands useful for debugging.
Caution: This can use quite a bit of RAM (PLICs * IRQs * sizeof(uint16_t)).
endif # PLIC
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.plic | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 137 |
```objective-c
/*
*/
#ifndef ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_INTEL_VTD_H_
#define ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_INTEL_VTD_H_
#define VTD_INT_SHV BIT(3)
#define VTD_INT_FORMAT BIT(4)
/* We don't care about int_idx[15], since the size is fixed to 256,
* it's always 0
*/
#define VTD_MSI_MAP(int_idx, shv) \
((0x0FEE00000U) | (int_idx << 5) | shv | VTD_INT_FORMAT)
/* Interrupt Remapping Table Entry (IRTE) for Remapped Interrupts */
/* One 128-bit Interrupt Remapping Table Entry, accessible either as two
 * raw 64-bit words (for register programming) or as named bit fields.
 */
union vtd_irte {
	struct irte_parts {
		uint64_t low;
		uint64_t high;
	} parts;
	struct irte_bits {
		uint64_t present                : 1;
		uint64_t fpd                    : 1;
		uint64_t dst_mode               : 1;
		uint64_t redirection_hint       : 1;
		uint64_t trigger_mode           : 1;
		uint64_t delivery_mode          : 3;
		uint64_t available              : 4;
		uint64_t _reserved_0            : 3;
		uint64_t irte_mode              : 1;
		uint64_t vector                 : 8;
		uint64_t _reserved_1            : 8;
		uint64_t dst_id                 : 32;
		uint64_t src_id                 : 16;
		uint64_t src_id_qualifier       : 2;
		uint64_t src_validation_type    : 2;
		uint64_t _reserved              : 44;
	} bits __packed;
};
/* The table must be 4KB aligned, which is exactly 256 entries.
* And since we allow only 256 entries as a maximum: let's align to it.
*/
#define IRTE_NUM 256
#define IRTA_SIZE 7 /* size = 2^(X+1) where IRTA_SIZE is X 2^8 = 256 */
#define QI_NUM 256 /* Which is the minimal number we can set for the queue */
#define QI_SIZE 0 /* size = 2^(X+8) where QI_SIZE is X: 2^8 = 256 */
#define QI_WIDTH 128
struct qi_descriptor {
uint64_t low;
uint64_t high;
};
#define QI_TYPE_ICC 0x1UL
union qi_icc_descriptor {
struct qi_descriptor desc;
struct icc_bits {
uint64_t type : 4;
uint64_t granularity : 2;
uint64_t _reserved_0 : 3;
uint64_t zero : 3;
uint64_t _reserved_1 : 4;
uint64_t domain_id : 16;
uint64_t source_id : 16;
uint64_t function_mask : 2;
uint64_t _reserved_2 : 14;
uint64_t reserved;
} icc __packed;
};
#define QI_TYPE_IEC 0x4UL
union qi_iec_descriptor {
struct qi_descriptor desc;
struct iec_bits {
uint64_t type : 4;
uint64_t granularity : 1;
uint64_t _reserved_0 : 4;
uint64_t zero : 3;
uint64_t _reserved_1 : 15;
uint64_t index_mask : 5;
uint64_t interrupt_index: 16;
uint64_t _reserved_2 : 16;
uint64_t reserved;
} iec __packed;
};
#define QI_TYPE_WAIT 0x5UL
union qi_wait_descriptor {
struct qi_descriptor desc;
struct wait_bits {
uint64_t type : 4;
uint64_t interrupt_flag : 1;
uint64_t status_write : 1;
uint64_t fence_flag : 1;
uint64_t page_req_drain : 1;
uint64_t _reserved_0 : 1;
uint64_t zero : 3;
uint64_t _reserved_1 : 20;
uint64_t status_data : 32;
uint64_t reserved : 2;
uint64_t address : 62;
} wait __packed;
};
#define QI_WAIT_STATUS_INCOMPLETE 0x0UL
#define QI_WAIT_STATUS_COMPLETE 0x1UL
/* Arbitrary wait counter limit */
#define QI_WAIT_COUNT_LIMIT 100
/* Per-instance runtime state of the VT-d interrupt remapping driver. */
struct vtd_ictl_data {
	DEVICE_MMIO_RAM;
	/* Remapping table: must be 4KB aligned (exactly 256 entries) */
	union vtd_irte irte[IRTE_NUM] __aligned(0x1000);
	/* Invalidation queue descriptors, also 4KB aligned */
	struct qi_descriptor qi[QI_NUM] __aligned(0x1000);
	/* Per-IRTE bookkeeping: associated irq, vector, and MSI flag */
	int irqs[IRTE_NUM];
	int vectors[IRTE_NUM];
	bool msi[IRTE_NUM];
	int irte_num_used;
	unsigned int fault_irq;
	uintptr_t fault_record_reg;
	uint16_t fault_record_num;
	/* Next free slot in the invalidation queue */
	uint16_t qi_tail;
	uint8_t fault_vector;
	bool pwc;
};
/* Static (ROM) configuration: only the MMIO region of the remapping unit. */
struct vtd_ictl_cfg {
	DEVICE_MMIO_ROM;
};
#endif /* ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_INTEL_VTD_H_ */
``` | /content/code_sandbox/drivers/interrupt_controller/intc_intel_vtd.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,152 |
```unknown
config NXP_IRQSTEER
bool "IRQ_STEER interrupt controller for NXP chips"
default y
depends on DT_HAS_NXP_IRQSTEER_INTC_ENABLED
depends on MULTI_LEVEL_INTERRUPTS
depends on XTENSA
help
The IRQSTEER INTC provides support for MUX-ing
multiple interrupts from peripheral to one or
more CPU interrupt lines. This is used for CPUs
such as XTENSA DSPs.
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.nxp_irqsteer | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 103 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <soc.h>
#include <zephyr/drivers/interrupt_controller/intc_esp32.h>
#include <esp_memory_utils.h>
#include <esp_attr.h>
#include <esp_cpu.h>
#include <esp_private/rtc_ctrl.h>
#include <limits.h>
#include <assert.h>
#include <soc/soc.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(esp32_intc, CONFIG_LOG_DEFAULT_LEVEL);
#define ETS_INTERNAL_TIMER0_INTR_NO 6
#define ETS_INTERNAL_TIMER1_INTR_NO 15
#define ETS_INTERNAL_TIMER2_INTR_NO 16
#define ETS_INTERNAL_SW0_INTR_NO 7
#define ETS_INTERNAL_SW1_INTR_NO 29
#define ETS_INTERNAL_PROFILING_INTR_NO 11
#define VECDESC_FL_RESERVED (1 << 0)
#define VECDESC_FL_INIRAM (1 << 1)
#define VECDESC_FL_SHARED (1 << 2)
#define VECDESC_FL_NONSHARED (1 << 3)
/*
* Define this to debug the choices made when allocating the interrupt. This leads to much debugging
* output within a critical region, which can lead to weird effects like e.g. the interrupt watchdog
* being triggered, that is why it is separate from the normal LOG* scheme.
*/
#ifdef CONFIG_INTC_ESP32_DECISIONS_LOG
# define INTC_LOG(...) LOG_INF(__VA_ARGS__)
#else
# define INTC_LOG(...) do {} while (false)
#endif
/* Typedef for C-callable interrupt handler function */
typedef void (*intc_handler_t)(void *);
typedef void (*intc_dyn_handler_t)(const void *);
/* shared critical section context */
static int esp_intc_csec;
/* Enter the allocator's critical section.
 * NOTE(review): the irq_lock() key is kept in a single shared variable
 * (esp_intc_csec), so these sections must not nest -- confirm callers.
 */
static inline void esp_intr_lock(void)
{
	esp_intc_csec = irq_lock();
}
/* Leave the allocator's critical section using the last saved key. */
static inline void esp_intr_unlock(void)
{
	irq_unlock(esp_intc_csec);
}
/*
* Interrupt handler table and unhandled interrupt routine. Duplicated
* from xtensa_intr.c... it's supposed to be private, but we need to look
* into it in order to see if someone allocated an int using
* set_interrupt_handler.
*/
/* One handler/argument pair per CPU interrupt line. */
struct intr_alloc_table_entry {
	void (*handler)(void *arg);
	void *arg;
};
/* Default handler for unhandled interrupts. */
void IRAM_ATTR default_intr_handler(void *arg)
{
	/* arg carries the flat table index installed by esp_intr_initialize() */
	esp_rom_printf("Unhandled interrupt %d on cpu %d!\n", (int)arg, esp_cpu_get_core_id());
}
static struct intr_alloc_table_entry intr_alloc_table[ESP_INTC_INTS_NUM * CONFIG_MP_MAX_NUM_CPUS];
/* Install @f/@arg for CPU interrupt line @n: the line is first disabled,
 * the bookkeeping table updated, then the handler connected dynamically.
 * NOTE(review): the table index uses n * CONFIG_MP_MAX_NUM_CPUS without a
 * '+ cpu' offset, while intr_has_handler() adds one -- confirm this
 * asymmetry is intentional.
 */
static void set_interrupt_handler(int n, intc_handler_t f, void *arg)
{
	irq_disable(n);
	intr_alloc_table[n * CONFIG_MP_MAX_NUM_CPUS].handler = f;
	irq_connect_dynamic(n, 0, (intc_dyn_handler_t)f, arg, 0);
}
/* Linked list of vector descriptions, sorted by cpu.intno value */
static struct vector_desc_t *vector_desc_head; /* implicitly initialized to NULL */
/* This bitmask has an 1 if the int should be disabled when the flash is disabled. */
static uint32_t non_iram_int_mask[CONFIG_MP_MAX_NUM_CPUS];
/* This bitmask has 1 in it if the int was disabled using esp_intr_noniram_disable. */
static uint32_t non_iram_int_disabled[CONFIG_MP_MAX_NUM_CPUS];
static bool non_iram_int_disabled_flag[CONFIG_MP_MAX_NUM_CPUS];
/*
* Inserts an item into vector_desc list so that the list is sorted
* with an incrementing cpu.intno value.
*/
/*
 * Insert @to_insert into the global vector_desc list, keeping the list
 * sorted by ascending (cpu, intno).
 */
static void insert_vector_desc(struct vector_desc_t *to_insert)
{
	struct vector_desc_t **link = &vector_desc_head;

	/* Walk to the first node that should come after the new one */
	while (*link != NULL) {
		struct vector_desc_t *cur = *link;

		if (cur->cpu > to_insert->cpu ||
		    (cur->cpu == to_insert->cpu && cur->intno >= to_insert->intno)) {
			break;
		}
		link = &cur->next;
	}

	/* Splice in front of the found node (or at the tail/head) */
	to_insert->next = *link;
	*link = to_insert;
}
/* Returns a vector_desc entry for an intno/cpu, or NULL if none exists. */
/* Return the vector_desc for (intno, cpu), or NULL when none exists. */
static struct vector_desc_t *find_desc_for_int(int intno, int cpu)
{
	for (struct vector_desc_t *vd = vector_desc_head; vd != NULL; vd = vd->next) {
		if (vd->cpu == cpu && vd->intno == intno) {
			return vd;
		}
	}
	return NULL;
}
/*
* Returns a vector_desc entry for an intno/cpu.
* Either returns a preexisting one or allocates a new one and inserts
* it into the list. Returns NULL on malloc fail.
*/
/*
 * Return the vector_desc for (intno, cpu), allocating and inserting a
 * fresh zeroed entry when none exists yet. Returns NULL on allocation
 * failure.
 */
static struct vector_desc_t *get_desc_for_int(int intno, int cpu)
{
	struct vector_desc_t *vd = find_desc_for_int(intno, cpu);

	if (vd != NULL) {
		return vd;
	}

	vd = k_malloc(sizeof(*vd));
	if (vd == NULL) {
		return NULL;
	}

	memset(vd, 0, sizeof(*vd));
	vd->intno = intno;
	vd->cpu = cpu;
	insert_vector_desc(vd);
	return vd;
}
/*
* Returns a vector_desc entry for an source, the cpu parameter is used
* to tell GPIO_INT and GPIO_NMI from different CPUs
*/
/*
 * Find the vector descriptor currently servicing @source on @cpu.
 * A non-shared descriptor matches on its single source field; a shared
 * descriptor matches when any entry of its shared-vector chain carries
 * the source. Returns NULL when no descriptor services the source.
 */
static struct vector_desc_t *find_desc_for_source(int source, int cpu)
{
	struct vector_desc_t *vd = vector_desc_head;
	while (vd != NULL) {
		if (!(vd->flags & VECDESC_FL_SHARED)) {
			if (vd->source == source && cpu == vd->cpu) {
				break;
			}
		} else if (vd->cpu == cpu) {
			/* check only shared vds for the correct cpu, otherwise skip */
			bool found = false;
			struct shared_vector_desc_t *svd = vd->shared_vec_info;
			assert(svd != NULL);
			while (svd) {
				if (svd->source == source) {
					found = true;
					break;
				}
				svd = svd->next;
			}
			if (found) {
				break;
			}
		}
		vd = vd->next;
	}
	return vd;
}
/*
 * Reset the interrupt allocation table: every CPU interrupt line on every
 * core gets the default (unhandled-interrupt) handler, with its own flat
 * table index as the handler argument.
 */
void esp_intr_initialize(void)
{
	const size_t entries = ESP_INTC_INTS_NUM * arch_num_cpus();

	for (size_t idx = 0; idx < entries; idx++) {
		intr_alloc_table[idx].handler = default_intr_handler;
		intr_alloc_table[idx].arg = (void *)idx;
	}
}
/*
 * Pre-mark CPU interrupt line @intno on @cpu as shared; @is_int_ram
 * additionally marks it as IRAM-safe. Returns 0 on success, -EINVAL for
 * out-of-range arguments, -ENOMEM when a descriptor cannot be allocated.
 */
int esp_intr_mark_shared(int intno, int cpu, bool is_int_ram)
{
	int ret = 0;

	if (intno >= ESP_INTC_INTS_NUM || cpu >= arch_num_cpus()) {
		return -EINVAL;
	}

	esp_intr_lock();

	struct vector_desc_t *vd = get_desc_for_int(intno, cpu);

	if (vd != NULL) {
		vd->flags = is_int_ram ? (VECDESC_FL_SHARED | VECDESC_FL_INIRAM)
				       : VECDESC_FL_SHARED;
	} else {
		ret = -ENOMEM;
	}

	esp_intr_unlock();
	return ret;
}
/*
 * Reserve CPU interrupt line @intno on @cpu so the allocator never hands
 * it out. Returns 0 on success, -EINVAL for out-of-range arguments,
 * -ENOMEM when a descriptor cannot be allocated.
 */
int esp_intr_reserve(int intno, int cpu)
{
	int ret = 0;

	if (intno >= ESP_INTC_INTS_NUM || cpu >= arch_num_cpus()) {
		return -EINVAL;
	}

	esp_intr_lock();

	struct vector_desc_t *vd = get_desc_for_int(intno, cpu);

	if (vd != NULL) {
		/* Mark the line unusable for any future allocation */
		vd->flags = VECDESC_FL_RESERVED;
	} else {
		ret = -ENOMEM;
	}

	esp_intr_unlock();
	return ret;
}
/* Returns true if handler for interrupt is not the default unhandled interrupt handler */
static bool intr_has_handler(int intr, int cpu)
{
bool r;
r = intr_alloc_table[intr * CONFIG_MP_MAX_NUM_CPUS + cpu].handler != default_intr_handler;
return r;
}
/*
 * Decide whether the CPU interrupt line described by @vd can satisfy an
 * allocation request with the given @flags on @cpu. @force is the forced
 * interrupt number (or -1); forcing also admits SoC "special purpose"
 * lines. Returns true when the line is usable for this request.
 */
static bool is_vect_desc_usable(struct vector_desc_t *vd, int flags, int cpu, int force)
{
	/* Check if interrupt is not reserved by design */
	int x = vd->intno;
	esp_cpu_intr_desc_t intr_desc;
	esp_cpu_intr_get_desc(cpu, x, &intr_desc);
	if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD) {
		INTC_LOG("....Unusable: reserved");
		return false;
	}
	if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_SPECIAL && force == -1) {
		INTC_LOG("....Unusable: special-purpose int");
		return false;
	}
#ifndef SOC_CPU_HAS_FLEXIBLE_INTC
	/* Check if the interrupt priority is acceptable */
	if (!(flags & (1 << intr_desc.priority))) {
		INTC_LOG("....Unusable: incompatible priority");
		return false;
	}
	/* check if edge/level type matches what we want */
	if (((flags & ESP_INTR_FLAG_EDGE) && (intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL)) ||
	    (((!(flags & ESP_INTR_FLAG_EDGE)) && (intr_desc.type == ESP_CPU_INTR_TYPE_EDGE)))) {
		INTC_LOG("....Unusable: incompatible trigger type");
		return false;
	}
#endif
	/* check if interrupt is reserved at runtime */
	if (vd->flags & VECDESC_FL_RESERVED) {
		INTC_LOG("....Unusable: reserved at runtime.");
		return false;
	}
	/* Ints can't be both shared and non-shared. */
	assert(!((vd->flags & VECDESC_FL_SHARED) && (vd->flags & VECDESC_FL_NONSHARED)));
	/* check if interrupt already is in use by a non-shared interrupt */
	if (vd->flags & VECDESC_FL_NONSHARED) {
		INTC_LOG("....Unusable: already in (non-shared) use.");
		return false;
	}
	/* check shared interrupt flags */
	if (vd->flags & VECDESC_FL_SHARED) {
		if (flags & ESP_INTR_FLAG_SHARED) {
			bool in_iram_flag = ((flags & ESP_INTR_FLAG_IRAM) != 0);
			bool desc_in_iram_flag = ((vd->flags & VECDESC_FL_INIRAM) != 0);
			/*
			 * Bail out if int is shared, but iram property
			 * doesn't match what we want.
			 */
			if ((vd->flags & VECDESC_FL_SHARED) &&
			    (desc_in_iram_flag != in_iram_flag)) {
				INTC_LOG("....Unusable: shared but iram prop doesn't match");
				return false;
			}
		} else {
			/*
			 * We need an unshared IRQ; can't use shared ones;
			 * bail out if this is shared.
			 */
			INTC_LOG("...Unusable: int is shared, we need non-shared.");
			return false;
		}
	} else if (intr_has_handler(x, cpu)) {
		/* Check if interrupt already is allocated by set_interrupt_handler */
		INTC_LOG("....Unusable: already allocated");
		return false;
	}
	return true;
}
/*
* Locate a free interrupt compatible with the flags given.
* The 'force' argument can be -1, or 0-31 to force checking a certain interrupt.
* When a CPU is forced, the INTDESC_SPECIAL marked interrupts are also accepted.
*/
/*
 * Pick the best CPU interrupt line for a request: if a descriptor for
 * @source already exists (or @force is given), only that line is
 * considered; otherwise all lines are scanned and the lowest-priority
 * usable one is chosen, preferring existing shared lines with the fewest
 * attached ISRs for shared requests. Returns the line number or -1.
 */
static int get_available_int(int flags, int cpu, int force, int source)
{
	int x;
	int best = -1;
	/* Sentinel larger than any valid interrupt priority */
	int best_level = 9;
	int best_shared_ct = INT_MAX;
	/* Default vector desc, for vectors not in the linked list */
	struct vector_desc_t empty_vect_desc;
	memset(&empty_vect_desc, 0, sizeof(struct vector_desc_t));
	/* Level defaults to any low/med interrupt */
	if (!(flags & ESP_INTR_FLAG_LEVELMASK)) {
		flags |= ESP_INTR_FLAG_LOWMED;
	}
	INTC_LOG("%s: try to find existing. Cpu: %d, Source: %d", __func__, cpu, source);
	struct vector_desc_t *vd = find_desc_for_source(source, cpu);
	if (vd) {
		/* if existing vd found, don't need to search any more. */
		INTC_LOG("%s: existing vd found. intno: %d", __func__, vd->intno);
		if (force != -1 && force != vd->intno) {
			INTC_LOG("%s: intr forced but not match existing. "
				 "existing intno: %d, force: %d", __func__, vd->intno, force);
		} else if (!is_vect_desc_usable(vd, flags, cpu, force)) {
			INTC_LOG("%s: existing vd invalid.", __func__);
		} else {
			best = vd->intno;
		}
		return best;
	}
	if (force != -1) {
		INTC_LOG("%s: try to find force. "
			 "Cpu: %d, Source: %d, Force: %d", __func__, cpu, source, force);
		/* if force assigned, don't need to search any more. */
		vd = find_desc_for_int(force, cpu);
		if (vd == NULL) {
			/* if existing vd not found, just check the default state for the intr. */
			empty_vect_desc.intno = force;
			vd = &empty_vect_desc;
		}
		if (is_vect_desc_usable(vd, flags, cpu, force)) {
			best = vd->intno;
		} else {
			INTC_LOG("%s: forced vd invalid.", __func__);
		}
		return best;
	}
	INTC_LOG("%s: start looking. Current cpu: %d", __func__, cpu);
	/* No allocated handlers as well as forced intr, iterate over the 32 possible interrupts */
	for (x = 0; x < ESP_INTC_INTS_NUM; x++) {
		/* Grab the vector_desc for this vector. */
		vd = find_desc_for_int(x, cpu);
		if (vd == NULL) {
			empty_vect_desc.intno = x;
			vd = &empty_vect_desc;
		}
		esp_cpu_intr_desc_t intr_desc;
		esp_cpu_intr_get_desc(cpu, x, &intr_desc);
		INTC_LOG("Int %d reserved %d level %d %s hasIsr %d",
			 x, intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD,
			 intr_desc.priority,
			 intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL ? "LEVEL" : "EDGE",
			 intr_has_handler(x, cpu));
		if (!is_vect_desc_usable(vd, flags, cpu, force)) {
			continue;
		}
		if (flags & ESP_INTR_FLAG_SHARED) {
			/* We're allocating a shared int. */
			/* See if int already is used as a shared interrupt. */
			if (vd->flags & VECDESC_FL_SHARED) {
				/*
				 * We can use this already-marked-as-shared interrupt. Count the
				 * already attached isrs in order to see how useful it is.
				 */
				int no = 0;
				struct shared_vector_desc_t *svdesc = vd->shared_vec_info;
				while (svdesc != NULL) {
					no++;
					svdesc = svdesc->next;
				}
				if (no < best_shared_ct || best_level > intr_desc.priority) {
					/*
					 * Seems like this shared vector is both okay and has
					 * the least amount of ISRs already attached to it.
					 */
					best = x;
					best_shared_ct = no;
					best_level = intr_desc.priority;
					INTC_LOG("...int %d more usable as a shared int: "
						 "has %d existing vectors", x, no);
				} else {
					INTC_LOG("...worse than int %d", best);
				}
			} else {
				if (best == -1) {
					/*
					 * We haven't found a feasible shared interrupt yet.
					 * This one is still free and usable, even if not
					 * marked as shared.
					 * Remember it in case we don't find any other shared
					 * interrupt that qualifies.
					 */
					if (best_level > intr_desc.priority) {
						best = x;
						best_level = intr_desc.priority;
						INTC_LOG("...int %d usable as new shared int", x);
					}
				} else {
					INTC_LOG("...already have a shared int");
				}
			}
		} else {
			/*
			 * Seems this interrupt is feasible. Select it and break out of the loop
			 * No need to search further.
			 */
			if (best_level > intr_desc.priority) {
				best = x;
				best_level = intr_desc.priority;
			} else {
				INTC_LOG("...worse than int %d", best);
			}
		}
	}
	INTC_LOG("%s: using int %d", __func__, best);
	/*
	 * By now we have looked at all potential interrupts and
	 * hopefully have selected the best one in best.
	 */
	return best;
}
/* Common shared isr handler. Chain-call all ISRs. */
/* Common shared isr handler. Chain-call all ISRs attached to the vector
 * descriptor passed as @arg, skipping disabled entries.
 */
static void IRAM_ATTR shared_intr_isr(void *arg)
{
	struct vector_desc_t *vd = (struct vector_desc_t *)arg;
	struct shared_vector_desc_t *sh_vec = vd->shared_vec_info;
	esp_intr_lock();
	while (sh_vec) {
		if (!sh_vec->disabled) {
			/* Invoke only when no status register was registered,
			 * or when the registered status bit is actually set.
			 */
			if ((sh_vec->statusreg == NULL) ||
			    (*sh_vec->statusreg & sh_vec->statusmask)) {
				sh_vec->isr(sh_vec->arg);
			}
		}
		sh_vec = sh_vec->next;
	}
	esp_intr_unlock();
}
/*
 * Allocate a CPU interrupt line for peripheral @source with the requested
 * @flags and attach @handler/@arg. When @intrstatusreg is non-zero, a
 * shared handler is only invoked if (*intrstatusreg & intrstatusmask) is
 * non-zero. On success, *ret_handle (if non-NULL) receives a handle for
 * later enable/disable/free. Returns 0 or a negative errno value.
 */
int esp_intr_alloc_intrstatus(int source,
			      int flags,
			      uint32_t intrstatusreg,
			      uint32_t intrstatusmask,
			      intr_handler_t handler,
			      void *arg,
			      struct intr_handle_data_t **ret_handle)
{
	struct intr_handle_data_t *ret = NULL;
	int force = -1;
	INTC_LOG("%s (cpu %d): checking args", __func__, esp_cpu_get_core_id());
	/* Shared interrupts should be level-triggered. */
	if ((flags & ESP_INTR_FLAG_SHARED) && (flags & ESP_INTR_FLAG_EDGE)) {
		return -EINVAL;
	}
	/* You can't set an handler / arg for a non-C-callable interrupt. */
	if ((flags & ESP_INTR_FLAG_HIGH) && (handler)) {
		return -EINVAL;
	}
	/* Shared ints should have handler and non-processor-local source */
	if ((flags & ESP_INTR_FLAG_SHARED) && (!handler || source < 0)) {
		return -EINVAL;
	}
	/* Statusreg should have a mask */
	if (intrstatusreg && !intrstatusmask) {
		return -EINVAL;
	}
	/*
	 * If the ISR is marked to be IRAM-resident, the handler must not be in the cached region
	 * If we are to allow placing interrupt handlers into the 0x400c00000x400c2000 region,
	 * we need to make sure the interrupt is connected to the CPU0.
	 * CPU1 does not have access to the RTC fast memory through this region.
	 */
	if ((flags & ESP_INTR_FLAG_IRAM) && handler && !esp_ptr_in_iram(handler) &&
	    !esp_ptr_in_rtc_iram_fast(handler)) {
		return -EINVAL;
	}
	/*
	 * Default to prio 1 for shared interrupts.
	 * Default to prio 1, 2 or 3 for non-shared interrupts.
	 */
	if ((flags & ESP_INTR_FLAG_LEVELMASK) == 0) {
		if (flags & ESP_INTR_FLAG_SHARED) {
			flags |= ESP_INTR_FLAG_LEVEL1;
		} else {
			flags |= ESP_INTR_FLAG_LOWMED;
		}
	}
	INTC_LOG("%s (cpu %d): Args okay."
		 "Resulting flags 0x%X", __func__, esp_cpu_get_core_id(), flags);
	/*
	 * Check 'special' interrupt sources. These are tied to one specific
	 * interrupt, so we have to force get_available_int to only look at that.
	 */
	switch (source) {
	case ETS_INTERNAL_TIMER0_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER0_INTR_NO;
		break;
	case ETS_INTERNAL_TIMER1_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER1_INTR_NO;
		break;
	case ETS_INTERNAL_TIMER2_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER2_INTR_NO;
		break;
	case ETS_INTERNAL_SW0_INTR_SOURCE:
		force = ETS_INTERNAL_SW0_INTR_NO;
		break;
	case ETS_INTERNAL_SW1_INTR_SOURCE:
		force = ETS_INTERNAL_SW1_INTR_NO;
		break;
	case ETS_INTERNAL_PROFILING_INTR_SOURCE:
		force = ETS_INTERNAL_PROFILING_INTR_NO;
		break;
	default:
		break;
	}
	/* Allocate a return handle. If we end up not needing it, we'll free it later on. */
	ret = k_malloc(sizeof(struct intr_handle_data_t));
	if (ret == NULL) {
		return -ENOMEM;
	}
	esp_intr_lock();
	int cpu = esp_cpu_get_core_id();
	/* See if we can find an interrupt that matches the flags. */
	int intr = get_available_int(flags, cpu, force, source);
	if (intr == -1) {
		/* None found. Bail out. */
		esp_intr_unlock();
		k_free(ret);
		return -ENODEV;
	}
	/* Get an int vector desc for int. */
	struct vector_desc_t *vd = get_desc_for_int(intr, cpu);
	if (vd == NULL) {
		esp_intr_unlock();
		k_free(ret);
		return -ENOMEM;
	}
	/* Allocate that int! */
	if (flags & ESP_INTR_FLAG_SHARED) {
		/* Populate vector entry and add to linked list. */
		struct shared_vector_desc_t *sv = k_malloc(sizeof(struct shared_vector_desc_t));
		if (sv == NULL) {
			esp_intr_unlock();
			k_free(ret);
			return -ENOMEM;
		}
		memset(sv, 0, sizeof(struct shared_vector_desc_t));
		sv->statusreg = (uint32_t *)intrstatusreg;
		sv->statusmask = intrstatusmask;
		sv->isr = handler;
		sv->arg = arg;
		sv->next = vd->shared_vec_info;
		sv->source = source;
		sv->disabled = 0;
		vd->shared_vec_info = sv;
		vd->flags |= VECDESC_FL_SHARED;
		/* (Re-)set shared isr handler to new value. */
		set_interrupt_handler(intr, shared_intr_isr, vd);
	} else {
		/* Mark as unusable for other interrupt sources. This is ours now! */
		vd->flags = VECDESC_FL_NONSHARED;
		if (handler) {
			set_interrupt_handler(intr, handler, arg);
		}
		if (flags & ESP_INTR_FLAG_EDGE) {
			xthal_set_intclear(1 << intr);
		}
		vd->source = source;
	}
	/* Track whether this line may stay enabled while flash cache is off */
	if (flags & ESP_INTR_FLAG_IRAM) {
		vd->flags |= VECDESC_FL_INIRAM;
		non_iram_int_mask[cpu] &= ~(1 << intr);
	} else {
		vd->flags &= ~VECDESC_FL_INIRAM;
		non_iram_int_mask[cpu] |= (1 << intr);
	}
	if (source >= 0) {
		esp_rom_route_intr_matrix(cpu, source, intr);
	}
	/* Fill return handle data. */
	ret->vector_desc = vd;
	ret->shared_vector_desc = vd->shared_vec_info;
	/* Enable int at CPU-level; */
	irq_enable(intr);
	/*
	 * If interrupt has to be started disabled, do that now; ints won't be enabled for
	 * real until the end of the critical section.
	 */
	if (flags & ESP_INTR_FLAG_INTRDISABLED) {
		esp_intr_disable(ret);
	}
#ifdef SOC_CPU_HAS_FLEXIBLE_INTC
	/* Extract the level from the interrupt passed flags */
	int level = esp_intr_flags_to_level(flags);
	esp_cpu_intr_set_priority(intr, level);
	if (flags & ESP_INTR_FLAG_EDGE) {
		esp_cpu_intr_set_type(intr, ESP_CPU_INTR_TYPE_EDGE);
	} else {
		esp_cpu_intr_set_type(intr, ESP_CPU_INTR_TYPE_LEVEL);
	}
#endif
	esp_intr_unlock();
	/* Fill return handle if needed, otherwise free handle. */
	if (ret_handle != NULL) {
		*ret_handle = ret;
	} else {
		k_free(ret);
	}
	LOG_DBG("Connected src %d to int %d (cpu %d)", source, intr, cpu);
	return 0;
}
/*
 * Allocate a CPU interrupt line for @source with the given @flags and
 * @handler. Convenience wrapper around esp_intr_alloc_intrstatus() with
 * no status register/mask. Returns 0 or a negative errno value.
 */
int esp_intr_alloc(int source,
		   int flags,
		   intr_handler_t handler,
		   void *arg,
		   struct intr_handle_data_t **ret_handle)
{
	/*
	 * As an optimization, we can create a table with the possible interrupt status
	 * registers and masks for every single source there is. We can then add code here to
	 * look up an applicable value and pass that to the esp_intr_alloc_intrstatus function.
	 */
	return esp_intr_alloc_intrstatus(source, flags, 0, 0, handler, arg, ret_handle);
}
/*
 * Change the IRAM-resident property of a non-shared interrupt: when set,
 * the interrupt is kept enabled while non-IRAM interrupts are masked.
 * Returns 0 on success, -EINVAL for a NULL handle or a shared interrupt.
 */
int IRAM_ATTR esp_intr_set_in_iram(struct intr_handle_data_t *handle, bool is_in_iram)
{
	if (handle == NULL) {
		return -EINVAL;
	}

	struct vector_desc_t *vd = handle->vector_desc;

	/* The IRAM property of shared interrupts cannot be changed here */
	if (vd->flags & VECDESC_FL_SHARED) {
		return -EINVAL;
	}

	esp_intr_lock();

	const uint32_t bit = 1U << vd->intno;

	if (is_in_iram) {
		vd->flags |= VECDESC_FL_INIRAM;
		non_iram_int_mask[vd->cpu] &= ~bit;
	} else {
		vd->flags &= ~VECDESC_FL_INIRAM;
		non_iram_int_mask[vd->cpu] |= bit;
	}

	esp_intr_unlock();
	return 0;
}
/*
 * Release an interrupt allocated with esp_intr_alloc*(): the interrupt is
 * disabled, its shared-vector entry (if any) is unlinked and freed, and
 * when no user remains the line reverts to the default handler and is
 * marked free. The handle itself is freed. Returns 0 or -EINVAL.
 */
int esp_intr_free(struct intr_handle_data_t *handle)
{
	bool free_shared_vector = false;
	if (!handle) {
		return -EINVAL;
	}
	esp_intr_lock();
	esp_intr_disable(handle);
	if (handle->vector_desc->flags & VECDESC_FL_SHARED) {
		/* Find and kill the shared int */
		struct shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
		struct shared_vector_desc_t *prevsvd = NULL;
		assert(svd); /* should be something in there for a shared int */
		while (svd != NULL) {
			if (svd == handle->shared_vector_desc) {
				/* Found it. Now kill it. */
				if (prevsvd) {
					prevsvd->next = svd->next;
				} else {
					handle->vector_desc->shared_vec_info = svd->next;
				}
				k_free(svd);
				break;
			}
			prevsvd = svd;
			svd = svd->next;
		}
		/* If nothing left, disable interrupt. */
		if (handle->vector_desc->shared_vec_info == NULL) {
			free_shared_vector = true;
		}
		INTC_LOG("%s: Deleting shared int: %s. "
			 "Shared int is %s", __func__, svd ? "not found or last one" : "deleted",
			 free_shared_vector ? "empty now." : "still in use");
	}
	if ((handle->vector_desc->flags & VECDESC_FL_NONSHARED) || free_shared_vector) {
		INTC_LOG("%s: Disabling int, killing handler", __func__);
		/* Reset to normal handler */
		set_interrupt_handler(handle->vector_desc->intno,
				      default_intr_handler,
				      (void *)((int)handle->vector_desc->intno));
		/*
		 * Theoretically, we could free the vector_desc... not sure if that's worth the
		 * few bytes of memory we save.(We can also not use the same exit path for empty
		 * shared ints anymore if we delete the desc.) For now, just mark it as free.
		 */
		handle->vector_desc->flags &= ~(VECDESC_FL_NONSHARED |
						VECDESC_FL_RESERVED | VECDESC_FL_SHARED);
		/* Also kill non_iram mask bit. */
		non_iram_int_mask[handle->vector_desc->cpu] &= ~(1 << (handle->vector_desc->intno));
	}
	esp_intr_unlock();
	k_free(handle);
	return 0;
}
/* Return the CPU interrupt line number backing @handle. */
int esp_intr_get_intno(struct intr_handle_data_t *handle)
{
	return handle->vector_desc->intno;
}
/* Return the CPU core the interrupt behind @handle was allocated on. */
int esp_intr_get_cpu(struct intr_handle_data_t *handle)
{
	return handle->vector_desc->cpu;
}
/**
* Interrupt disabling strategy:
* If the source is >=0 (meaning a muxed interrupt), we disable it by muxing the interrupt to a
* non-connected interrupt. If the source is <0 (meaning an internal, per-cpu interrupt).
* This allows us to, for the muxed CPUs, disable an int from
* the other core. It also allows disabling shared interrupts.
*/
/*
* Muxing an interrupt source to interrupt 6, 7, 11, 15, 16 or 29
* cause the interrupt to effectively be disabled.
*/
#define INT_MUX_DISABLED_INTNO 6
/*
 * Re-enable the interrupt referred to by @a handle.
 *
 * Shared handlers only clear their per-handler "disabled" flag; muxed
 * sources (source >= 0) are re-routed to the CPU interrupt line in the
 * interrupt matrix. Internal per-CPU sources (source < 0) can only be
 * enabled from the CPU that owns them.
 *
 * Returns 0 on success, -EINVAL on a NULL handle or wrong CPU.
 */
int IRAM_ATTR esp_intr_enable(struct intr_handle_data_t *handle)
{
	if (!handle) {
		return -EINVAL;
	}
	esp_intr_lock();
	int source;
	if (handle->shared_vector_desc) {
		/* Mark this shared handler as active again */
		handle->shared_vector_desc->disabled = 0;
		source = handle->shared_vector_desc->source;
	} else {
		source = handle->vector_desc->source;
	}
	if (source >= 0) {
		/* Disabled using int matrix; re-connect to enable */
		esp_rom_route_intr_matrix(handle->vector_desc->cpu,
			source, handle->vector_desc->intno);
	} else {
		/* Re-enable using cpu int ena reg */
		if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
			esp_intr_unlock();
			return -EINVAL; /* Can only enable these ints on this cpu */
		}
		irq_enable(handle->vector_desc->intno);
	}
	esp_intr_unlock();
	return 0;
}
/*
 * Disable the interrupt referred to by @a handle.
 *
 * A shared interrupt is only truly masked once every handler attached to
 * the same source is disabled; otherwise only this handler's flag is set.
 * Muxed sources (source >= 0) are disabled by routing the source to an
 * unconnected CPU interrupt (INT_MUX_DISABLED_INTNO). Internal per-CPU
 * sources (source < 0) are masked via the CPU interrupt-enable register
 * and can only be touched from the CPU that owns them.
 *
 * @param handle Handle returned when the interrupt was allocated.
 * @return 0 on success, -EINVAL on a NULL handle or wrong CPU.
 */
int IRAM_ATTR esp_intr_disable(struct intr_handle_data_t *handle)
{
	if (!handle) {
		return -EINVAL;
	}
	esp_intr_lock();
	int source;
	/* Use proper boolean literals instead of 0/1 for a bool */
	bool disabled = true;
	if (handle->shared_vector_desc) {
		handle->shared_vector_desc->disabled = 1;
		source = handle->shared_vector_desc->source;
		struct shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
		assert(svd != NULL);
		/* If any other handler on this source is still enabled,
		 * the underlying interrupt must stay connected.
		 */
		while (svd) {
			if (svd->source == source && svd->disabled == 0) {
				disabled = false;
				break;
			}
			svd = svd->next;
		}
	} else {
		source = handle->vector_desc->source;
	}
	if (source >= 0) {
		if (disabled) {
			/* Disable using int matrix */
			esp_rom_route_intr_matrix(handle->vector_desc->cpu,
				source, INT_MUX_DISABLED_INTNO);
		}
	} else {
		/* Disable using per-cpu regs */
		if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
			esp_intr_unlock();
			return -EINVAL; /* Can only disable these ints on this cpu */
		}
		irq_disable(handle->vector_desc->intno);
	}
	esp_intr_unlock();
	return 0;
}
/*
 * Mask, on the calling CPU, every interrupt whose handler does not live
 * in IRAM, saving the previously-enabled mask so esp_intr_noniram_enable()
 * can restore it. Calling this twice without an intervening enable aborts.
 */
void IRAM_ATTR esp_intr_noniram_disable(void)
{
	esp_intr_lock();
	int oldint;
	int cpu = esp_cpu_get_core_id();
	/* non_iram_int_mask marks IRAM-safe ints; invert to get the rest */
	int non_iram_ints = ~non_iram_int_mask[cpu];
	if (non_iram_int_disabled_flag[cpu]) {
		/* Unbalanced disable/enable pairing is a programming error */
		abort();
	}
	non_iram_int_disabled_flag[cpu] = true;
	oldint = esp_cpu_intr_get_enabled_mask();
	esp_cpu_intr_disable(non_iram_ints);
	rtc_isr_noniram_disable(cpu);
	/* Remember which non-IRAM ints were actually enabled */
	non_iram_int_disabled[cpu] = oldint & non_iram_ints;
	esp_intr_unlock();
}
/*
 * Restore, on the calling CPU, the non-IRAM interrupts saved by the
 * matching esp_intr_noniram_disable() call. Aborts if called without a
 * preceding disable.
 */
void IRAM_ATTR esp_intr_noniram_enable(void)
{
	esp_intr_lock();
	int cpu = esp_cpu_get_core_id();
	/* Mask of ints that were enabled before the disable call */
	int non_iram_ints = non_iram_int_disabled[cpu];
	if (!non_iram_int_disabled_flag[cpu]) {
		/* Unbalanced disable/enable pairing is a programming error */
		abort();
	}
	non_iram_int_disabled_flag[cpu] = false;
	esp_cpu_intr_enable(non_iram_ints);
	rtc_isr_noniram_enable(cpu);
	esp_intr_unlock();
}
``` | /content/code_sandbox/drivers/interrupt_controller/intc_esp32.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 7,214 |
```unknown
config $(cur-level)_LVL_INTR_0$(aggregator)_OFFSET
int "Level $(prev-level-num) IRQ line for $(cur-level) level aggregator $(aggregator)"
default 0
depends on $(cur-level)_LEVEL_INTERRUPTS
help
This is the level $(prev-level-num) interrupt number for level
$(cur-level-num) interrupt aggregator $(aggregator).
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.multilevel.aggregator_template | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 83 |
```unknown
config NUCLEI_ECLIC
bool "Enhanced Core Local Interrupt Controller (ECLIC)"
default y
depends on DT_HAS_NUCLEI_ECLIC_ENABLED
select RISCV_SOC_HAS_CUSTOM_IRQ_HANDLING if !RISCV_VECTORED_MODE
help
Interrupt controller for Nuclei SoC core.
config NRFX_CLIC
bool "VPR Core Local Interrupt Controller (CLIC)"
default y
depends on DT_HAS_NORDIC_NRF_CLIC_ENABLED
select GEN_IRQ_VECTOR_TABLE
help
Interrupt controller for Nordic VPR cores.
if NUCLEI_ECLIC
config LEGACY_CLIC
bool "Use the legacy clic specification"
depends on RISCV_HAS_CLIC
help
Enables legacy clic, where smclicshv extension is not supported and
hardware vectoring is set via mode bits of mtvec.
endif # NUCLEI_ECLIC
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.clic | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 202 |
```unknown
config NXP_PINT
bool "Pin interrupt and pattern match engine (PINT) for NXP MCUs"
default y
depends on DT_HAS_NXP_PINT_ENABLED
help
Enable PINT driver for NXP MCUs
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.nxp_pint | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 53 |
```c
/*
*
*/
/*
* NOTE: This driver implements the GICv1 and GICv2 interfaces.
*/
#include <zephyr/device.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/devicetree.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/dt-bindings/interrupt-controller/arm-gic.h>
#include <zephyr/drivers/interrupt_controller/gic.h>
#include <zephyr/sys/barrier.h>
#if defined(CONFIG_GIC_V1)
#define DT_DRV_COMPAT arm_gic_v1
#elif defined(CONFIG_GIC_V2)
#define DT_DRV_COMPAT arm_gic_v2
#else
#error "Unknown GIC controller compatible for this configuration"
#endif
static const uint64_t cpu_mpid_list[] = {
DT_FOREACH_CHILD_STATUS_OKAY_SEP(DT_PATH(cpus), DT_REG_ADDR, (,))
};
BUILD_ASSERT(ARRAY_SIZE(cpu_mpid_list) >= CONFIG_MP_MAX_NUM_CPUS,
"The count of CPU Cores nodes in dts is less than CONFIG_MP_MAX_NUM_CPUS\n");
void arm_gic_irq_enable(unsigned int irq)
{
int int_grp, int_off;
int_grp = irq / 32;
int_off = irq % 32;
sys_write32((1 << int_off), (GICD_ISENABLERn + int_grp * 4));
}
void arm_gic_irq_disable(unsigned int irq)
{
int int_grp, int_off;
int_grp = irq / 32;
int_off = irq % 32;
sys_write32((1 << int_off), (GICD_ICENABLERn + int_grp * 4));
}
bool arm_gic_irq_is_enabled(unsigned int irq)
{
int int_grp, int_off;
unsigned int enabler;
int_grp = irq / 32;
int_off = irq % 32;
enabler = sys_read32(GICD_ISENABLERn + int_grp * 4);
return (enabler & (1 << int_off)) != 0;
}
bool arm_gic_irq_is_pending(unsigned int irq)
{
int int_grp, int_off;
unsigned int enabler;
int_grp = irq / 32;
int_off = irq % 32;
enabler = sys_read32(GICD_ISPENDRn + int_grp * 4);
return (enabler & (1 << int_off)) != 0;
}
void arm_gic_irq_set_pending(unsigned int irq)
{
int int_grp, int_off;
int_grp = irq / 32;
int_off = irq % 32;
sys_write32((1 << int_off), (GICD_ISPENDRn + int_grp * 4));
}
void arm_gic_irq_clear_pending(unsigned int irq)
{
int int_grp, int_off;
int_grp = irq / 32;
int_off = irq % 32;
sys_write32((1 << int_off), (GICD_ICPENDRn + int_grp * 4));
}
/*
 * Set the priority and trigger type of @a irq.
 *
 * Priority is one byte per interrupt in GICD_IPRIORITYRn; trigger type
 * occupies 2 bits per interrupt in GICD_ICFGRn (16 interrupts per
 * register). Only IRQ_TYPE_EDGE in @a flags selects edge triggering;
 * anything else leaves the line level-sensitive.
 */
void arm_gic_irq_set_priority(
	unsigned int irq, unsigned int prio, uint32_t flags)
{
	int int_grp, int_off;
	uint32_t val;
	/* Set priority */
	sys_write8(prio & 0xff, GICD_IPRIORITYRn + irq);
	/* Set interrupt type */
	int_grp = (irq / 16) * 4;
	int_off = (irq % 16) * 2;
	val = sys_read32(GICD_ICFGRn + int_grp);
	val &= ~(GICD_ICFGR_MASK << int_off);
	if (flags & IRQ_TYPE_EDGE) {
		val |= (GICD_ICFGR_TYPE << int_off);
	}
	sys_write32(val, GICD_ICFGRn + int_grp);
}
/*
 * Acknowledge the highest-priority pending interrupt and return the full
 * GICC_IAR value (interrupt ID plus, for SGIs, the source CPU field).
 */
unsigned int arm_gic_get_active(void)
{
	unsigned int irq;
	/*
	 * "ARM Generic Interrupt Controller Architecture version 2.0" states that
	 * [4.4.5 End of Interrupt Register, GICC_EOIR)]:
	 * """
	 * For compatibility with possible extensions to the GIC architecture
	 * specification, ARM recommends that software preserves the entire register
	 * value read from the GICC_IAR when it acknowledges the interrupt, and uses
	 * that entire value for its corresponding write to the GICC_EOIR.
	 * """
	 * Because of that, we read the entire value here, to be later written back to GICC_EOIR
	 */
	irq = sys_read32(GICC_IAR);
	return irq;
}
/*
 * Signal end-of-interrupt for @a irq; pass the value previously returned
 * by arm_gic_get_active() unmodified (see the note above GICC_IAR read).
 */
void arm_gic_eoi(unsigned int irq)
{
	/*
	 * Ensure the write to peripheral registers are *complete* before the write
	 * to GIC_EOIR.
	 *
	 * Note: The completion guarantee depends on various factors of system design
	 * and the barrier is the best core can do by which execution of further
	 * instructions waits till the barrier is alive.
	 */
	barrier_dsync_fence_full();
	/* set to inactive */
	sys_write32(irq, GICC_EOIR);
}
/*
 * Raise software-generated interrupt @a sgi_id on the CPUs in
 * @a target_list (bitmask, CPU-list filter). @a target_aff is accepted
 * for API compatibility with GICv3 but unused on GICv1/v2.
 */
void gic_raise_sgi(unsigned int sgi_id, uint64_t target_aff,
		   uint16_t target_list)
{
	uint32_t sgi_val;
	ARG_UNUSED(target_aff);
	sgi_val = GICD_SGIR_TGTFILT_CPULIST |
		  GICD_SGIR_CPULIST(target_list & GICD_SGIR_CPULIST_MASK) |
		  sgi_id;
	/* Make prior memory accesses visible before triggering the SGI */
	barrier_dsync_fence_full();
	sys_write32(sgi_val, GICD_SGIR);
	barrier_isync_fence_full();
}
/*
 * One-time initialization of the GIC distributor: route all SPIs to the
 * CPUs listed in the devicetree, reset trigger/priority/group settings,
 * and disable every SPI before turning distribution back on.
 */
static void gic_dist_init(void)
{
	unsigned int gic_irqs, i;
	uint8_t cpu_mask = 0;
	uint32_t reg_val;
	/* ITLinesNumber field: (N + 1) * 32 interrupt lines, capped at 1020 */
	gic_irqs = sys_read32(GICD_TYPER) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020) {
		gic_irqs = 1020;
	}
	/*
	 * Disable the forwarding of pending interrupts
	 * from the Distributor to the CPU interfaces
	 */
	sys_write32(0, GICD_CTLR);
	/*
	 * Enable all global interrupts distributing to CPUs listed
	 * in dts with the count of arch_num_cpus().
	 */
	unsigned int num_cpus = arch_num_cpus();
	for (i = 0; i < num_cpus; i++) {
		/* NOTE(review): assumes each entry of cpu_mpid_list (the CPU
		 * node reg address) is < 8 so it maps to a GICD_ITARGETSR
		 * target bit — confirm for this platform.
		 */
		cpu_mask |= BIT(cpu_mpid_list[i]);
	}
	/* Replicate the 8-bit target mask into all four byte lanes */
	reg_val = cpu_mask | (cpu_mask << 8) | (cpu_mask << 16)
		| (cpu_mask << 24);
	for (i = GIC_SPI_INT_BASE; i < gic_irqs; i += 4) {
		sys_write32(reg_val, GICD_ITARGETSRn + i);
	}
	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = GIC_SPI_INT_BASE; i < gic_irqs; i += 16) {
		sys_write32(0, GICD_ICFGRn + i / 4);
	}
	/* Set priority on all global interrupts. */
	for (i = GIC_SPI_INT_BASE; i < gic_irqs; i += 4) {
		sys_write32(0, GICD_IPRIORITYRn + i);
	}
	/* Set all interrupts to group 0 */
	for (i = GIC_SPI_INT_BASE; i < gic_irqs; i += 32) {
		sys_write32(0, GICD_IGROUPRn + i / 8);
	}
	/*
	 * Disable all interrupts.  Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = GIC_SPI_INT_BASE; i < gic_irqs; i += 32) {
#ifndef CONFIG_GIC_V1
		sys_write32(0xffffffff, GICD_ICACTIVERn + i / 8);
#endif
		sys_write32(0xffffffff, GICD_ICENABLERn + i / 8);
	}
	/*
	 * Enable the forwarding of pending interrupts
	 * from the Distributor to the CPU interfaces
	 */
	sys_write32(1, GICD_CTLR);
}
/*
 * Per-CPU initialization of the banked GIC registers: disable PPIs,
 * enable SGIs, set default priorities, open the priority mask and
 * enable the CPU interface. Called on the boot CPU and again on each
 * secondary (see arm_gic_secondary_init).
 */
static void gic_cpu_init(void)
{
	int i;
	uint32_t val;
	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
#ifndef CONFIG_GIC_V1
	sys_write32(0xffffffff, GICD_ICACTIVERn);
#endif
	sys_write32(0xffff0000, GICD_ICENABLERn);
	sys_write32(0x0000ffff, GICD_ISENABLERn);
	/*
	 * Set priority on PPI and SGI interrupts
	 */
	for (i = 0; i < 32; i += 4) {
		sys_write32(0xa0a0a0a0, GICD_IPRIORITYRn + i);
	}
	/* Priority mask 0xf0: allow all but the lowest priority levels */
	sys_write32(0xf0, GICC_PMR);
	/*
	 * Enable interrupts and signal them using the IRQ signal.
	 */
	val = sys_read32(GICC_CTLR);
#ifndef CONFIG_GIC_V1
	val &= ~GICC_CTLR_BYPASS_MASK;
#endif
	val |= GICC_CTLR_ENABLE_MASK;
	sys_write32(val, GICC_CTLR);
}
#define GIC_PARENT_IRQ 0
#define GIC_PARENT_IRQ_PRI 0
#define GIC_PARENT_IRQ_FLAGS 0
/**
 * @brief Initialize the GIC device driver
 *
 * Programs the distributor, then the CPU interface of the boot core.
 *
 * @param dev Device instance (unused; there is a single GIC).
 * @return 0 always.
 */
int arm_gic_init(const struct device *dev)
{
	/* Silence the unused-parameter warning, consistent with other
	 * init functions in the interrupt-controller drivers.
	 */
	ARG_UNUSED(dev);

	/* Init of Distributor interface registers */
	gic_dist_init();
	/* Init CPU interface registers */
	gic_cpu_init();
	return 0;
}
DEVICE_DT_INST_DEFINE(0, arm_gic_init, NULL, NULL, NULL,
PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY, NULL);
#ifdef CONFIG_SMP
/* Program the banked CPU-interface registers on a secondary core;
 * the distributor was already set up by arm_gic_init() on the boot CPU.
 */
void arm_gic_secondary_init(void)
{
	/* Init CPU interface registers for each secondary core */
	gic_cpu_init();
}
#endif
``` | /content/code_sandbox/drivers/interrupt_controller/intc_gic.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,161 |
```c
/*
*
*/
#define DT_DRV_COMPAT nxp_s32_siul2_eirq
#include <soc.h>
#include <zephyr/irq.h>
#include <zephyr/sys/sys_io.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/interrupt_controller/intc_eirq_nxp_s32.h>
/* SIUL2 External Interrupt Controller registers (offsets from DISR0) */
/* SIUL2 DMA/Interrupt Status Flag */
#define SIUL2_DISR0 0x0
/* SIUL2 DMA/Interrupt Request Enable */
#define SIUL2_DIRER0 0x8
/* SIUL2 DMA/Interrupt Request Select */
#define SIUL2_DIRSR0 0x10
/* SIUL2 Interrupt Rising-Edge Event Enable */
#define SIUL2_IREER0 0x18
/* SIUL2 Interrupt Falling-Edge Event Enable */
#define SIUL2_IFEER0 0x20
/* SIUL2 Interrupt Filter Enable */
#define SIUL2_IFER0 0x28
/* SIUL2 Interrupt Filter Maximum Counter Register */
#define SIUL2_IFMCR(n) (0x30 + 0x4 * (n))
#define SIUL2_IFMCR_MAXCNT_MASK GENMASK(3, 0)
#define SIUL2_IFMCR_MAXCNT(v) FIELD_PREP(SIUL2_IFMCR_MAXCNT_MASK, (v))
/* SIUL2 Interrupt Filter Clock Prescaler Register */
#define SIUL2_IFCPR 0xb0
#define SIUL2_IFCPR_IFCP_MASK GENMASK(3, 0)
#define SIUL2_IFCPR_IFCP(v) FIELD_PREP(SIUL2_IFCPR_IFCP_MASK, (v))
/* Handy accessors */
#define REG_READ(r) sys_read32(config->base + (r))
#define REG_WRITE(r, v) sys_write32((v), config->base + (r))
#define GLITCH_FILTER_DISABLED (SIUL2_IFMCR_MAXCNT_MASK + 1)
struct eirq_nxp_s32_config {
mem_addr_t base;
const struct pinctrl_dev_config *pincfg;
uint8_t filter_clock_prescaler;
uint8_t max_filter_counter[CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX];
};
struct eirq_nxp_s32_cb {
eirq_nxp_s32_callback_t cb;
uint8_t pin;
void *data;
};
struct eirq_nxp_s32_data {
struct eirq_nxp_s32_cb *cb;
};
/*
 * Common ISR body: service all pending external-interrupt lines belonging
 * to the group handled by parent IRQ @a irq_idx (each parent IRQ covers
 * CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_GROUP consecutive lines).
 */
static inline void eirq_nxp_s32_interrupt_handler(const struct device *dev, uint32_t irq_idx)
{
	const struct eirq_nxp_s32_config *config = dev->config;
	struct eirq_nxp_s32_data *data = dev->data;
	uint32_t mask = GENMASK(CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_GROUP - 1, 0);
	uint32_t pending;
	uint8_t irq;
	/* Restrict pending bits to this parent IRQ's group of lines */
	pending = eirq_nxp_s32_get_pending(dev);
	pending &= mask << (irq_idx * CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_GROUP);
	while (pending) {
		/* Service lines lowest-first */
		mask = LSB_GET(pending);
		irq = u64_count_trailing_zeros(mask);
		/* Clear status flag */
		REG_WRITE(SIUL2_DISR0, REG_READ(SIUL2_DISR0) | mask);
		if (data->cb[irq].cb != NULL) {
			data->cb[irq].cb(data->cb[irq].pin, data->cb[irq].data);
		}
		pending ^= mask;
	}
}
/*
 * Register @a cb (with user @a arg) for external-interrupt line @a irq.
 * Re-registering the identical callback/arg pair is a no-op; a different
 * callback while one is installed returns -EBUSY.
 */
int eirq_nxp_s32_set_callback(const struct device *dev, uint8_t irq, uint8_t pin,
			      eirq_nxp_s32_callback_t cb, void *arg)
{
	struct eirq_nxp_s32_data *data = dev->data;
	struct eirq_nxp_s32_cb *slot;

	__ASSERT_NO_MSG(irq < CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX);

	slot = &data->cb[irq];
	if ((slot->cb == cb) && (slot->data == arg)) {
		return 0;
	}
	if (slot->cb != NULL) {
		return -EBUSY;
	}

	slot->cb = cb;
	slot->pin = pin;
	slot->data = arg;
	return 0;
}
/* Clear any callback registered for external-interrupt line @a irq. */
void eirq_nxp_s32_unset_callback(const struct device *dev, uint8_t irq)
{
	struct eirq_nxp_s32_data *data = dev->data;
	struct eirq_nxp_s32_cb *slot;

	__ASSERT_NO_MSG(irq < CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX);

	slot = &data->cb[irq];
	slot->cb = NULL;
	slot->pin = 0;
	slot->data = NULL;
}
/*
 * Configure the edge trigger for line @a irq and unmask it.
 * IREER0/IFEER0 select rising/falling edge sensitivity independently,
 * so BOTH_EDGES sets a bit in each register.
 */
void eirq_nxp_s32_enable_interrupt(const struct device *dev, uint8_t irq,
				   enum eirq_nxp_s32_trigger trigger)
{
	const struct eirq_nxp_s32_config *config = dev->config;
	uint32_t reg_val;
	__ASSERT_NO_MSG(irq < CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX);
	/* Configure trigger */
	reg_val = REG_READ(SIUL2_IREER0);
	if ((trigger == EIRQ_NXP_S32_RISING_EDGE) || (trigger == EIRQ_NXP_S32_BOTH_EDGES)) {
		reg_val |= BIT(irq);
	} else {
		reg_val &= ~BIT(irq);
	}
	REG_WRITE(SIUL2_IREER0, reg_val);
	reg_val = REG_READ(SIUL2_IFEER0);
	if ((trigger == EIRQ_NXP_S32_FALLING_EDGE) || (trigger == EIRQ_NXP_S32_BOTH_EDGES)) {
		reg_val |= BIT(irq);
	} else {
		reg_val &= ~BIT(irq);
	}
	REG_WRITE(SIUL2_IFEER0, reg_val);
	/* Clear status flag and unmask interrupt */
	REG_WRITE(SIUL2_DISR0, REG_READ(SIUL2_DISR0) | BIT(irq));
	REG_WRITE(SIUL2_DIRER0, REG_READ(SIUL2_DIRER0) | BIT(irq));
}
/*
 * Mask line @a irq: drop both edge triggers, clear any latched status
 * flag and disable the interrupt request.
 */
void eirq_nxp_s32_disable_interrupt(const struct device *dev, uint8_t irq)
{
	const struct eirq_nxp_s32_config *config = dev->config;
	__ASSERT_NO_MSG(irq < CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX);
	/* Disable triggers */
	REG_WRITE(SIUL2_IREER0, REG_READ(SIUL2_IREER0) & ~BIT(irq));
	REG_WRITE(SIUL2_IFEER0, REG_READ(SIUL2_IFEER0) & ~BIT(irq));
	/* Clear status flag and mask interrupt */
	REG_WRITE(SIUL2_DISR0, REG_READ(SIUL2_DISR0) | BIT(irq));
	REG_WRITE(SIUL2_DIRER0, REG_READ(SIUL2_DIRER0) & ~BIT(irq));
}
/* Return the bitmask of lines that are both flagged and unmasked. */
uint32_t eirq_nxp_s32_get_pending(const struct device *dev)
{
	const struct eirq_nxp_s32_config *config = dev->config;
	uint32_t status = REG_READ(SIUL2_DISR0);
	uint32_t enabled = REG_READ(SIUL2_DIRER0);

	return status & enabled;
}
/*
 * Device init: apply pinctrl, put the controller in a known masked state
 * and program the per-line glitch filters described in devicetree.
 * Returns 0 on success or the pinctrl error code.
 */
static int eirq_nxp_s32_init(const struct device *dev)
{
	const struct eirq_nxp_s32_config *config = dev->config;
	uint8_t irq;
	int err;
	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}
	/* Disable triggers, clear status flags and mask all interrupts */
	REG_WRITE(SIUL2_IREER0, 0U);
	REG_WRITE(SIUL2_IFEER0, 0U);
	REG_WRITE(SIUL2_DISR0, 0xffffffff);
	REG_WRITE(SIUL2_DIRER0, 0U);
	/* Select the request type as interrupt */
	REG_WRITE(SIUL2_DIRSR0, 0U);
	/* Configure glitch filters */
	REG_WRITE(SIUL2_IFCPR, SIUL2_IFCPR_IFCP(config->filter_clock_prescaler));
	for (irq = 0; irq < CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX; irq++) {
		/* A counter value beyond MAXCNT means "filter disabled" */
		if (config->max_filter_counter[irq] < GLITCH_FILTER_DISABLED) {
			REG_WRITE(SIUL2_IFMCR(irq),
				  SIUL2_IFMCR_MAXCNT(config->max_filter_counter[irq]));
			REG_WRITE(SIUL2_IFER0, REG_READ(SIUL2_IFER0) | BIT(irq));
		} else {
			REG_WRITE(SIUL2_IFER0, REG_READ(SIUL2_IFER0) & ~BIT(irq));
		}
	}
	return 0;
}
#define EIRQ_NXP_S32_ISR_DEFINE(idx, n) \
static void eirq_nxp_s32_isr##idx##_##n(const struct device *dev) \
{ \
eirq_nxp_s32_interrupt_handler(dev, idx); \
}
#define _EIRQ_NXP_S32_IRQ_CONFIG(idx, n) \
do { \
IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, idx, irq), DT_INST_IRQ_BY_IDX(n, idx, priority), \
eirq_nxp_s32_isr##idx##_##n, DEVICE_DT_INST_GET(n), \
COND_CODE_1(CONFIG_GIC, (DT_INST_IRQ_BY_IDX(n, idx, flags)), (0))); \
irq_enable(DT_INST_IRQ_BY_IDX(n, idx, irq)); \
} while (false);
#define EIRQ_NXP_S32_IRQ_CONFIG(n) \
LISTIFY(DT_NUM_IRQS(DT_DRV_INST(n)), _EIRQ_NXP_S32_IRQ_CONFIG, (), n)
#define EIRQ_NXP_S32_FILTER_CONFIG(idx, n) \
COND_CODE_1(DT_NODE_EXISTS(DT_INST_CHILD(n, irq_##idx)), \
(DT_PROP_OR(DT_INST_CHILD(n, irq_##idx), max_filter_counter, \
GLITCH_FILTER_DISABLED)), \
(GLITCH_FILTER_DISABLED))
#define EIRQ_NXP_S32_INIT_DEVICE(n) \
LISTIFY(DT_NUM_IRQS(DT_DRV_INST(n)), EIRQ_NXP_S32_ISR_DEFINE, (), n) \
PINCTRL_DT_INST_DEFINE(n); \
static const struct eirq_nxp_s32_config eirq_nxp_s32_conf_##n = { \
.base = DT_INST_REG_ADDR(n), \
.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
.filter_clock_prescaler = DT_INST_PROP_OR(n, filter_prescaler, 0), \
.max_filter_counter = {LISTIFY(CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX, \
EIRQ_NXP_S32_FILTER_CONFIG, (,), n)}, \
}; \
static struct eirq_nxp_s32_cb eirq_nxp_s32_cb_##n[CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX]; \
static struct eirq_nxp_s32_data eirq_nxp_s32_data_##n = { \
.cb = eirq_nxp_s32_cb_##n, \
}; \
static int eirq_nxp_s32_init_##n(const struct device *dev) \
{ \
int err; \
\
err = eirq_nxp_s32_init(dev); \
if (err) { \
return err; \
} \
\
EIRQ_NXP_S32_IRQ_CONFIG(n); \
\
return 0; \
} \
DEVICE_DT_INST_DEFINE(n, eirq_nxp_s32_init_##n, NULL, &eirq_nxp_s32_data_##n, \
&eirq_nxp_s32_conf_##n, PRE_KERNEL_2, CONFIG_INTC_INIT_PRIORITY, \
NULL);
DT_INST_FOREACH_STATUS_OKAY(EIRQ_NXP_S32_INIT_DEVICE)
``` | /content/code_sandbox/drivers/interrupt_controller/intc_eirq_nxp_s32.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,616 |
```viml
#
if CPU_CORTEX_R5
config VIM
bool "TI Vectored Interrupt Manager"
default y
depends on DT_HAS_TI_VIM_ENABLED
help
The TI Vectored Interrupt Manager provides hardware assistance for prioritizing
and aggregating the interrupt sources for ARM Cortex-R5 processor cores.
endif # CPU_CORTEX_R5
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.vim | viml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 73 |
```c
/*
*
*/
/* Based on STM32 EXTI driver, which is (c) 2016 Open-RnD Sp. z o.o. */
#include <zephyr/device.h>
#include <zephyr/irq.h>
#include <errno.h>
#include <zephyr/drivers/interrupt_controller/nxp_pint.h>
#include <fsl_inputmux.h>
#include <fsl_power.h>
#define DT_DRV_COMPAT nxp_pint
static PINT_Type *pint_base = (PINT_Type *)DT_INST_REG_ADDR(0);
/* Describes configuration of PINT IRQ slot */
struct pint_irq_slot {
nxp_pint_cb_t callback;
void *user_data;
uint8_t pin: 6;
uint8_t used: 1;
uint8_t irq;
};
#define NO_PINT_ID 0xFF
/* Tracks IRQ configuration for each pint interrupt source */
static struct pint_irq_slot pint_irq_cfg[DT_INST_PROP(0, num_lines)];
/* Tracks pint interrupt source selected for each pin */
static uint8_t pin_pint_id[DT_INST_PROP(0, num_inputs)];
#define PIN_TO_INPUT_MUX_CONNECTION(pin) \
((PINTSEL_PMUX_ID << PMUX_SHIFT) + (pin))
/* Attaches pin to PINT IRQ slot using INPUTMUX */
static void attach_pin_to_pint(uint8_t pin, uint8_t pint_slot)
{
	/* INPUTMUX clock must be on while its registers are written */
	INPUTMUX_Init(INPUTMUX);
	/* Three parameters here- INPUTMUX base, the ID of the PINT slot,
	 * and a integer describing the GPIO pin.
	 */
	INPUTMUX_AttachSignal(INPUTMUX, pint_slot,
			      PIN_TO_INPUT_MUX_CONNECTION(pin));
	/* Disable INPUTMUX after making changes, this gates clock and
	 * saves power.
	 */
	INPUTMUX_Deinit(INPUTMUX);
}
/**
 * @brief Enable PINT interrupt source.
 *
 * @param pin: pin to use as interrupt source
 *     0-64, corresponding to GPIO0 pin 1 - GPIO1 pin 31)
 * @param trigger: one of nxp_pint_trigger flags
 * @param wake: indicates if the pin should wakeup the system
 * @return 0 on success, or negative value on error
 */
int nxp_pint_pin_enable(uint8_t pin, enum nxp_pint_trigger trigger, bool wake)
{
	uint8_t slot = 0U;

	/* Valid indices are 0..ARRAY_SIZE-1; the previous `>` comparison
	 * allowed pin == ARRAY_SIZE, reading one element past the array.
	 */
	if (pin >= ARRAY_SIZE(pin_pint_id)) {
		/* Invalid pin ID */
		return -EINVAL;
	}
	/* Find unused IRQ slot */
	if (pin_pint_id[pin] != NO_PINT_ID) {
		/* Pin already has a slot; reuse it */
		slot = pin_pint_id[pin];
	} else {
		for (slot = 0; slot < ARRAY_SIZE(pint_irq_cfg); slot++) {
			if (!pint_irq_cfg[slot].used) {
				break;
			}
		}
		if (slot == ARRAY_SIZE(pint_irq_cfg)) {
			/* No free IRQ slots */
			return -EBUSY;
		}
		pin_pint_id[pin] = slot;
	}
	pint_irq_cfg[slot].used = true;
	pint_irq_cfg[slot].pin = pin;
	/* Attach pin to interrupt slot using INPUTMUX */
	attach_pin_to_pint(pin, slot);
	/* Now configure the interrupt. No need to install callback, this
	 * driver handles the IRQ
	 */
	PINT_PinInterruptConfig(pint_base, slot, trigger, NULL);
#if !(defined(FSL_FEATURE_POWERLIB_EXTEND) && (FSL_FEATURE_POWERLIB_EXTEND != 0))
	if (wake) {
		EnableDeepSleepIRQ(pint_irq_cfg[slot].irq);
	} else {
		DisableDeepSleepIRQ(pint_irq_cfg[slot].irq);
		irq_enable(pint_irq_cfg[slot].irq);
	}
#endif
	return 0;
}
/**
 * @brief disable PINT interrupt source.
 *
 * @param pin: pin interrupt source to disable
 */
void nxp_pint_pin_disable(uint8_t pin)
{
	uint8_t slot;

	/* `>=`: pin == ARRAY_SIZE would index one past the array */
	if (pin >= ARRAY_SIZE(pin_pint_id)) {
		return;
	}
	slot = pin_pint_id[pin];
	if (slot == NO_PINT_ID) {
		return;
	}
	/* Remove this pin from the PINT slot if one was in use */
	pint_irq_cfg[slot].used = false;
	PINT_PinInterruptConfig(pint_base, slot, kPINT_PinIntEnableNone, NULL);
}
/**
 * @brief Install PINT callback
 *
 * @param pin: interrupt source to install callback for
 * @param cb: callback to install
 * @param data: user data to include in callback
 * @return 0 on success, or negative value on error
 */
int nxp_pint_pin_set_callback(uint8_t pin, nxp_pint_cb_t cb, void *data)
{
	uint8_t slot;

	/* `>=`: pin == ARRAY_SIZE would index one past the array */
	if (pin >= ARRAY_SIZE(pin_pint_id)) {
		return -EINVAL;
	}
	slot = pin_pint_id[pin];
	if (slot == NO_PINT_ID) {
		return -EINVAL;
	}
	pint_irq_cfg[slot].callback = cb;
	pint_irq_cfg[slot].user_data = data;
	return 0;
}
/**
 * @brief Remove PINT callback
 *
 * @param pin: interrupt source to remove callback for
 */
void nxp_pint_pin_unset_callback(uint8_t pin)
{
	uint8_t slot;

	/* `>=`: pin == ARRAY_SIZE would index one past the array */
	if (pin >= ARRAY_SIZE(pin_pint_id)) {
		return;
	}
	slot = pin_pint_id[pin];
	if (slot == NO_PINT_ID) {
		return;
	}
	pint_irq_cfg[slot].callback = NULL;
}
/* NXP PINT ISR handler- called with PINT slot ID */
static void nxp_pint_isr(uint8_t *slot)
{
	/* Acknowledge the slot before dispatching the user callback */
	PINT_PinInterruptClrStatus(pint_base, *slot);
	if (pint_irq_cfg[*slot].used && pint_irq_cfg[*slot].callback) {
		pint_irq_cfg[*slot].callback(pint_irq_cfg[*slot].pin,
					pint_irq_cfg[*slot].user_data);
	}
}
/* Defines PINT IRQ handler for a given irq index */
#define NXP_PINT_IRQ(idx, node_id) \
IF_ENABLED(DT_IRQ_HAS_IDX(node_id, idx), \
(static uint8_t nxp_pint_idx_##idx = idx; \
do { \
IRQ_CONNECT(DT_IRQ_BY_IDX(node_id, idx, irq), \
DT_IRQ_BY_IDX(node_id, idx, priority), \
nxp_pint_isr, &nxp_pint_idx_##idx, 0); \
irq_enable(DT_IRQ_BY_IDX(node_id, idx, irq)); \
pint_irq_cfg[idx].irq = DT_IRQ_BY_IDX(node_id, idx, irq); \
} while (false)))
/*
 * Driver init: connect and enable each parent IRQ (the handler receives
 * the PINT slot index as its argument), initialize the PINT peripheral
 * and mark every pin as having no slot assigned.
 */
static int intc_nxp_pint_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	/* First, connect IRQs for each interrupt.
	 * The IRQ handler will receive the PINT slot as a
	 * parameter.
	 */
	LISTIFY(8, NXP_PINT_IRQ, (;), DT_INST(0, DT_DRV_COMPAT));
	PINT_Init(pint_base);
	/* memset takes a byte count: use sizeof, not ARRAY_SIZE (they only
	 * coincided because the elements are uint8_t).
	 */
	memset(pin_pint_id, NO_PINT_ID, sizeof(pin_pint_id));
	return 0;
}
DEVICE_DT_INST_DEFINE(0, intc_nxp_pint_init, NULL, NULL, NULL,
PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY, NULL);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_nxp_pint.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,585 |
```unknown
# RV32M1 INTMUX config
config RV32M1_INTMUX
bool "OpenISA RV32M1 INTMUX interrupt controller support"
default y
depends on DT_HAS_OPENISA_RV32M1_INTMUX_ENABLED
depends on MULTI_LEVEL_INTERRUPTS
help
Select this option to enable support for the RV32M1 INTMUX
driver. This provides a level 2 interrupt controller for the SoC.
The INTMUX peripheral combines level 2 interrupts into
eight channels; each channel has its own level 1 interrupt to
the core.
if RV32M1_INTMUX
config RV32M1_INTMUX_INIT_PRIORITY
int "INTMUX driver initialization priority"
default 40
help
Boot time initialization priority for INTMUX driver.
Don't change the default unless you know what you are doing.
config RV32M1_INTMUX_CHANNEL_0
bool "INTMUX channel 0"
help
Enable support for INTMUX channel 0.
config RV32M1_INTMUX_CHANNEL_1
bool "INTMUX channel 1"
help
Enable support for INTMUX channel 1.
if !BT
config RV32M1_INTMUX_CHANNEL_2
bool "INTMUX channel 2"
help
Enable support for INTMUX channel 2.
config RV32M1_INTMUX_CHANNEL_3
bool "INTMUX channel 3"
help
Enable support for INTMUX channel 3.
endif # BT
config RV32M1_INTMUX_CHANNEL_4
bool "INTMUX channel 4"
help
Enable support for INTMUX channel 4.
config RV32M1_INTMUX_CHANNEL_5
bool "INTMUX channel 5"
help
Enable support for INTMUX channel 5.
config RV32M1_INTMUX_CHANNEL_6
bool "INTMUX channel 6"
help
Enable support for INTMUX channel 6.
config RV32M1_INTMUX_CHANNEL_7
bool "INTMUX channel 7"
help
Enable support for INTMUX channel 7.
endif # RV32M1_INTMUX
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.rv32m1 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 445 |
```unknown
# Microchip XEC ECIA configuration
config MCHP_ECIA_XEC
bool "External EC Interrupt Aggregator (ECIA) Driver for MCHP MEC family of MCUs"
default y
depends on DT_HAS_MICROCHIP_XEC_ECIA_ENABLED
help
Enable XEC ECIA driver for Microchip MEC line of MCUs
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.xec | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 78 |
```c
/*
*
*/
#define DT_DRV_COMPAT atmel_sam0_eic
#include <zephyr/device.h>
#include <zephyr/irq.h>
#include <soc.h>
#include <zephyr/drivers/interrupt_controller/sam0_eic.h>
#include "intc_sam0_eic_priv.h"
struct sam0_eic_line_assignment {
uint8_t pin : 5;
uint8_t port : 2;
uint8_t enabled : 1;
};
struct sam0_eic_port_data {
sam0_eic_callback_t cb;
void *data;
};
struct sam0_eic_data {
struct sam0_eic_port_data ports[PORT_GROUPS];
struct sam0_eic_line_assignment lines[EIC_EXTINT_NUM];
};
/* Busy-wait until the EIC reports register synchronization complete.
 * Newer SAM0 parts expose SYNCBUSY as its own register; older ones use
 * a bit in STATUS.
 */
static void wait_synchronization(void)
{
#ifdef REG_EIC_SYNCBUSY
	while (EIC->SYNCBUSY.reg) {
	}
#else
	while (EIC->STATUS.bit.SYNCBUSY) {
	}
#endif
}
/* Enable/disable the EIC peripheral; the control register name differs
 * between SAM0 generations (CTRLA vs CTRL).
 */
static inline void set_eic_enable(bool on)
{
#ifdef REG_EIC_CTRLA
	EIC->CTRLA.bit.ENABLE = on;
#else
	EIC->CTRL.bit.ENABLE = on;
#endif
}
/*
 * EIC interrupt service routine: acknowledge all pending lines, then map
 * each flagged line back to its assigned port and invoke the per-port
 * callback with the pin bitmask.
 */
static void sam0_eic_isr(const struct device *dev)
{
	struct sam0_eic_data *const dev_data = dev->data;
	uint16_t bits = EIC->INTFLAG.reg;
	uint32_t line_index;
	/* Acknowledge all interrupts */
	EIC->INTFLAG.reg = bits;
	/* No clz on M0, so just do a quick test */
#if __CORTEX_M >= 3
	line_index = __CLZ(__RBIT(bits));
	bits >>= line_index;
#else
	if (bits & 0xFF) {
		line_index = 0;
	} else {
		line_index = 8;
		bits >>= 8;
	}
#endif
	/*
	 * Map the EIC lines to the port pin masks based on which port is
	 * selected in the line data.
	 */
	for (; bits; bits >>= 1, line_index++) {
		if (!(bits & 1)) {
			continue;
		}
		/*
		 * These could be aggregated together into one call, but
		 * usually on a single one will be set, so just call them
		 * one by one.
		 */
		struct sam0_eic_line_assignment *line_assignment =
			&dev_data->lines[line_index];
		struct sam0_eic_port_data *port_data =
			&dev_data->ports[line_assignment->port];
		port_data->cb(BIT(line_assignment->pin), port_data->data);
	}
}
/*
 * Claim the EIC line mapped to (@a port, @a pin), program its trigger
 * condition and optional glitch filter, and register the per-port
 * callback. The EIC must be disabled while CONFIG registers are written,
 * so the whole sequence runs under irq_lock with the EIC re-enabled on
 * both the success and the -EBUSY path.
 *
 * Returns 0 on success, a negative mapping error from
 * sam0_eic_map_to_line(), or -EBUSY if the line belongs to another pin.
 */
int sam0_eic_acquire(int port, int pin, enum sam0_eic_trigger trigger,
		     bool filter, sam0_eic_callback_t cb, void *data)
{
	const struct device *const dev = DEVICE_DT_INST_GET(0);
	struct sam0_eic_data *dev_data = dev->data;
	struct sam0_eic_port_data *port_data;
	struct sam0_eic_line_assignment *line_assignment;
	uint32_t mask;
	int line_index;
	int config_index;
	int config_shift;
	unsigned int key;
	uint32_t config;
	line_index = sam0_eic_map_to_line(port, pin);
	if (line_index < 0) {
		return line_index;
	}
	mask = BIT(line_index);
	/* Each CONFIG register holds 8 lines, 4 bits (SENSE + FILTEN) each */
	config_index = line_index / 8;
	config_shift = (line_index % 8) * 4;
	/* Lock everything so it's safe to reconfigure */
	key = irq_lock();
	/* Disable the EIC for reconfiguration */
	set_eic_enable(0);
	line_assignment = &dev_data->lines[line_index];
	/* Check that the required line is available */
	if (line_assignment->enabled) {
		if (line_assignment->port != port ||
		    line_assignment->pin != pin) {
			goto err_in_use;
		}
	}
	/* Set the EIC configuration data */
	port_data = &dev_data->ports[port];
	port_data->cb = cb;
	port_data->data = data;
	line_assignment->pin = pin;
	line_assignment->port = port;
	line_assignment->enabled = 1;
	config = EIC->CONFIG[config_index].reg;
	config &= ~(0xF << config_shift);
	switch (trigger) {
	case SAM0_EIC_RISING:
		config |= EIC_CONFIG_SENSE0_RISE << config_shift;
		break;
	case SAM0_EIC_FALLING:
		config |= EIC_CONFIG_SENSE0_FALL << config_shift;
		break;
	case SAM0_EIC_BOTH:
		config |= EIC_CONFIG_SENSE0_BOTH << config_shift;
		break;
	case SAM0_EIC_HIGH:
		config |= EIC_CONFIG_SENSE0_HIGH << config_shift;
		break;
	case SAM0_EIC_LOW:
		config |= EIC_CONFIG_SENSE0_LOW << config_shift;
		break;
	}
	if (filter) {
		config |= EIC_CONFIG_FILTEN0 << config_shift;
	}
	/* Apply the config to the EIC itself */
	EIC->CONFIG[config_index].reg = config;
	set_eic_enable(1);
	wait_synchronization();
	/*
	 * Errata: The EIC generates a spurious interrupt for the newly
	 * enabled pin after being enabled, so clear it before re-enabling
	 * the IRQ.
	 */
	EIC->INTFLAG.reg = mask;
	irq_unlock(key);
	return 0;
err_in_use:
	set_eic_enable(1);
	wait_synchronization();
	irq_unlock(key);
	return -EBUSY;
}
static bool sam0_eic_check_ownership(int port, int pin, int line_index)
{
const struct device *const dev = DEVICE_DT_INST_GET(0);
struct sam0_eic_data *dev_data = dev->data;
struct sam0_eic_line_assignment *line_assignment =
&dev_data->lines[line_index];
if (!line_assignment->enabled) {
return false;
}
if (line_assignment->port != port ||
line_assignment->pin != pin) {
return false;
}
return true;
}
/*
 * Release the EIC line mapped to (@a port, @a pin): clear its trigger
 * configuration, mask it and drop the ownership record. A release by a
 * non-owner is silently ignored. Returns 0, or the negative mapping
 * error from sam0_eic_map_to_line().
 */
int sam0_eic_release(int port, int pin)
{
	const struct device *const dev = DEVICE_DT_INST_GET(0);
	struct sam0_eic_data *dev_data = dev->data;
	uint32_t mask;
	int line_index;
	int config_index;
	int config_shift;
	unsigned int key;
	line_index = sam0_eic_map_to_line(port, pin);
	if (line_index < 0) {
		return line_index;
	}
	mask = BIT(line_index);
	/* Each CONFIG register holds 8 lines, 4 bits each */
	config_index = line_index / 8;
	config_shift = (line_index % 8) * 4;
	/* Lock everything so it's safe to reconfigure */
	key = irq_lock();
	/* Disable the EIC */
	set_eic_enable(0);
	wait_synchronization();
	/*
	 * Check to make sure the requester actually owns the line and do
	 * nothing if it does not.
	 */
	if (!sam0_eic_check_ownership(port, pin, line_index)) {
		goto done;
	}
	dev_data->lines[line_index].enabled = 0;
	/* Clear the EIC config, including the trigger condition */
	EIC->CONFIG[config_index].reg &= ~(0xF << config_shift);
	/* Clear any pending interrupt for it */
	EIC->INTENCLR.reg = mask;
	EIC->INTFLAG.reg = mask;
done:
	set_eic_enable(1);
	wait_synchronization();
	irq_unlock(key);
	return 0;
}
int sam0_eic_enable_interrupt(int port, int pin)
{
uint32_t mask;
int line_index;
line_index = sam0_eic_map_to_line(port, pin);
if (line_index < 0) {
return line_index;
}
if (!sam0_eic_check_ownership(port, pin, line_index)) {
return -EBUSY;
}
mask = BIT(line_index);
EIC->INTFLAG.reg = mask;
EIC->INTENSET.reg = mask;
return 0;
}
/**
 * @brief Mask the EIC interrupt for @p port / @p pin.
 *
 * Also acknowledges any pending flag after masking so the line starts
 * clean when it is next enabled.
 *
 * @return 0 on success, -EBUSY if the caller does not own the line, or a
 *         negative value if the pin does not map to any EIC line.
 */
int sam0_eic_disable_interrupt(int port, int pin)
{
	uint32_t mask;
	int line_index;
	line_index = sam0_eic_map_to_line(port, pin);
	if (line_index < 0) {
		return line_index;
	}
	if (!sam0_eic_check_ownership(port, pin, line_index)) {
		return -EBUSY;
	}
	mask = BIT(line_index);
	EIC->INTENCLR.reg = mask;
	EIC->INTFLAG.reg = mask;
	return 0;
}
uint32_t sam0_eic_interrupt_pending(int port)
{
const struct device *const dev = DEVICE_DT_INST_GET(0);
struct sam0_eic_data *dev_data = dev->data;
struct sam0_eic_line_assignment *line_assignment;
uint32_t set = EIC->INTFLAG.reg;
uint32_t mask = 0;
for (int line_index = 0; line_index < EIC_EXTINT_NUM; line_index++) {
line_assignment = &dev_data->lines[line_index];
if (!line_assignment->enabled) {
continue;
}
if (line_assignment->port != port) {
continue;
}
if (!(set & BIT(line_index))) {
continue;
}
mask |= BIT(line_assignment->pin);
}
return mask;
}
/*
 * Statically connect and enable IRQ index @n of the EIC devicetree node.
 * Wrapped in do { } while (false) so the macro expands to one statement.
 */
#define SAM0_EIC_IRQ_CONNECT(n) \
	do { \
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, n, irq), \
			    DT_INST_IRQ_BY_IDX(0, n, priority), \
			    sam0_eic_isr, DEVICE_DT_INST_GET(0), 0); \
		irq_enable(DT_INST_IRQ_BY_IDX(0, n, irq)); \
	} while (false)
/**
 * @brief One-time EIC driver initialization.
 *
 * Enables the EIC bus and generic clocks, statically connects every IRQ
 * line declared for the devicetree instance, then enables the peripheral.
 *
 * @param dev EIC device (unused; the instance is fixed at index 0)
 * @return always 0
 */
static int sam0_eic_init(const struct device *dev)
{
	ARG_UNUSED(dev);
#ifdef MCLK
	/* Enable the EIC clock in APBAMASK */
	MCLK->APBAMASK.reg |= MCLK_APBAMASK_EIC;
	/* Enable the GCLK */
	GCLK->PCHCTRL[EIC_GCLK_ID].reg = GCLK_PCHCTRL_GEN_GCLK0 |
					 GCLK_PCHCTRL_CHEN;
#else
	/* Enable the EIC clock in PM */
	PM->APBAMASK.bit.EIC_ = 1;
	/* Enable the GCLK */
	GCLK->CLKCTRL.reg = GCLK_CLKCTRL_ID_EIC | GCLK_CLKCTRL_GEN_GCLK0 |
			    GCLK_CLKCTRL_CLKEN;
#endif
	/* Connect each IRQ index the devicetree node declares */
#if DT_INST_IRQ_HAS_CELL(0, irq)
	SAM0_EIC_IRQ_CONNECT(0);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 1)
	SAM0_EIC_IRQ_CONNECT(1);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 2)
	SAM0_EIC_IRQ_CONNECT(2);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 3)
	SAM0_EIC_IRQ_CONNECT(3);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 4)
	SAM0_EIC_IRQ_CONNECT(4);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 5)
	SAM0_EIC_IRQ_CONNECT(5);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 6)
	SAM0_EIC_IRQ_CONNECT(6);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 7)
	SAM0_EIC_IRQ_CONNECT(7);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 8)
	SAM0_EIC_IRQ_CONNECT(8);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 9)
	SAM0_EIC_IRQ_CONNECT(9);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 10)
	SAM0_EIC_IRQ_CONNECT(10);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 11)
	SAM0_EIC_IRQ_CONNECT(11);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 12)
	SAM0_EIC_IRQ_CONNECT(12);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 13)
	SAM0_EIC_IRQ_CONNECT(13);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 14)
	SAM0_EIC_IRQ_CONNECT(14);
#endif
#if DT_INST_IRQ_HAS_IDX(0, 15)
	SAM0_EIC_IRQ_CONNECT(15);
#endif
	set_eic_enable(1);
	wait_synchronization();
	return 0;
}
/* Single EIC instance: per-line ownership state lives in this object */
static struct sam0_eic_data eic_data;
DEVICE_DT_INST_DEFINE(0, sam0_eic_init,
		      NULL, &eic_data, NULL,
		      PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY,
		      NULL);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_sam0_eic.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,674 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_DW_H_
#define ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_DW_H_
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Callback that wires up the controller's parent IRQ(s) at init time. */
typedef void (*dw_ictl_config_irq_t)(const struct device *dev);
/* Static (ROM-able) configuration for one DW interrupt controller instance. */
struct dw_ictl_config {
	uint32_t base_addr;		/* MMIO base of the register block */
	uint32_t numirqs;		/* number of input interrupt lines */
	uint32_t isr_table_offset;	/* first slot in the SW ISR table */
	dw_ictl_config_irq_t config_func;
};
/*
 * Memory-mapped register layout of the DesignWare interrupt controller.
 * Offsets (in the trailing comments) are bytes from base_addr; the _l/_h
 * pairs form 64-bit views over up to 64 input lines.
 */
struct dw_ictl_registers {
	uint32_t irq_inten_l;		/* offset 00 */
	uint32_t irq_inten_h;		/* offset 04 */
	uint32_t irq_intmask_l;		/* offset 08 */
	uint32_t irq_intmask_h;		/* offset 0C */
	uint32_t irq_intforce_l;	/* offset 10 */
	uint32_t irq_intforce_h;	/* offset 14 */
	uint32_t irq_rawstatus_l;	/* offset 18 */
	uint32_t irq_rawstatus_h;	/* offset 1c */
	uint32_t irq_status_l;		/* offset 20 */
	uint32_t irq_status_h;		/* offset 24 */
	uint32_t irq_maskstatus_l;	/* offset 28 */
	uint32_t irq_maskstatus_h;	/* offset 2c */
	uint32_t irq_finalstatus_l;	/* offset 30 */
	uint32_t irq_finalstatus_h;	/* offset 34 */
	uint32_t irq_vector;		/* offset 38 */
	uint32_t Reserved1;		/* offset 3c */
	uint32_t irq_vector_0;		/* offset 40 */
	uint32_t Reserved2;		/* offset 44 */
	uint32_t irq_vector_1;		/* offset 48 */
	uint32_t Reserved3;		/* offset 4c */
	uint32_t irq_vector_2;		/* offset 50 */
	uint32_t Reserved4;		/* offset 54 */
	uint32_t irq_vector_3;		/* offset 58 */
	uint32_t Reserved5;		/* offset 5c */
	uint32_t irq_vector_4;		/* offset 60 */
	uint32_t Reserved6;		/* offset 64 */
	uint32_t irq_vector_5;		/* offset 68 */
	uint32_t Reserved7;		/* offset 6c */
	uint32_t irq_vector_6;		/* offset 70 */
	uint32_t Reserved8;		/* offset 74 */
	uint32_t irq_vector_7;		/* offset 78 */
	uint32_t Reserved9;		/* offset 7c */
	uint32_t irq_vector_8;		/* offset 80 */
	uint32_t Reserved10;		/* offset 84 */
	uint32_t irq_vector_9;		/* offset 88 */
	uint32_t Reserved11;		/* offset 8c */
	uint32_t irq_vector_10;		/* offset 90 */
	uint32_t Reserved12;		/* offset 94 */
	uint32_t irq_vector_11;		/* offset 98 */
	uint32_t Reserved13;		/* offset 9c */
	uint32_t irq_vector_12;		/* offset a0 */
	uint32_t Reserved14;		/* offset a4 */
	uint32_t irq_vector_13;		/* offset a8 */
	uint32_t Reserved15;		/* offset ac */
	uint32_t irq_vector_14;		/* offset b0 */
	uint32_t Reserved16;		/* offset b4 */
	uint32_t irq_vector_15;		/* offset b8 */
	uint32_t Reserved17;		/* offset bc */
	uint32_t fiq_inten;		/* offset c0 */
	uint32_t fiq_intmask;		/* offset c4 */
	uint32_t fiq_intforce;		/* offset c8 */
	uint32_t fiq_rawstatus;		/* offset cc */
	uint32_t fiq_status;		/* offset d0 */
	uint32_t fiq_finalstatus;	/* offset d4 */
	uint32_t irq_plevel;		/* offset d8 */
	uint32_t Reserved18;		/* offset dc */
	uint32_t APB_ICTL_COMP_VERSION;	/* offset e0 */
	uint32_t Reserved19[199];
};
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_DW_H_ */
``` | /content/code_sandbox/drivers/interrupt_controller/intc_dw.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 967 |
```unknown
# NPCX GPIO driver configuration options
config NPCX_MIWU
bool "Nuvoton NPCX embedded controller (EC) miwu driver"
default y
depends on DT_HAS_NUVOTON_NPCX_MIWU_ENABLED
help
This option enables the Multi-Input Wake-Up Unit (MIWU) driver
for NPCX family of processors.
This is required for GPIO, RTC, LPC/eSPI interrupt support.
config NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
bool
default y if SOC_NPCX9M7FB
help
Work around the issue "MIWU Any Edge Trigger Condition"
described in the npcx9m7fb SoC errata.
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.npcx | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 151 |
```unknown
config DW_ICTL_ACE
bool "Designware Interrupt Controller for ACE"
default y
depends on DT_HAS_INTEL_ACE_INTC_ENABLED
depends on MULTI_LEVEL_INTERRUPTS
help
Designware Interrupt Controller used by ACE.
menuconfig DW_ICTL
bool "Designware Interrupt Controller"
default y
depends on DT_HAS_SNPS_DESIGNWARE_INTC_ENABLED
depends on MULTI_LEVEL_INTERRUPTS
help
Designware Interrupt Controller can be used as a 2nd level interrupt
controller which combines several sources of interrupt into one line
that is then routed to the 1st level interrupt controller.
if DW_ICTL
config DW_ICTL_NAME
string "Name for Designware Interrupt Controller"
default "DW_ICTL"
help
Give a name for the instance of Designware Interrupt Controller
config DW_ISR_TBL_OFFSET
int "Offset in the SW ISR Table"
default 0
help
This indicates the offset in the SW_ISR_TABLE beginning from where
the ISRs for Designware Interrupt Controller are assigned.
config DW_ICTL_INIT_PRIORITY
int "Init priority for DW interrupt controller"
default 48
help
DesignWare Interrupt Controller initialization priority.
endif # DW_ICTL
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.dw | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 259 |
```c
/*
*
*/
/**
* @file
* @brief Driver for NXP's IRQ_STEER IP.
*
* Below you may find some useful information that will help you better understand how the
* driver works. The ">" sign is used to mark ideas that are considered important and should
* be taken note of.
*
* 1) What is the IRQ_STEER IP?
* - in Zephyr terminology, the IRQ_STEER can be considered an interrupt aggregator. As such,
* its main goal is to multiplex multiple interrupt lines into a single/multiple ones.
*
* 2) How does the IRQ_STEER IP work?
* - below you may find a diagram meant to give you an intuition regarding the IP's structure
* and how it works (all of the information below is applicable to i.MX8MP but it can be
* extended to any NXP SoC using the IRQ_STEER IP):
*
* SYSTEM_INTID[0:159]
* |
* MASK[0:4]------ |
* | |
* +------+
* | |
* |32 AND|
* | |
* +------+
* |
* SET[0:4]------ |
* | |
* +------+
* | |
* |32 OR |
* | |
* +------+
* |__________ STATUS[0:4]
* |
* +------+
* |GROUP |
* | BY |
* | 64 |
* +------+
* | | |
* _____________| | |________________
* | | |
* MASTER_IN[0] MASTER_IN[1] MASTER_IN[2]
* | | |
* | | |
* |_____________ | _______________|
* | | |
* +------+
* | |
* | AND | ---------- MINTDIS[0:2]
* | |
* +------+
* | | |
* _____________| | |________________
* | | |
* MASTER_OUT[0] MASTER_OUT[1] MASTER_OUT[2]
*
* - initially, all SYSTEM_INTID are grouped by 32 => 5 groups.
*
* > each of these groups is controlled by a MASK, SET and STATUS index as follows:
*
* MASK/SET/STATUS[0] => SYSTEM_INTID[159:128]
* MASK/SET/STATUS[1] => SYSTEM_INTID[127:96]
* MASK/SET/STATUS[2] => SYSTEM_INTID[95:64]
* MASK/SET/STATUS[3] => SYSTEM_INTID[63:32]
* MASK/SET/STATUS[4] => SYSTEM_INTID[31:0]
*
* > after that, all SYSTEM_INTID are grouped by 64 as follows:
*
* SYSTEM_INTID[159:96] => MASTER_IN[2]
* SYSTEM_INTID[95:32] => MASTER_IN[1]
* SYSTEM_INTID[31:0] => MASTER_IN[0]
*
* note: MASTER_IN[0] is only responsible for 32 interrupts
*
* > the value of MASTER_IN[x] is obtained by OR'ing the input interrupt lines.
*
* > the value of MASTER_OUT[x] is obtained by AND'ing MASTER_IN[x] with !MINTDIS[x].
*
* - whenever a SYSTEM_INTID is asserted, its corresponding MASTER_OUT signal will also
* be asserted, thus signaling the target processor.
*
* > please note the difference between an IRQ_STEER channel and an IRQ_STEER master output.
* An IRQ_STEER channel refers to an IRQ_STEER instance (e.g: the DSP uses IRQ_STEER channel
* 0 a.k.a instance 0). An IRQ_STEER channel has multiple master outputs. For example, in
* the case of i.MX8MP each IRQ_STEER channel has 3 master outputs since an IRQ_STEER channel
* routes 160 interrupts (32 for first master output, 64 for second master output, and 64 for
* the third master output).
*
* 3) Using Zephyr's multi-level interrupt support
* - since Zephyr supports organizing interrupts on multiple levels, we can use this to
* separate the interrupts in 2 levels:
* 1) LEVEL 1 INTERRUPTS
* - these are the interrupts that go directly to the processor (for example,
* on i.MX8MP the MU can directly assert the DSP's interrupt line 7)
*
* 2) LEVEL 2 INTERRUPTS
* - these interrupts go through IRQ_STEER and are signaled by a single
* processor interrupt line.
* - e.g: for i.MX8MP, INTID 34 (SDMA3) goes through IRQ_STEER and is signaled
* to the DSP by INTID 20 which is a direct interrupt (or LEVEL 1 interrupt).
*
* - the following diagram (1) shows the interrupt organization on i.MX8MP:
* +------------+
* | |
* SYSTEM_INTID[31:0] ------ IRQ_STEER_MASTER_0 ---- | 19 |
* | |
* SYSTEM_INTID[95:32] ----- IRQ_STEER_MASTER_1 ---- | 20 DSP |
* | |
* SYSTEM_INTID[159:96] ---- IRQ_STEER_MASTER_2 ---- | 21 |
* | |
* +------------+
*
* - as such, asserting a system interrupt will lead to asserting its corresponding DSP
* interrupt line (for example, if system interrupt 34 is asserted, that would lead to
* interrupt 20 being asserted)
*
* - in the above diagram, SYSTEM_INTID[x] are LEVEL 2 interrupts, while 19, 20, and 21 are
* LEVEL 1 interrupts.
*
* - INTID 19 is the parent of SYSTEM_INTID[31:0] and so on.
*
* > before going into how the INTIDs are encoded, we need to distinguish between 3 types of
* INTIDs:
* 1) System INTIDs
* - these are the values that can be found in NXP's TRMs for different
* SoCs (usually they have the same IDs as the GIC SPIs)
* - for example, INTID 34 is a system INTID for SDMA3 (i.MX8MP).
*
* 2) Zephyr INTIDs
* - these are the Zephyr-specific encodings of the system INTIDs.
* - these are used to encode multi-level interrupts (for more information
* please see [1])
* > if you need to register an interrupt dynamically, you need to use this
* encoding when specifying the interrupt.
*
* 3) DTS INTIDs
* - these are the encodings of the system INTIDs used in the DTS.
* - all of these INTIDs are relative to IRQ_STEER's MASTER_OUTs.
*
* > encoding an INTID:
* 1) SYSTEM INTID => ZEPHYR INTID
* - the following steps need to be performed:
*
* a) Find out which IRQ_STEER MASTER
* is in charge of aggregating this interrupt.
* * for instance, SYSTEM_INTID 34 (SDMA3 on i.MX8MP) is
* aggregated by MASTER 1 as depicted in diagram (1).
*
* b) After finding the MASTER aggregator, you need
* to find the corresponding parent interrupt.
* * for example, SYSTEM_INTID 34 (SDMA3 on i.MX8MP) is
* aggregated by MASTER 1, which has the parent INTID 20
* as depicted in diagram (1) => PARENT_INTID(34) = 20.
*
* c) Find the INTID relative to the MASTER aggregator. This is done
* by subtracting the number of interrupts each of the previous
* master aggregators is in charge of. If the master aggregator is
* MASTER 0 then RELATIVE_INTID=SYSTEM_INTID.
* * for example, SYSTEM_ID 34 is aggregated by MASTER 1.
* As such, we need to subtract 32 from 34 (because the
* previous master - MASTER 0 - is in charge of aggregating
* 32 interrupts) => RELATIVE_INTID(34) = 2.
*
* * generally speaking, RELATIVE_INTID can be computed using
* the following formula (assuming SYSTEM_INTID belongs to
* MASTER y):
* RELATIVE_INTID(x) = x -
* \sum{i=0}^{y - 1} GET_MASTER_INT_NUM(i)
* where:
* 1) GET_MASTER_INT_NUM(x) computes the number of
* interrupts master x aggregates
* 2) x is the system interrupt
*
* * to make sure your computation is correct use the
* following restriction:
* 0 <= RELATIVE_INTID(x) < GET_MASTER_INT_NUM(y)
*
* d) To the obtained RELATIVE_INTID you need to add the value of 1,
* left shift the result by the number of bits used to encode the
* level 1 interrupts (see [1] for details) and OR the parent ID.
* * for example, RELATIVE_INTID(34) = 2 (i.MX8MP),
* PARENT_INTID(34) = 20 => ZEPHYR_INTID = ((2 + 1) << 8) | 20
*
* * generally speaking, ZEPHYR_INTID can be computed using
* the following formula:
* ZEPHYR_INTID(x) = ((RELATIVE_INTID(x) + 1) <<
* NUM_LVL1_BITS) | PARENT_INTID(x)
* where:
* 1) RELATIVE_INTID(x) computes the relative INTID
* of system interrupt x (step c).
*
* 2) NUM_LVL1_BITS is the number of bits used to
* encode level 1 interrupts.
*
* 3) PARENT_INTID(x) computes the parent INTID of a
* system interrupt x (step b)
*
* - all of these steps are performed by to_zephyr_irq().
* > for interrupts aggregated by MASTER 0 you may skip step c) as
* RELATIVE_INTID(x) = x.
*
* 2) SYSTEM INTID => DTS INTID
* - for this you just have to compute RELATIVE_INTID as described above in
* step c).
* - for example, if an IP uses INTID 34 you'd write its interrupts property
* as follows (i.MX8MP):
* interrupts = <&master1 2>;
*
* 4) Notes and comments
* > PLEASE DON'T MISTAKE THE ZEPHYR MULTI-LEVEL INTERRUPT ORGANIZATION WITH THE XTENSA ONE.
* THEY ARE DIFFERENT THINGS.
*
* [1]: path_to_url#multi-level-interrupt-handling
*/
#include <zephyr/device.h>
#include <zephyr/devicetree/interrupt_controller.h>
#include <zephyr/irq.h>
#include <fsl_irqsteer.h>
#include <zephyr/cache.h>
#include <zephyr/sw_isr_table.h>
#include "sw_isr_common.h"
/* used for driver binding */
#define DT_DRV_COMPAT nxp_irqsteer_intc
/* macros used for DTS parsing */
/* macros used for DTS parsing */
/* Statically connect a dispatcher ISR to one master output's parent IRQ */
#define _IRQSTEER_REGISTER_DISPATCHER(node_id) \
	IRQ_CONNECT(DT_IRQN(node_id), \
		    DT_IRQ(node_id, priority), \
		    irqsteer_isr_dispatcher, \
		    &dispatchers[DT_REG_ADDR(node_id)], \
		    0)
/* Build the static initializer for one dispatcher entry */
#define _IRQSTEER_DECLARE_DISPATCHER(node_id) \
{ \
	.dev = DEVICE_DT_GET(DT_PARENT(node_id)), \
	.master_index = DT_REG_ADDR(node_id), \
	.irq = DT_IRQN(node_id), \
}
#define IRQSTEER_DECLARE_DISPATCHERS(parent_id)\
	DT_FOREACH_CHILD_STATUS_OKAY_SEP(parent_id, _IRQSTEER_DECLARE_DISPATCHER, (,))
#define IRQSTEER_REGISTER_DISPATCHERS(parent_id)\
	DT_FOREACH_CHILD_STATUS_OKAY_SEP(parent_id, _IRQSTEER_REGISTER_DISPATCHER, (;))
/* utility macros */
#define UINT_TO_IRQSTEER(x) ((IRQSTEER_Type *)(x))
/* Static per-controller configuration */
struct irqsteer_config {
	uint32_t regmap_phys;	/* physical base address of the register map */
	uint32_t regmap_size;
	struct irqsteer_dispatcher *dispatchers;
};
/* One dispatcher per IRQ_STEER master output */
struct irqsteer_dispatcher {
	const struct device *dev;
	/* which set of interrupts is the dispatcher in charge of? */
	uint32_t master_index;
	/* which interrupt line is the dispatcher tied to? */
	uint32_t irq;
};
/* One entry per enabled master-output child node of the irqsteer node */
static struct irqsteer_dispatcher dispatchers[] = {
	IRQSTEER_DECLARE_DISPATCHERS(DT_NODELABEL(irqsteer))
};
/*
 * Translate a system INTID into Zephyr's level-2 encoding, relative to the
 * master output handled by @dispatcher.  The relative INTID is obtained by
 * subtracting the interrupt counts of all preceding master outputs.
 */
static int to_zephyr_irq(uint32_t regmap, uint32_t irq,
			 struct irqsteer_dispatcher *dispatcher)
{
	int master;
	int relative = irq;

	for (master = 0; master < dispatcher->master_index; master++) {
		relative -= IRQSTEER_GetMasterIrqCount(UINT_TO_IRQSTEER(regmap), master);
	}

	return irq_to_level_2(relative) | dispatcher->irq;
}
/*
 * Translate a master-relative INTID back to a system INTID by adding the
 * interrupt counts of all master outputs preceding @master_index.
 */
static int to_system_irq(uint32_t regmap, int irq, int master_index)
{
	int master;

	for (master = 0; master < master_index; master++) {
		irq += IRQSTEER_GetMasterIrqCount(UINT_TO_IRQSTEER(regmap), master);
	}

	return irq;
}
/*
 * Translate a Zephyr level-2 INTID (already stripped of its parent bits)
 * into a system INTID for the given master output.
 */
static int from_zephyr_irq(uint32_t regmap, uint32_t irq, uint32_t master_index)
{
	int system_id = irq;

	for (uint32_t master = 0; master < master_index; master++) {
		system_id += IRQSTEER_GetMasterIrqCount(UINT_TO_IRQSTEER(regmap), master);
	}

	return system_id;
}
/**
 * @brief Enable or disable a (possibly multi-level) interrupt.
 *
 * Level 1 INTIDs are DSP-direct and toggled at the Xtensa core; level 2
 * INTIDs are translated back to a system INTID and toggled in IRQ_STEER.
 * If no dispatcher matches the parent IRQ the request is silently ignored.
 */
void z_soc_irq_enable_disable(uint32_t irq, bool enable)
{
	uint32_t parent_irq;
	int i, system_irq, level2_irq;
	const struct irqsteer_config *cfg;
	if (irq_get_level(irq) == 1) {
		/* LEVEL 1 interrupts are DSP direct */
		if (enable) {
			xtensa_irq_enable(XTENSA_IRQ_NUMBER(irq));
		} else {
			xtensa_irq_disable(XTENSA_IRQ_NUMBER(irq));
		}
		return;
	}
	parent_irq = irq_parent_level_2(irq);
	level2_irq = irq_from_level_2(irq);
	/* find dispatcher responsible for this interrupt */
	for (i = 0; i < ARRAY_SIZE(dispatchers); i++) {
		if (dispatchers[i].irq != parent_irq) {
			continue;
		}
		cfg = dispatchers[i].dev->config;
		/* convert master-relative INTID to system INTID */
		system_irq = from_zephyr_irq(cfg->regmap_phys, level2_irq,
					     dispatchers[i].master_index);
		if (enable) {
			IRQSTEER_EnableInterrupt(UINT_TO_IRQSTEER(cfg->regmap_phys),
						 system_irq);
		} else {
			IRQSTEER_DisableInterrupt(UINT_TO_IRQSTEER(cfg->regmap_phys),
						  system_irq);
		}
		return;
	}
}
/* Enable a Zephyr-encoded interrupt (level 1 or level 2). */
void z_soc_irq_enable(uint32_t irq)
{
	z_soc_irq_enable_disable(irq, true);
}
/* Disable a Zephyr-encoded interrupt (level 1 or level 2). */
void z_soc_irq_disable(uint32_t irq)
{
	z_soc_irq_enable_disable(irq, false);
}
/**
 * @brief Query whether a Zephyr-encoded interrupt is enabled.
 *
 * @return non-zero if enabled; false also when no dispatcher is
 *         responsible for the level 2 parent IRQ.
 */
int z_soc_irq_is_enabled(unsigned int irq)
{
	uint32_t parent_irq;
	int i, system_irq, level2_irq;
	const struct irqsteer_config *cfg;
	if (irq_get_level(irq) == 1) {
		/* LEVEL 1 interrupts are DSP direct */
		return xtensa_irq_is_enabled(XTENSA_IRQ_NUMBER(irq));
	}
	parent_irq = irq_parent_level_2(irq);
	level2_irq = irq_from_level_2(irq);
	/* find dispatcher responsible for this interrupt */
	for (i = 0; i < ARRAY_SIZE(dispatchers); i++) {
		if (dispatchers[i].irq != parent_irq) {
			continue;
		}
		cfg = dispatchers[i].dev->config;
		system_irq = from_zephyr_irq(cfg->regmap_phys, level2_irq,
					     dispatchers[i].master_index);
		return IRQSTEER_InterruptIsEnabled(UINT_TO_IRQSTEER(cfg->regmap_phys), system_irq);
	}
	return false;
}
/*
 * Parent ISR for one master output: walks the master's status bits and
 * invokes the registered child ISR for every asserted line.
 */
static void irqsteer_isr_dispatcher(const void *data)
{
	struct irqsteer_dispatcher *dispatcher;
	const struct irqsteer_config *cfg;
	uint32_t table_idx;
	int system_irq, zephyr_irq, i;
	uint64_t status;
	dispatcher = (struct irqsteer_dispatcher *)data;
	cfg = dispatcher->dev->config;
	/* fetch master interrupts status */
	status = IRQSTEER_GetMasterInterruptsStatus(UINT_TO_IRQSTEER(cfg->regmap_phys),
						    dispatcher->master_index);
	/* loop ends as soon as no asserted bits remain */
	for (i = 0; status; i++) {
		/* if bit 0 is set then that means relative INTID i is asserted */
		if (status & 1) {
			/* convert master-relative INTID to a system INTID */
			system_irq = to_system_irq(cfg->regmap_phys, i,
						   dispatcher->master_index);
			/* convert system INTID to a Zephyr INTID */
			zephyr_irq = to_zephyr_irq(cfg->regmap_phys, system_irq, dispatcher);
			/* compute index in the SW ISR table */
			table_idx = z_get_sw_isr_table_idx(zephyr_irq);
			/* call child's ISR */
			_sw_isr_table[table_idx].isr(_sw_isr_table[table_idx].arg);
		}
		status >>= 1;
	}
}
static void irqsteer_enable_dispatchers(const struct device *dev)
{
int i;
struct irqsteer_dispatcher *dispatcher;
const struct irqsteer_config *cfg;
cfg = dev->config;
for (i = 0; i < ARRAY_SIZE(dispatchers); i++) {
dispatcher = &dispatchers[i];
IRQSTEER_EnableMasterInterrupt(UINT_TO_IRQSTEER(cfg->regmap_phys),
dispatcher->irq);
xtensa_irq_enable(XTENSA_IRQ_NUMBER(dispatcher->irq));
}
}
/* Driver init: statically connect dispatcher ISRs, then unmask everything. */
static int irqsteer_init(const struct device *dev)
{
	IRQSTEER_REGISTER_DISPATCHERS(DT_NODELABEL(irqsteer));
	/* enable all dispatchers */
	irqsteer_enable_dispatchers(dev);
	return 0;
}
/* TODO: do we need to add support for MMU-based SoCs? */
static struct irqsteer_config irqsteer_config = {
	.regmap_phys = DT_REG_ADDR(DT_NODELABEL(irqsteer)),
	.regmap_size = DT_REG_SIZE(DT_NODELABEL(irqsteer)),
	.dispatchers = dispatchers,
};
/* assumption: only 1 IRQ_STEER instance */
DEVICE_DT_INST_DEFINE(0,
		      &irqsteer_init,
		      NULL,
		      NULL, &irqsteer_config,
		      PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY,
		      NULL);
/* Register each enabled master output as a level 2 interrupt aggregator */
#define NXP_IRQSTEER_MASTER_IRQ_ENTRY_DEF(node_id) \
	IRQ_PARENT_ENTRY_DEFINE(CONCAT(nxp_irqsteer_master_, DT_NODE_CHILD_IDX(node_id)), NULL, \
				DT_IRQN(node_id), INTC_CHILD_ISR_TBL_OFFSET(node_id), \
				DT_INTC_GET_AGGREGATOR_LEVEL(node_id));
DT_INST_FOREACH_CHILD_STATUS_OKAY(0, NXP_IRQSTEER_MASTER_IRQ_ENTRY_DEF);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_nxp_irqsteer.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,449 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_ITE_IT8XXX2_H_
#define ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_ITE_IT8XXX2_H_
#include <zephyr/dt-bindings/interrupt-controller/ite-intc.h>
#include <ilm.h>
#include <soc.h>
#endif /* ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_ITE_IT8XXX2_H_ */
``` | /content/code_sandbox/drivers/interrupt_controller/intc_ite_it8xxx2.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 91 |
```unknown
config INTC_MTK_ADSP
bool "MediaTek Audio DSP Interrupt Controller"
help
Very simple cascaded interrupt controller consisting of two
bitfield registers (status and enable) and one mask value
defining valid interrupts.
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.mtk_adsp | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 51 |
```c
/*
*
*/
/**
* @file
* @brief system module for variants with LOAPIC
*
*/
#include <zephyr/sys/__assert.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/drivers/interrupt_controller/ioapic.h>
#include <zephyr/drivers/interrupt_controller/loapic.h>
#include <zephyr/drivers/interrupt_controller/sysapic.h>
#include <zephyr/irq.h>
#include <zephyr/linker/sections.h>
#define IS_IOAPIC_IRQ(irq) ((irq) < z_loapic_irq_base())
#define HARDWARE_IRQ_LIMIT ((z_loapic_irq_base() + LOAPIC_IRQ_COUNT) - 1)
/**
 * @brief Program the interrupt controller for a virtualized IRQ.
 *
 * IRQs below z_loapic_irq_base() are served by the IOAPIC and programmed
 * there; the remainder belong to the LOAPIC and are programmed with an
 * index relative to the LOAPIC's IRQ base.  Drivers call this instead of
 * IRQ_CONNECT() when interrupts are configured dynamically rather than
 * statically.
 *
 * @param vector the vector number
 * @param irq the virtualized IRQ
 * @param flags interrupt flags
 */
__boot_func
void z_irq_controller_irq_config(unsigned int vector, unsigned int irq,
				 uint32_t flags)
{
	__ASSERT(irq <= HARDWARE_IRQ_LIMIT, "invalid irq line");

	if (IS_IOAPIC_IRQ(irq)) {
		z_ioapic_irq_set(irq, vector, flags);
		return;
	}

	z_loapic_int_vec_set(irq - z_loapic_irq_base(), vector);
}
/**
 * @brief Enable an individual interrupt (IRQ).
 *
 * Provided by the interrupt controller driver (rather than the arch core)
 * because this platform virtualizes IRQ numbers across the IOAPIC and
 * LOAPIC: IOAPIC-owned IRQs are enabled there, LOAPIC IRQs are enabled
 * using a LOAPIC-relative index.
 */
__pinned_func
void arch_irq_enable(unsigned int irq)
{
	if (!IS_IOAPIC_IRQ(irq)) {
		z_loapic_irq_enable(irq - z_loapic_irq_base());
		return;
	}

	z_ioapic_irq_enable(irq);
}
/**
 * @brief Disable an individual interrupt (IRQ).
 *
 * Mirror of arch_irq_enable(): dispatches to the IOAPIC or LOAPIC
 * depending on which controller owns the virtualized IRQ number.
 */
__pinned_func
void arch_irq_disable(unsigned int irq)
{
	if (!IS_IOAPIC_IRQ(irq)) {
		z_loapic_irq_disable(irq - z_loapic_irq_base());
		return;
	}

	z_ioapic_irq_disable(irq);
}
``` | /content/code_sandbox/drivers/interrupt_controller/intc_system_apic.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 702 |
```c
/*
*
*/
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/dt-bindings/interrupt-controller/arm-gic.h>
#include <zephyr/drivers/interrupt_controller/gic.h>
#include <zephyr/sys/barrier.h>
#include "intc_gic_common_priv.h"
#include "intc_gicv3_priv.h"
#include <string.h>
#define DT_DRV_COMPAT arm_gic_v3
/* Redistributor base addresses for each core */
mem_addr_t gic_rdists[CONFIG_MP_MAX_NUM_CPUS];
/* In NS / single-security-state builds all interrupts go to Group 1 */
#if defined(CONFIG_ARMV8_A_NS) || defined(CONFIG_GIC_SINGLE_SECURITY_STATE)
#define IGROUPR_VAL 0xFFFFFFFFU
#else
#define IGROUPR_VAL 0x0U
#endif
/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
#define LPI_PROPBASE_SZ(nrbits) ROUND_UP(BIT(nrbits), KB(64))
#define LPI_PENDBASE_SZ(nrbits) ROUND_UP(BIT(nrbits) / 8, KB(64))
#ifdef CONFIG_GIC_V3_ITS
/* Shared LPI configuration table (one byte per LPI, INTIDs from 8192) */
static uintptr_t lpi_prop_table;
/* Next free LPI INTID; LPI INTIDs start at 8192 per the GIC architecture */
atomic_t nlpi_intid = ATOMIC_INIT(8192);
#endif
/* Redistributor base address of the CPU this is called on. */
static inline mem_addr_t gic_get_rdist(void)
{
	return gic_rdists[arch_curr_cpu()->id];
}
/*
 * Wait for register write pending
 * TODO: add timed wait
 *
 * INTIDs below GIC_SPI_INT_BASE (SGIs/PPIs) poll the current CPU's
 * redistributor RWP bit; SPIs poll the distributor RWP bit.
 * Busy-waits with no timeout; always returns 0.
 */
static int gic_wait_rwp(uint32_t intid)
{
	uint32_t rwp_mask;
	mem_addr_t base;
	if (intid < GIC_SPI_INT_BASE) {
		base = (gic_get_rdist() + GICR_CTLR);
		rwp_mask = BIT(GICR_CTLR_RWP);
	} else {
		base = GICD_CTLR;
		rwp_mask = BIT(GICD_CTLR_RWP);
	}
	while (sys_read32(base) & rwp_mask) {
		;
	}
	return 0;
}
#ifdef CONFIG_GIC_V3_ITS
/*
 * Set or clear the enable bit (bit 0) of an LPI's configuration table
 * entry, then make the update visible and invalidate the redistributor's
 * cached configuration.
 */
static void arm_gic_lpi_setup(unsigned int intid, bool enable)
{
	uint8_t *entry = (uint8_t *)lpi_prop_table + (intid - 8192);

	if (enable) {
		*entry |= BIT(0);
	} else {
		*entry &= ~BIT(0);
	}

	barrier_dsync_fence_full();
	its_rdist_invall();
}
/*
 * Update the priority field of an LPI's configuration table entry.
 *
 * The configuration byte holds the priority in bits [7:2] and the enable
 * bit in bit 0.  Clear the old priority field with ~0xfc before OR-ing in
 * the new value: masking with 0xfc (as previously done) kept the stale
 * priority bits — so priorities could only accumulate — and clobbered the
 * enable bit as a side effect.
 */
static void arm_gic_lpi_set_priority(unsigned int intid, unsigned int prio)
{
	uint8_t *cfg = &((uint8_t *)lpi_prop_table)[intid - 8192];

	*cfg &= ~0xfc;
	*cfg |= prio & 0xfc;

	/* Publish the table update, then invalidate redistributor caches */
	barrier_dsync_fence_full();
	its_rdist_invall();
}
static bool arm_gic_lpi_is_enabled(unsigned int intid)
{
uint8_t *cfg = &((uint8_t *)lpi_prop_table)[intid - 8192];
return (*cfg & BIT(0));
}
#endif
#if defined(CONFIG_ARMV8_A_NS) || defined(CONFIG_GIC_SINGLE_SECURITY_STATE)
/* Program the 64-bit IROUTER affinity register for @p intid. */
static inline void arm_gic_write_irouter(uint64_t val, unsigned int intid)
{
	mem_addr_t addr = IROUTER(GET_DIST_BASE(intid), intid);
#ifdef CONFIG_ARM
	/* AArch32 build: no 64-bit MMIO helper, split into two 32-bit writes */
	sys_write32((uint32_t)val, addr);
	sys_write32((uint32_t)(val >> 32U), addr + 4);
#else
	sys_write64(val, addr);
#endif
}
#endif
/**
 * @brief Set priority (and trigger type) of an interrupt.
 *
 * For LPIs this delegates to the LPI configuration table.  For other
 * INTIDs the interrupt is first disabled (and left disabled — callers
 * re-enable it separately), its priority byte written, and for non-SGI
 * interrupts the edge/level trigger configuration is updated from
 * @p flags.
 */
void arm_gic_irq_set_priority(unsigned int intid,
			      unsigned int prio, uint32_t flags)
{
#ifdef CONFIG_GIC_V3_ITS
	if (intid >= 8192) {
		arm_gic_lpi_set_priority(intid, prio);
		return;
	}
#endif
	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
	uint32_t shift;
	uint32_t val;
	mem_addr_t base = GET_DIST_BASE(intid);
	/* Disable the interrupt */
	sys_write32(mask, ICENABLER(base, idx));
	gic_wait_rwp(intid);
	/* PRIORITYR registers provide byte access */
	sys_write8(prio & GIC_PRI_MASK, IPRIORITYR(base, intid));
	/* Interrupt type config */
	if (!GIC_IS_SGI(intid)) {
		/* ICFGR packs 2 config bits per interrupt */
		idx = intid / GIC_NUM_CFG_PER_REG;
		shift = (intid & (GIC_NUM_CFG_PER_REG - 1)) * 2;
		val = sys_read32(ICFGR(base, idx));
		val &= ~(GICD_ICFGR_MASK << shift);
		if (flags & IRQ_TYPE_EDGE) {
			val |= (GICD_ICFGR_TYPE << shift);
		}
		sys_write32(val, ICFGR(base, idx));
	}
}
/**
 * @brief Enable an interrupt (SGI/PPI/SPI via ISENABLER, LPI via the
 *        configuration table).
 */
void arm_gic_irq_enable(unsigned int intid)
{
#ifdef CONFIG_GIC_V3_ITS
	if (intid >= 8192) {
		arm_gic_lpi_setup(intid, true);
		return;
	}
#endif
	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
	sys_write32(mask, ISENABLER(GET_DIST_BASE(intid), idx));
#if defined(CONFIG_ARMV8_A_NS) || defined(CONFIG_GIC_SINGLE_SECURITY_STATE)
	/*
	 * Affinity routing is enabled for Armv8-A Non-secure state (GICD_CTLR.ARE_NS
	 * is set to '1') and for GIC single security state (GICD_CTRL.ARE is set to '1'),
	 * so need to set SPI's affinity, now set it to be the PE on which it is enabled.
	 */
	if (GIC_IS_SPI(intid)) {
		arm_gic_write_irouter(MPIDR_TO_CORE(GET_MPIDR()), intid);
	}
#endif
}
/**
 * @brief Disable an interrupt (SGI/PPI/SPI via ICENABLER, LPI via the
 *        configuration table) and wait for the write to take effect.
 */
void arm_gic_irq_disable(unsigned int intid)
{
#ifdef CONFIG_GIC_V3_ITS
	if (intid >= 8192) {
		arm_gic_lpi_setup(intid, false);
		return;
	}
#endif
	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
	sys_write32(mask, ICENABLER(GET_DIST_BASE(intid), idx));
	/* poll to ensure write is complete */
	gic_wait_rwp(intid);
}
/* Report whether the interrupt is currently enabled. */
bool arm_gic_irq_is_enabled(unsigned int intid)
{
#ifdef CONFIG_GIC_V3_ITS
	if (intid >= 8192) {
		return arm_gic_lpi_is_enabled(intid);
	}
#endif
	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
	uint32_t bit = intid & (GIC_NUM_INTR_PER_REG - 1);
	uint32_t reg = sys_read32(ISENABLER(GET_DIST_BASE(intid), idx));

	return (reg & BIT(bit)) != 0;
}
/* Report whether the interrupt is currently pending. */
bool arm_gic_irq_is_pending(unsigned int intid)
{
	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
	uint32_t bit = intid & (GIC_NUM_INTR_PER_REG - 1);
	uint32_t reg = sys_read32(ISPENDR(GET_DIST_BASE(intid), idx));

	return (reg & BIT(bit)) != 0;
}
/* Force the interrupt into the pending state via ISPENDR. */
void arm_gic_irq_set_pending(unsigned int intid)
{
	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;

	sys_write32(BIT(intid % GIC_NUM_INTR_PER_REG),
		    ISPENDR(GET_DIST_BASE(intid), idx));
}
/* Clear the interrupt's pending state via ICPENDR. */
void arm_gic_irq_clear_pending(unsigned int intid)
{
	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;

	sys_write32(BIT(intid % GIC_NUM_INTR_PER_REG),
		    ICPENDR(GET_DIST_BASE(intid), idx));
}
/*
 * Acknowledge and return the highest-priority pending Group 1 interrupt.
 * Reading ICC_IAR1_EL1 transitions its state as noted below.
 */
unsigned int arm_gic_get_active(void)
{
	int intid;
	/* (Pending -> Active / AP) or (AP -> AP) */
	intid = read_sysreg(ICC_IAR1_EL1);
	return intid;
}
/* Signal end-of-interrupt for @p intid (Group 1). */
void arm_gic_eoi(unsigned int intid)
{
	/*
	 * Interrupt request deassertion from peripheral to GIC happens
	 * by clearing interrupt condition by a write to the peripheral
	 * register. It is desired that the write transfer is complete
	 * before the core tries to change GIC state from 'AP/Active' to
	 * a new state on seeing 'EOI write'.
	 * Since ICC interface writes are not ordered against Device
	 * memory writes, a barrier is required to ensure the ordering.
	 * The dsb will also ensure *completion* of previous writes with
	 * DEVICE nGnRnE attribute.
	 */
	barrier_dsync_fence_full();
	/* (AP -> Pending) Or (Active -> Inactive) or (AP to AP) nested case */
	write_sysreg(intid, ICC_EOIR1_EL1);
}
/*
 * Raise a Software Generated Interrupt (SGI) on the CPUs selected by
 * the affinity path and target list.
 *
 * @param sgi_id      SGI INTID (asserted to be a valid SGI)
 * @param target_aff  MPIDR-style affinity identifying the target cluster
 * @param target_list bitmap of target CPUs (Aff0) within that cluster
 */
void gic_raise_sgi(unsigned int sgi_id, uint64_t target_aff,
		   uint16_t target_list)
{
	uint32_t aff3, aff2, aff1;
	uint64_t sgi_val;

	__ASSERT_NO_MSG(GIC_IS_SGI(sgi_id));

	/* Extract affinity fields from target */
	aff1 = MPIDR_AFFLVL(target_aff, 1);
	aff2 = MPIDR_AFFLVL(target_aff, 2);
#if defined(CONFIG_ARM)
	/* There is no Aff3 in AArch32 MPIDR */
	aff3 = 0;
#else
	aff3 = MPIDR_AFFLVL(target_aff, 3);
#endif
	sgi_val = GICV3_SGIR_VALUE(aff3, aff2, aff1, sgi_id,
				   SGIR_IRM_TO_AFF, target_list);

	/* Make prior memory writes visible before the SGI is generated */
	barrier_dsync_fence_full();
	write_sysreg(sgi_val, ICC_SGI1R);
	/* Synchronize the context so the sysreg write takes effect */
	barrier_isync_fence_full();
}
/*
 * Wake up GIC redistributor.
 * clear ProcessorSleep and wait till ChildAsleep is cleared.
 * ProcessSleep to be cleared only when ChildAsleep is set
 * Check if redistributor is not powered already.
 */
static void gicv3_rdist_enable(mem_addr_t rdist)
{
	/* Already awake: ChildrenAsleep is clear, nothing to do */
	if (!(sys_read32(rdist + GICR_WAKER) & BIT(GICR_WAKER_CA))) {
		return;
	}

	/* NOTE(review): implementations with IIDR product ID >= 2 appear to
	 * expose GICR_PWRR and need a power-up handshake before waking the
	 * redistributor — confirm against the specific GIC implementation TRM.
	 */
	if (GICR_IIDR_PRODUCT_ID_GET(sys_read32(rdist + GICR_IIDR)) >= 0x2) {
		if (sys_read32(rdist + GICR_PWRR) & BIT(GICR_PWRR_RDPD)) {
			sys_set_bit(rdist + GICR_PWRR, GICR_PWRR_RDAG);
			sys_clear_bit(rdist + GICR_PWRR, GICR_PWRR_RDPD);
			/* Poll until the redistributor leaves power-down */
			while (sys_read32(rdist + GICR_PWRR) & BIT(GICR_PWRR_RDPD)) {
				;
			}
		}
	}

	/* Clear ProcessorSleep, then wait for ChildrenAsleep to clear */
	sys_clear_bit(rdist + GICR_WAKER, GICR_WAKER_PS);
	while (sys_read32(rdist + GICR_WAKER) & BIT(GICR_WAKER_CA)) {
		;
	}
}
#ifdef CONFIG_GIC_V3_ITS
/*
 * Setup LPIs Configuration & Pending tables for redistributors
 * LPI configuration is global, each redistributor has a pending table
 *
 * Fix: the k_aligned_alloc() results were passed straight to memset(),
 * dereferencing NULL on allocation failure. Assert on OOM instead.
 */
static void gicv3_rdist_setup_lpis(mem_addr_t rdist)
{
	unsigned int lpi_id_bits = MIN(GICD_TYPER_IDBITS(sys_read32(GICD_TYPER)),
				       ITS_MAX_LPI_NRBITS);
	uintptr_t lpi_pend_table;
	uint64_t reg;
	uint32_t ctlr;

	/* If not, alloc a common prop table for all redistributors */
	if (!lpi_prop_table) {
		lpi_prop_table = (uintptr_t)k_aligned_alloc(4 * 1024, LPI_PROPBASE_SZ(lpi_id_bits));
		__ASSERT(lpi_prop_table != 0, "LPI property table allocation failed");
		memset((void *)lpi_prop_table, 0, LPI_PROPBASE_SZ(lpi_id_bits));
	}

	/* Per-redistributor pending table (64 KiB aligned for PENDBASER) */
	lpi_pend_table = (uintptr_t)k_aligned_alloc(64 * 1024, LPI_PENDBASE_SZ(lpi_id_bits));
	__ASSERT(lpi_pend_table != 0, "LPI pending table allocation failed");
	memset((void *)lpi_pend_table, 0, LPI_PENDBASE_SZ(lpi_id_bits));

	/* LPIs must be disabled while the base registers are reprogrammed */
	ctlr = sys_read32(rdist + GICR_CTLR);
	ctlr &= ~GICR_CTLR_ENABLE_LPIS;
	sys_write32(ctlr, rdist + GICR_CTLR);

	/* PROPBASE */
	reg = (GIC_BASER_SHARE_INNER << GITR_PROPBASER_SHAREABILITY_SHIFT) |
	      (GIC_BASER_CACHE_RAWAWB << GITR_PROPBASER_INNER_CACHE_SHIFT) |
	      (lpi_prop_table & (GITR_PROPBASER_ADDR_MASK << GITR_PROPBASER_ADDR_SHIFT)) |
	      (GIC_BASER_CACHE_INNERLIKE << GITR_PROPBASER_OUTER_CACHE_SHIFT) |
	      ((lpi_id_bits - 1) & GITR_PROPBASER_ID_BITS_MASK);
	sys_write64(reg, rdist + GICR_PROPBASER);
	/* TOFIX: check SHAREABILITY validity */

	/* PENDBASE */
	reg = (GIC_BASER_SHARE_INNER << GITR_PENDBASER_SHAREABILITY_SHIFT) |
	      (GIC_BASER_CACHE_RAWAWB << GITR_PENDBASER_INNER_CACHE_SHIFT) |
	      (lpi_pend_table & (GITR_PENDBASER_ADDR_MASK << GITR_PENDBASER_ADDR_SHIFT)) |
	      (GIC_BASER_CACHE_INNERLIKE << GITR_PENDBASER_OUTER_CACHE_SHIFT) |
	      GITR_PENDBASER_PTZ;
	sys_write64(reg, rdist + GICR_PENDBASER);
	/* TOFIX: check SHAREABILITY validity */

	ctlr = sys_read32(rdist + GICR_CTLR);
	ctlr |= GICR_CTLR_ENABLE_LPIS;
	sys_write32(ctlr, rdist + GICR_CTLR);

	barrier_dsync_fence_full();
}
#endif
/*
 * Initialize the cpu interface. This should be called by each core.
 */
static void gicv3_cpuif_init(void)
{
	uint32_t icc_sre;
	uint32_t intid;

	/* SGI/PPI registers live in the redistributor's SGI frame */
	mem_addr_t base = gic_get_rdist() + GICR_SGI_BASE_OFF;

	/* Disable all sgi ppi */
	sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG), ICENABLER(base, 0));
	/* Any sgi/ppi intid ie. 0-31 will select GICR_CTRL */
	gic_wait_rwp(0);

	/* Clear pending */
	sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG), ICPENDR(base, 0));

	/* Configure all SGIs/PPIs as G1S or G1NS depending on Zephyr
	 * is run in EL1S or EL1NS respectively.
	 * All interrupts will be delivered as irq
	 */
	sys_write32(IGROUPR_VAL, IGROUPR(base, 0));
	sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG), IGROUPMODR(base, 0));

	/*
	 * Configure default priorities for SGI 0:15 and PPI 0:15.
	 */
	for (intid = 0; intid < GIC_SPI_INT_BASE;
	     intid += GIC_NUM_PRI_PER_REG) {
		sys_write32(GIC_INT_DEF_PRI_X4, IPRIORITYR(base, intid));
	}

	/* Configure PPIs as level triggered */
	sys_write32(0, ICFGR(base, 1));

	/*
	 * Check if system interface can be enabled.
	 * 'icc_sre_el3' needs to be configured at 'EL3'
	 * to allow access to 'icc_sre_el1' at 'EL1'
	 * eg: z_arch_el3_plat_init can be used by platform.
	 */
	icc_sre = read_sysreg(ICC_SRE_EL1);

	if (!(icc_sre & ICC_SRE_ELx_SRE_BIT)) {
		icc_sre = (icc_sre | ICC_SRE_ELx_SRE_BIT |
			   ICC_SRE_ELx_DIB_BIT | ICC_SRE_ELx_DFB_BIT);
		write_sysreg(icc_sre, ICC_SRE_EL1);
		/* Read back: the SRE bit must stick, otherwise EL3 did not
		 * grant system register access.
		 */
		icc_sre = read_sysreg(ICC_SRE_EL1);

		__ASSERT_NO_MSG(icc_sre & ICC_SRE_ELx_SRE_BIT);
	}

	/* Set the priority mask register to GIC_IDLE_PRIO */
	write_sysreg(GIC_IDLE_PRIO, ICC_PMR_EL1);

	/* Allow group1 interrupts */
	write_sysreg(1, ICC_IGRPEN1_EL1);
}
/*
 * Initialize the distributor: disable it, put every SPI into a known
 * default state (disabled, non-pending, group 1, default priority,
 * level triggered), then re-enable it.
 *
 * TODO: Consider Zephyr in EL1NS.
 */
static void gicv3_dist_init(void)
{
	unsigned int num_ints;
	unsigned int intid;
	unsigned int idx;
	mem_addr_t base = GIC_DIST_BASE;

	/* GICD_TYPER.ITLinesNumber encodes the SPI count in units of 32 */
	num_ints = sys_read32(GICD_TYPER);
	num_ints &= GICD_TYPER_ITLINESNUM_MASK;
	num_ints = (num_ints + 1) << 5;

	/* Disable the distributor */
	sys_write32(0, GICD_CTLR);
	gic_wait_rwp(GIC_SPI_INT_BASE);
#ifdef CONFIG_GIC_SINGLE_SECURITY_STATE
	/*
	 * Before configuration, we need to check whether
	 * the GIC single security state mode is supported.
	 * Make sure GICD_CTRL_NS is 1.
	 */
	sys_set_bit(GICD_CTLR, GICD_CTRL_NS);
	__ASSERT(sys_test_bit(GICD_CTLR, GICD_CTRL_NS),
		 "Current GIC does not support single security state");
#endif

	/*
	 * Default configuration of all SPIs
	 */
	for (intid = GIC_SPI_INT_BASE; intid < num_ints;
	     intid += GIC_NUM_INTR_PER_REG) {
		idx = intid / GIC_NUM_INTR_PER_REG;

		/* Disable interrupt */
		sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG),
			    ICENABLER(base, idx));
		/* Clear pending */
		sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG),
			    ICPENDR(base, idx));
		sys_write32(IGROUPR_VAL, IGROUPR(base, idx));
		sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG),
			    IGROUPMODR(base, idx));
	}

	/* wait for rwp on GICD */
	gic_wait_rwp(GIC_SPI_INT_BASE);

	/* Configure default priorities for all SPIs. */
	for (intid = GIC_SPI_INT_BASE; intid < num_ints;
	     intid += GIC_NUM_PRI_PER_REG) {
		sys_write32(GIC_INT_DEF_PRI_X4, IPRIORITYR(base, intid));
	}

	/* Configure all SPIs as active low, level triggered by default */
	for (intid = GIC_SPI_INT_BASE; intid < num_ints;
	     intid += GIC_NUM_CFG_PER_REG) {
		idx = intid / GIC_NUM_CFG_PER_REG;
		sys_write32(0, ICFGR(base, idx));
	}

#ifdef CONFIG_ARMV8_A_NS
	/* Enable distributor with ARE */
	sys_write32(BIT(GICD_CTRL_ARE_NS) | BIT(GICD_CTLR_ENABLE_G1NS),
		    GICD_CTLR);
#elif defined(CONFIG_GIC_SINGLE_SECURITY_STATE)
	/*
	 * For GIC single security state, the config GIC_SINGLE_SECURITY_STATE
	 * means the GIC is under single security state which has only two
	 * groups: group 0 and group 1.
	 * Then set GICD_CTLR_ARE and GICD_CTLR_ENABLE_G1 to enable Group 1
	 * interrupt.
	 * Since the GICD_CTLR_ARE and GICD_CTRL_ARE_S share BIT(4), and
	 * similarly the GICD_CTLR_ENABLE_G1 and GICD_CTLR_ENABLE_G1NS share
	 * BIT(1), we can reuse them.
	 */
	sys_write32(BIT(GICD_CTRL_ARE_S) | BIT(GICD_CTLR_ENABLE_G1NS),
		    GICD_CTLR);
#else
	/* enable Group 1 secure interrupts */
	sys_set_bit(GICD_CTLR, GICD_CTLR_ENABLE_G1S);
#endif
}
/* Pack the MPIDR affinity levels into the Aff3.Aff2.Aff1.Aff0 byte
 * layout used for GICR_TYPER affinity comparisons.
 */
static uint64_t arm_gic_mpidr_to_affinity(uint64_t mpidr)
{
	uint64_t packed;

#if defined(CONFIG_ARM)
	/* There is no Aff3 in AArch32 MPIDR */
	packed = 0;
#else
	packed = MPIDR_AFFLVL(mpidr, 3) << 24;
#endif
	packed |= MPIDR_AFFLVL(mpidr, 2) << 16;
	packed |= MPIDR_AFFLVL(mpidr, 1) << 8;
	packed |= MPIDR_AFFLVL(mpidr, 0);

	return packed;
}
/* Compare a redistributor's affinity value against this CPU's. */
static bool arm_gic_aff_matching(uint64_t gicr_aff, uint64_t aff)
{
#if defined(CONFIG_GIC_V3_RDIST_MATCHING_AFF0_ONLY)
	/* Some platforms route by Aff0 alone: compare the low byte only */
	return ((gicr_aff ^ aff) & BIT64_MASK(8)) == 0;
#else
	return gicr_aff == aff;
#endif
}
/* Read a 64-bit TYPER register; AArch32 lacks 64-bit MMIO reads, so
 * assemble the value from two 32-bit accesses (low word first).
 */
static inline uint64_t arm_gic_get_typer(mem_addr_t addr)
{
#if defined(CONFIG_ARM)
	uint64_t lo = sys_read32(addr);
	uint64_t hi = sys_read32(addr + 4);

	return (hi << 32) | lo;
#else
	return sys_read64(addr);
#endif
}
/*
 * Walk the redistributor region looking for the frame whose affinity
 * matches the calling CPU. Returns its base address, or NULL when no
 * match is found before the region (or the Last-marked frame) ends.
 */
static mem_addr_t arm_gic_iterate_rdists(void)
{
	uint64_t aff = arm_gic_mpidr_to_affinity(GET_MPIDR());
	mem_addr_t rdist_addr = GIC_RDIST_BASE;

	while (rdist_addr < GIC_RDIST_BASE + GIC_RDIST_SIZE) {
		uint64_t typer = arm_gic_get_typer(rdist_addr + GICR_TYPER);

		if (arm_gic_aff_matching(GICR_TYPER_AFFINITY_VALUE_GET(typer), aff)) {
			return rdist_addr;
		}

		/* GICR_TYPER.Last flags the final frame in the series */
		if (GICR_TYPER_LAST_GET(typer) == 1) {
			break;
		}

		/* Step to the next redistributor frame (0x20000 stride) */
		rdist_addr += 0x20000;
	}

	return (mem_addr_t)NULL;
}
/*
 * Per-CPU GIC bring-up: locate this CPU's redistributor, then set up
 * LPIs (when ITS is enabled), wake the redistributor, and initialize
 * the CPU interface.
 */
static void __arm_gic_init(void)
{
	mem_addr_t rd_base = arm_gic_iterate_rdists();

	__ASSERT(rd_base != (mem_addr_t)NULL, "");
	gic_rdists[arch_curr_cpu()->id] = rd_base;

#ifdef CONFIG_GIC_V3_ITS
	/* Enable LPIs in Redistributor */
	gicv3_rdist_setup_lpis(gic_get_rdist());
#endif

	gicv3_rdist_enable(gic_get_rdist());

	gicv3_cpuif_init();
}
/*
 * Driver init (boot CPU): configure the distributor once, then run the
 * per-CPU initialization for this core.
 *
 * @param dev unused (device init signature requirement)
 * @return 0 always
 */
int arm_gic_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	gicv3_dist_init();

	__arm_gic_init();

	return 0;
}
DEVICE_DT_INST_DEFINE(0, arm_gic_init, NULL, NULL, NULL,
PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY, NULL);
#ifdef CONFIG_SMP
/*
 * Bring-up entry point for secondary cores: runs the per-CPU GIC init
 * and, when ITS is enabled, registers this core's redistributor with
 * the ITS collections.
 */
void arm_gic_secondary_init(void)
{
	__arm_gic_init();

#ifdef CONFIG_GIC_V3_ITS
	/* Map this CPU Redistributor in all the ITS Collection tables */
	its_rdist_map();
#endif
}
#endif
``` | /content/code_sandbox/drivers/interrupt_controller/intc_gicv3.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,318 |
```c
/*
*
*/
#define DT_DRV_COMPAT intel_cavs_intc
#include <zephyr/arch/cpu.h>
#include <zephyr/device.h>
#include <zephyr/devicetree/interrupt_controller.h>
#include <zephyr/irq.h>
#include <zephyr/irq_nextlevel.h>
#include <zephyr/arch/arch_interface.h>
#include <zephyr/sw_isr_table.h>
#include "intc_cavs.h"
#if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
#if defined(CONFIG_SOC_INTEL_CAVS_V25)
#define PER_CPU_OFFSET(x) (0x40 * x)
#else
#error "Must define PER_CPU_OFFSET(x) for SoC"
#endif
#else
#define PER_CPU_OFFSET(x) 0
#endif
/*
 * Return the register bank for the current core. On multi-core SoCs
 * each CPU has its own copy of the aggregator registers at a fixed
 * per-CPU offset from the shared base address.
 */
static ALWAYS_INLINE
struct cavs_registers *get_base_address(struct cavs_ictl_runtime *context)
{
#if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
	return UINT_TO_POINTER(context->base_addr +
			       PER_CPU_OFFSET(arch_curr_cpu()->id));
#else
	return UINT_TO_POINTER(context->base_addr);
#endif
}
/*
 * Walk the pending-status bitmask and invoke the registered handler
 * for every set bit, lowest bit first.
 */
static ALWAYS_INLINE void cavs_ictl_dispatch_child_isrs(uint32_t intr_status,
							uint32_t isr_base_offset)
{
	while (intr_status != 0) {
		uint32_t bit = find_lsb_set(intr_status) - 1;
		struct _isr_table_entry *ent =
			&_sw_isr_table[isr_base_offset + bit];

		intr_status &= ~BIT(bit);
		ent->isr(ent->arg);
	}
}
/* Aggregator ISR: read this core's status register and fan out. */
static void cavs_ictl_isr(const struct device *port)
{
	const struct cavs_ictl_config *config = port->config;
	struct cavs_ictl_runtime *context = port->data;
	volatile struct cavs_registers *const regs = get_base_address(context);

	cavs_ictl_dispatch_child_isrs(regs->status_il, config->isr_table_offset);
}
/* Unmask one child line on this aggregator (write-1-to-enable). */
static void cavs_ictl_irq_enable(const struct device *dev,
				 unsigned int irq)
{
	struct cavs_ictl_runtime *context = dev->data;
	volatile struct cavs_registers *const regs = get_base_address(context);

	regs->enable_il = BIT(irq);
}
/* Mask one child line on this aggregator (write-1-to-disable). */
static void cavs_ictl_irq_disable(const struct device *dev,
				  unsigned int irq)
{
	struct cavs_ictl_runtime *context = dev->data;
	volatile struct cavs_registers *const regs = get_base_address(context);

	regs->disable_il = BIT(irq);
}
static unsigned int cavs_ictl_irq_get_state(const struct device *dev)
{
	struct cavs_ictl_runtime *context = dev->data;
	volatile struct cavs_registers *const regs = get_base_address(context);

	/* A set bit in disable_state_il means the corresponding line is
	 * masked; report 0 only when every line is masked.
	 */
	if (regs->disable_state_il == 0xFFFFFFFF) {
		return 0;
	}

	return 1;
}
/* Return 1 when the given line is enabled, 0 when it is masked. */
static int cavs_ictl_irq_get_line_state(const struct device *dev,
					unsigned int irq)
{
	struct cavs_ictl_runtime *context = dev->data;
	volatile struct cavs_registers *const regs = get_base_address(context);

	/* Line is enabled when its disable-state bit is clear */
	return (regs->disable_state_il & BIT(irq)) == 0 ? 1 : 0;
}
/* irq_nextlevel API vtable exposed by this aggregator driver */
static const struct irq_next_level_api cavs_apis = {
	.intr_enable = cavs_ictl_irq_enable,
	.intr_disable = cavs_ictl_irq_disable,
	.intr_get_state = cavs_ictl_irq_get_state,
	.intr_get_line_state = cavs_ictl_irq_get_line_state,
};
/*
 * Per-instance boilerplate: defines the init function (which masks all
 * child lines at boot), the config/runtime structures, the device
 * object, the IRQ_CONNECT hook for the parent line, and the parent
 * entry used by the multi-level interrupt lookup.
 */
#define CAVS_ICTL_INIT(n)						\
	static int cavs_ictl_##n##_initialize(const struct device *port) \
	{								\
		struct cavs_ictl_runtime *context = port->data;		\
		volatile struct cavs_registers * const regs =		\
			get_base_address(context);			\
		regs->disable_il = ~0;					\
									\
		return 0;						\
	}								\
									\
	static void cavs_config_##n##_irq(const struct device *port);	\
									\
	static const struct cavs_ictl_config cavs_config_##n = {	\
		.irq_num = DT_INST_IRQN(n),				\
		.isr_table_offset = CONFIG_CAVS_ISR_TBL_OFFSET +	\
				    CONFIG_MAX_IRQ_PER_AGGREGATOR*n,	\
		.config_func = cavs_config_##n##_irq,			\
	};								\
									\
	static struct cavs_ictl_runtime cavs_##n##_runtime = {		\
		.base_addr = DT_INST_REG_ADDR(n),			\
	};								\
	DEVICE_DT_INST_DEFINE(n,					\
			      cavs_ictl_##n##_initialize,		\
			      NULL,					\
			      &cavs_##n##_runtime, &cavs_config_##n,	\
			      PRE_KERNEL_1,				\
			      CONFIG_CAVS_ICTL_INIT_PRIORITY, &cavs_apis);\
									\
	static void cavs_config_##n##_irq(const struct device *port)	\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority),	\
			    cavs_ictl_isr, DEVICE_DT_INST_GET(n),	\
			    DT_INST_IRQ(n, sense));			\
	}								\
	IRQ_PARENT_ENTRY_DEFINE(					\
		intc_cavs_##n, DEVICE_DT_INST_GET(n), DT_INST_IRQN(n),	\
		INTC_INST_ISR_TBL_OFFSET(n),				\
		DT_INST_INTC_GET_AGGREGATOR_LEVEL(n));

DT_INST_FOREACH_STATUS_OKAY(CAVS_ICTL_INIT)
``` | /content/code_sandbox/drivers/interrupt_controller/intc_cavs.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,296 |
```c
/*
*
*/
#define DT_DRV_COMPAT intel_ace_intc
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/devicetree/interrupt_controller.h>
#include <zephyr/irq_nextlevel.h>
#include <zephyr/arch/xtensa/irq.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/drivers/interrupt_controller/dw_ace.h>
#include <soc.h>
#include <adsp_interrupt.h>
#include <zephyr/irq.h>
#include "intc_dw.h"
/* ACE device interrupts are all packed into a single line on Xtensa's
* architectural IRQ 4 (see below), run by a Designware interrupt
* controller with 28 lines instantiated. They get numbered
* immediately after the Xtensa interrupt space in the numbering
* (i.e. interrupts 0-31 are Xtensa IRQs, 32 represents DW input 0,
* etc...).
*
* That IRQ 4 indeed has an interrupt type of "EXTERN_LEVEL" and an
* interrupt level of 2. The CPU has a level 1 external interrupt on
* IRQ 1 and a level 3 on IRQ 6, but nothing seems wired there. Note
* that this level 2 ISR is also shared with the CCOUNT timer on IRQ3.
* This interrupt is a very busy place!
*
* But, because there can never be a situation where all interrupts on
* the Synopsys controller are disabled (such a system would halt
* forever if it reached idle!), we at least can take advantage to
* implement a simplified masking architecture. Xtensa INTENABLE
* always has the line active, and we do all masking of external
* interrupts on the single controller.
*
* Finally: note that there is an extra layer of masking on ACE. The
* ACE_DINT registers provide separately maskable interrupt delivery
* for each core, and with some devices for different internal
* interrupt sources. Responsibility for these mask bits is left with
* the driver.
*
* Thus, the masking architecture picked here is:
*
* + Drivers manage ACE_DINT themselves, as there are device-specific
* mask indexes that only the driver can interpret. If
* core-asymmetric interrupt routing needs to happen, it happens
* here.
*
* + The DW layer is en/disabled uniformly across all cores. This is
* the layer toggled by arch_irq_en/disable().
*
* + Index 4 in the INTENABLE SR is set at core startup and stays
* enabled always.
*/
/* ACE also has per-core instantiations of a Synopsys interrupt
* controller. These inputs (with the same indices as ACE_INTL_*
* above) are downstream of the DINT layer, and must be independently
* masked/enabled. The core Zephyr intc_dw driver unfortunately
* doesn't understand this kind of MP implementation. Note also that
* as instantiated (there are only 28 sources), the high 32 bit
* registers don't exist and aren't named here. Access via e.g.:
*
* ACE_INTC[core_id].irq_inten_l |= interrupt_bit;
*/
#define ACE_INTC ((volatile struct dw_ictl_registers *)DT_INST_REG_ADDR(0))
/* True when the Zephyr IRQ number encodes a second-level line behind
 * the ACE_INTC aggregator: parent part equals ACE_INTC_IRQ and the
 * level-2 part is nonzero.
 */
static inline bool is_dw_irq(uint32_t irq)
{
	bool parent_matches = (irq & XTENSA_IRQ_NUM_MASK) == ACE_INTC_IRQ;
	bool has_level2 = (irq & ~XTENSA_IRQ_NUM_MASK) != 0;

	return parent_matches && has_level2;
}
/* Enable an interrupt line: DW aggregator lines are unmasked uniformly
 * on every core; bare Xtensa lines go to the architectural enable.
 */
void dw_ace_irq_enable(const struct device *dev, uint32_t irq)
{
	ARG_UNUSED(dev);

	if (is_dw_irq(irq)) {
		uint32_t bit = BIT(ACE_IRQ_FROM_ZEPHYR(irq));
		unsigned int ncpus = arch_num_cpus();

		for (unsigned int core = 0; core < ncpus; core++) {
			ACE_INTC[core].irq_inten_l |= bit;
			ACE_INTC[core].irq_intmask_l &= ~bit;
		}
	} else if ((irq & ~XTENSA_IRQ_NUM_MASK) == 0U) {
		xtensa_irq_enable(XTENSA_IRQ_NUMBER(irq));
	}
}
/* Disable an interrupt line: DW aggregator lines are masked uniformly
 * on every core; bare Xtensa lines go to the architectural disable.
 */
void dw_ace_irq_disable(const struct device *dev, uint32_t irq)
{
	ARG_UNUSED(dev);

	if (is_dw_irq(irq)) {
		uint32_t bit = BIT(ACE_IRQ_FROM_ZEPHYR(irq));
		unsigned int ncpus = arch_num_cpus();

		for (unsigned int core = 0; core < ncpus; core++) {
			ACE_INTC[core].irq_inten_l &= ~bit;
			ACE_INTC[core].irq_intmask_l |= bit;
		}
	} else if ((irq & ~XTENSA_IRQ_NUM_MASK) == 0U) {
		xtensa_irq_disable(XTENSA_IRQ_NUMBER(irq));
	}
}
/* Query enable state. DW lines are toggled identically on all cores,
 * so checking core 0 suffices.
 */
int dw_ace_irq_is_enabled(const struct device *dev, unsigned int irq)
{
	ARG_UNUSED(dev);

	if (is_dw_irq(irq)) {
		return ACE_INTC[0].irq_inten_l & BIT(ACE_IRQ_FROM_ZEPHYR(irq));
	}

	if ((irq & ~XTENSA_IRQ_NUM_MASK) == 0U) {
		return xtensa_irq_is_enabled(XTENSA_IRQ_NUMBER(irq));
	}

	return false;
}
#ifdef CONFIG_DYNAMIC_INTERRUPTS
/*
 * Install an interrupt handler at runtime.
 *
 * @param dev       unused
 * @param irq       Zephyr IRQ number (doubles as the ISR-table index)
 * @param priority  unused on this architecture
 * @param routine   handler to install
 * @param parameter argument passed to the handler
 * @param flags     unused
 * @return the installed IRQ number
 */
int dw_ace_irq_connect_dynamic(const struct device *dev, unsigned int irq,
			       unsigned int priority,
			       void (*routine)(const void *parameter),
			       const void *parameter, uint32_t flags)
{
	/* Simple architecture means that the Zephyr irq number and
	 * the index into the ISR table are identical.
	 */
	ARG_UNUSED(dev);
	ARG_UNUSED(flags);
	ARG_UNUSED(priority);
	z_isr_install(irq, routine, parameter);
	return irq;
}
#endif
/* Shared level-2 dispatcher: fan out every pending DW source for the
 * core taking the interrupt, lowest bit first.
 */
static void dwint_isr(const void *arg)
{
	uint32_t pending = ACE_INTC[arch_proc_id()].irq_finalstatus_l;

	while (pending != 0) {
		uint32_t bit = find_lsb_set(pending) - 1;
		struct _isr_table_entry *ent =
			&_sw_isr_table[CONFIG_2ND_LVL_ISR_TBL_OFFSET + bit];

		pending &= ~BIT(bit);
		ent->isr(ent->arg);
	}
}
/*
 * Wire the level-2 dispatcher to the aggregator's parent Xtensa IRQ
 * and leave that line permanently enabled (per-source masking happens
 * at the DW layer).
 */
static int dw_ace_init(const struct device *dev)
{
	ARG_UNUSED(dev);
	IRQ_CONNECT(ACE_INTC_IRQ, 0, dwint_isr, 0, 0);
	xtensa_irq_enable(ACE_INTC_IRQ);
	return 0;
}
/* Driver API vtable (dynamic connect only compiled when enabled) */
static const struct dw_ace_v1_ictl_driver_api dw_ictl_ace_v1x_apis = {
	.intr_enable = dw_ace_irq_enable,
	.intr_disable = dw_ace_irq_disable,
	.intr_is_enabled = dw_ace_irq_is_enabled,
#ifdef CONFIG_DYNAMIC_INTERRUPTS
	.intr_connect_dynamic = dw_ace_irq_connect_dynamic,
#endif
};
DEVICE_DT_INST_DEFINE(0, dw_ace_init, NULL, NULL, NULL,
PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY,
&dw_ictl_ace_v1x_apis);
IRQ_PARENT_ENTRY_DEFINE(ace_intc, DEVICE_DT_INST_GET(0), DT_INST_IRQN(0),
INTC_BASE_ISR_TBL_OFFSET(DT_DRV_INST(0)),
DT_INST_INTC_GET_AGGREGATOR_LEVEL(0));
``` | /content/code_sandbox/drivers/interrupt_controller/intc_dw_ace.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,612 |
```unknown
# interrupt controller configuration options
menu "Interrupt controller drivers"
config ARCV2_INTERRUPT_UNIT
bool "ARCv2 Interrupt Unit"
default y
depends on ARC
help
The ARCv2 interrupt unit has 16 allocated exceptions associated with
vectors 0 to 15 and 240 interrupts associated with vectors 16 to 255.
The interrupt unit is optional in the ARCv2-based processors. When
building a processor, you can configure the processor to include an
interrupt unit. The ARCv2 interrupt unit is highly programmable.
config SWERV_PIC
bool "SweRV EH1 Programmable Interrupt Controller (PIC)"
default y
depends on DT_HAS_SWERV_PIC_ENABLED
help
Programmable Interrupt Controller for the SweRV EH1 RISC-V CPU.
config VEXRISCV_LITEX_IRQ
bool "VexRiscv LiteX Interrupt controller"
default y
depends on DT_HAS_LITEX_VEXRISCV_INTC0_ENABLED
help
IRQ implementation for LiteX VexRiscv
config LEON_IRQMP
bool "GRLIB IRQMP interrupt controller"
default y
depends on DT_HAS_GAISLER_IRQMP_ENABLED
help
GRLIB IRQMP and IRQAMP
config INTC_INIT_PRIORITY
int "Interrupt controller init priority"
default KERNEL_INIT_PRIORITY_DEFAULT
help
Interrupt controller device initialization priority.
if MCHP_ECIA_XEC
config XEC_GIRQ_INIT_PRIORITY
	int "XEC GIRQ Interrupt controller init priority"
default 41
help
XEC GIRQ Interrupt controller device initialization priority.
The priority value needs to be greater than INTC_INIT_PRIORITY
So that the XEC GIRQ controllers are initialized after the
xec_ecia.
endif
module = INTC
module-str = intc
source "subsys/logging/Kconfig.template.log_config"
source "drivers/interrupt_controller/Kconfig.multilevel"
source "drivers/interrupt_controller/Kconfig.loapic"
source "drivers/interrupt_controller/Kconfig.dw"
source "drivers/interrupt_controller/Kconfig.it8xxx2"
source "drivers/interrupt_controller/Kconfig.stm32"
source "drivers/interrupt_controller/Kconfig.cavs"
source "drivers/interrupt_controller/Kconfig.rv32m1"
source "drivers/interrupt_controller/Kconfig.sam0"
source "drivers/interrupt_controller/Kconfig.gic"
source "drivers/interrupt_controller/Kconfig.npcx"
source "drivers/interrupt_controller/Kconfig.intel_vtd"
source "drivers/interrupt_controller/Kconfig.esp32"
source "drivers/interrupt_controller/Kconfig.esp32c3"
source "drivers/interrupt_controller/Kconfig.xec"
source "drivers/interrupt_controller/Kconfig.clic"
source "drivers/interrupt_controller/Kconfig.gd32_exti"
source "drivers/interrupt_controller/Kconfig.plic"
source "drivers/interrupt_controller/Kconfig.nxp_s32"
source "drivers/interrupt_controller/Kconfig.xmc4xxx"
source "drivers/interrupt_controller/Kconfig.nxp_pint"
source "drivers/interrupt_controller/Kconfig.vim"
source "drivers/interrupt_controller/Kconfig.renesas_ra"
source "drivers/interrupt_controller/Kconfig.nxp_irqsteer"
source "drivers/interrupt_controller/Kconfig.mtk_adsp"
endmenu
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 698 |
```unknown
# ARM Generic Interrupt Controller (GIC) configuration
if CPU_CORTEX
config GIC
bool
config GIC_V1
def_bool DT_HAS_ARM_GIC_V1_ENABLED
select GIC
help
The ARM Generic Interrupt Controller v1 (e.g. PL390) works with the
ARM Cortex-family processors.
config GIC_V2
def_bool DT_HAS_ARM_GIC_V2_ENABLED
select GIC
help
The ARM Generic Interrupt Controller v2 (e.g. GIC-400) works with the
ARM Cortex-family processors.
config GIC_V3
def_bool DT_HAS_ARM_GIC_V3_ENABLED
select GIC
help
The ARM Generic Interrupt Controller v3 (e.g. GIC-500 and GIC-600)
works with the ARM Cortex-family processors.
config GIC_VER
int
depends on GIC
default 1 if GIC_V1
default 2 if GIC_V2
default 3 if GIC_V3
config GIC_SINGLE_SECURITY_STATE
bool
depends on GIC_V3
help
	  Some ARM Cortex-family processors support only a single security
	  state.
config GIC_V3_RDIST_MATCHING_AFF0_ONLY
bool
depends on GIC_V3
default y if CPU_CORTEX_R52
help
	  Some platforms use only Aff0 to match the MPIDR against the GICR
	  affinity. With this enabled, the target redistributor is found by
	  comparing Aff0 alone.
config GIC_V3_ITS
bool "GIC v3 Interrupt Translation Service"
depends on GIC_V3
# ITS generates Non-secure Group 1 LPI interrupts, requiring EL1NS
select ARMV8_A_NS
select DYNAMIC_INTERRUPTS
help
Support for the optional Interrupt Translation Service used to translate
hardware interrupt from PCIe MSI messages for example. Please note
that ITS uses dynamic memory, so HEAP_MEM_POOL_SIZE should be
enough to allocate ITS tables (size is probed at runtime).
endif # CPU_CORTEX
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.gic | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 440 |
```c
/*
*
*/
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(intc_gicv3_its, LOG_LEVEL_ERR);
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/interrupt_controller/gicv3_its.h>
#include <zephyr/sys/barrier.h>
#include "intc_gic_common_priv.h"
#include "intc_gicv3_priv.h"
#define DT_DRV_COMPAT arm_gic_v3_its
/*
* Current ITS implementation only handle GICv3 ITS physical interruption generation
* Implementation is designed for the PCIe MSI/MSI-X use-case in mind.
*/
#define GITS_BASER_NR_REGS 8
/* convenient access to all redistributors base address */
extern mem_addr_t gic_rdists[CONFIG_MP_MAX_NUM_CPUS];
#define SIZE_256 256
#define SIZE_4K KB(4)
#define SIZE_16K KB(16)
#define SIZE_64K KB(64)
/* One ITS command queue entry: four 64-bit words (32 bytes) */
struct its_cmd_block {
	uint64_t raw_cmd[4];
};

#define ITS_CMD_QUEUE_SIZE SIZE_64K
#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SIZE / sizeof(struct its_cmd_block))

/* Per-instance runtime state */
struct gicv3_its_data {
	mm_reg_t base;				/* ITS register base */
	struct its_cmd_block *cmd_base;		/* start of the command ring */
	struct its_cmd_block *cmd_write;	/* next free slot in the ring */
	bool dev_table_is_indirect;		/* two-level device table in use */
	uint64_t *indirect_dev_lvl1_table;	/* level-1 table (64-bit entries) */
	size_t indirect_dev_lvl1_width;		/* DeviceID bits covered by level 1 */
	size_t indirect_dev_lvl2_width;		/* DeviceID bits covered per level-2 page */
	size_t indirect_dev_page_size;		/* page size used for level-2 tables */
};

/* Static per-instance configuration */
struct gicv3_its_config {
	uintptr_t base_addr;			/* ITS register base address */
	size_t base_size;			/* size of the register region */
	struct its_cmd_block *cmd_queue;	/* backing storage for the command ring */
	size_t cmd_queue_size;			/* command ring size in bytes */
};
/*
 * Find last (most significant) set bit, 1-based:
 * fls_z(1) == 1, fls_z(0x80000000) == 32, fls_z(0) == 0.
 */
static inline int fls_z(unsigned int x)
{
	int pos = sizeof(x) * 8;

	while (pos > 0) {
		if (x & (1u << (pos - 1))) {
			return pos;
		}
		pos--;
	}

	return 0;
}
/* wait 500ms & wakeup every millisecond */
#define WAIT_QUIESCENT 500

/*
 * Disable the ITS (if it is enabled) and wait until GITS_CTLR reports
 * quiescent, polling every 1ms.
 *
 * Fix: the original had an unreachable `return 0;` after the infinite
 * poll loop; the loop is restructured so success/failure paths are clear.
 *
 * @return 0 once quiescent, -EBUSY after WAIT_QUIESCENT milliseconds
 */
static int its_force_quiescent(struct gicv3_its_data *data)
{
	unsigned int count = WAIT_QUIESCENT;
	uint32_t reg = sys_read32(data->base + GITS_CTLR);

	if (GITS_CTLR_ENABLED_GET(reg)) {
		/* Disable ITS */
		reg &= ~MASK(GITS_CTLR_ENABLED);
		sys_write32(reg, data->base + GITS_CTLR);
	}

	while (!GITS_CTLR_QUIESCENT_GET(reg)) {
		count--;
		if (!count) {
			return -EBUSY;
		}

		k_msleep(1);
		reg = sys_read32(data->base + GITS_CTLR);
	}

	return 0;
}
/* Human-readable names for the GITS_BASER table types we program */
static const char *const its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE] = "Devices",
	[GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
};
/* Probe the BASER(i) to get the largest supported page size.
 *
 * Writes each candidate (64K, then 16K, then 4K) into the page-size
 * field and reads it back; the first value that sticks is supported.
 */
static size_t its_probe_baser_page_size(struct gicv3_its_data *data, int i)
{
	uint64_t page_size = GITS_BASER_PAGE_SIZE_64K;

	while (page_size > GITS_BASER_PAGE_SIZE_4K) {
		uint64_t reg = sys_read64(data->base + GITS_BASER(i));

		reg &= ~MASK(GITS_BASER_PAGE_SIZE);
		reg |= MASK_SET(page_size, GITS_BASER_PAGE_SIZE);

		sys_write64(reg, data->base + GITS_BASER(i));

		reg = sys_read64(data->base + GITS_BASER(i));

		/* Value stuck after write-back: this page size is supported */
		if (MASK_GET(reg, GITS_BASER_PAGE_SIZE) == page_size) {
			break;
		}

		/* Fall back to the next smaller candidate */
		switch (page_size) {
		case GITS_BASER_PAGE_SIZE_64K:
			page_size = GITS_BASER_PAGE_SIZE_16K;
			break;
		default:
			page_size = GITS_BASER_PAGE_SIZE_4K;
		}
	}

	/* Translate the register encoding into a byte count */
	switch (page_size) {
	case GITS_BASER_PAGE_SIZE_64K:
		return SIZE_64K;
	case GITS_BASER_PAGE_SIZE_16K:
		return SIZE_16K;
	default:
		return SIZE_4K;
	}
}
/*
 * Probe and allocate the ITS private tables (GITS_BASERn): one table per
 * supported type (Device, Collection). A two-level (indirect) device
 * table is used when the DeviceID space is too large for a flat table.
 *
 * Fix: the LOG_INF format used %ld for size_t arguments; size_t must be
 * printed with %zu (format/argument mismatch is undefined behavior).
 *
 * @return 0 on success, -ENOMEM if a table allocation fails
 */
static int its_alloc_tables(struct gicv3_its_data *data)
{
	unsigned int device_ids = GITS_TYPER_DEVBITS_GET(sys_read64(data->base + GITS_TYPER)) + 1;
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; ++i) {
		uint64_t reg = sys_read64(data->base + GITS_BASER(i));
		unsigned int type = GITS_BASER_TYPE_GET(reg);
		size_t page_size, entry_size, page_cnt, lvl2_width = 0;
		bool indirect = false;
		void *alloc_addr;

		entry_size = GITS_BASER_ENTRY_SIZE_GET(reg) + 1;

		switch (GITS_BASER_PAGE_SIZE_GET(reg)) {
		case GITS_BASER_PAGE_SIZE_4K:
			page_size = SIZE_4K;
			break;
		case GITS_BASER_PAGE_SIZE_16K:
			page_size = SIZE_16K;
			break;
		case GITS_BASER_PAGE_SIZE_64K:
			page_size = SIZE_64K;
			break;
		default:
			page_size = SIZE_4K;
		}

		switch (type) {
		case GITS_BASER_TYPE_DEVICE:
			if (device_ids > 16) {
				/* Use the largest possible page size for indirect */
				page_size = its_probe_baser_page_size(data, i);

				/*
				 * lvl1 table size:
				 * subtract ID bits that sparse lvl2 table from 'ids'
				 * which is reported by ITS hardware times lvl1 table
				 * entry size.
				 */
				lvl2_width = fls_z(page_size / entry_size) - 1;
				device_ids -= lvl2_width + 1;

				/* The level 1 entry size is a 64bit pointer */
				entry_size = sizeof(uint64_t);

				indirect = true;
			}

			page_cnt = ROUND_UP(entry_size << device_ids, page_size) / page_size;
			break;
		case GITS_BASER_TYPE_COLLECTION:
			page_cnt =
				ROUND_UP(entry_size * CONFIG_MP_MAX_NUM_CPUS, page_size)/page_size;
			break;
		default:
			continue;
		}

		LOG_INF("Allocating %s table of %zux%zuK pages (%zu bytes entry)",
			its_base_type_string[type], page_cnt, page_size / 1024, entry_size);

		alloc_addr = k_aligned_alloc(page_size, page_size * page_cnt);
		if (!alloc_addr) {
			return -ENOMEM;
		}

		memset(alloc_addr, 0, page_size * page_cnt);

		switch (page_size) {
		case SIZE_4K:
			reg = MASK_SET(GITS_BASER_PAGE_SIZE_4K, GITS_BASER_PAGE_SIZE);
			break;
		case SIZE_16K:
			reg = MASK_SET(GITS_BASER_PAGE_SIZE_16K, GITS_BASER_PAGE_SIZE);
			break;
		case SIZE_64K:
			reg = MASK_SET(GITS_BASER_PAGE_SIZE_64K, GITS_BASER_PAGE_SIZE);
			break;
		}

		reg |= MASK_SET(page_cnt - 1, GITS_BASER_SIZE);
		reg |= MASK_SET(GIC_BASER_SHARE_INNER, GITS_BASER_SHAREABILITY);
		reg |= MASK_SET((uintptr_t)alloc_addr >> GITS_BASER_ADDR_SHIFT, GITS_BASER_ADDR);
		reg |= MASK_SET(GIC_BASER_CACHE_INNERLIKE, GITS_BASER_OUTER_CACHE);
		reg |= MASK_SET(GIC_BASER_CACHE_RAWAWB, GITS_BASER_INNER_CACHE);
		reg |= MASK_SET(indirect ? 1 : 0, GITS_BASER_INDIRECT);
		reg |= MASK_SET(1, GITS_BASER_VALID);

		sys_write64(reg, data->base + GITS_BASER(i));

		/* TOFIX: check page size & SHAREABILITY validity after write */

		if (type == GITS_BASER_TYPE_DEVICE && indirect) {
			data->dev_table_is_indirect = indirect;
			data->indirect_dev_lvl1_table = alloc_addr;
			data->indirect_dev_lvl1_width = device_ids;
			data->indirect_dev_lvl2_width = lvl2_width;
			data->indirect_dev_page_size = page_size;

			LOG_DBG("%s table Indirection enabled", its_base_type_string[type]);
		}
	}

	return 0;
}
/* True when advancing the write pointer would collide with the ITS's
 * read pointer, i.e. the command ring is exhausted.
 */
static bool its_queue_full(struct gicv3_its_data *data)
{
	int wr = data->cmd_write - data->cmd_base;
	int rd = sys_read32(data->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	return ((wr + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == rd;
}
static struct its_cmd_block *its_allocate_entry(struct gicv3_its_data *data)
{
struct its_cmd_block *cmd;
unsigned int count = 1000000; /* 1s! */
while (its_queue_full(data)) {
count--;
if (!count) {
LOG_ERR("ITS queue not draining");
return NULL;
}
k_usleep(1);
}
cmd = data->cmd_write++;
/* Handle queue wrapping */
if (data->cmd_write == (data->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) {
data->cmd_write = data->cmd_base;
}
/* Clear command */
cmd->raw_cmd[0] = 0;
cmd->raw_cmd[1] = 0;
cmd->raw_cmd[2] = 0;
cmd->raw_cmd[3] = 0;
return cmd;
}
/*
 * Publish a queued command by advancing GITS_CWRITER, then poll
 * GITS_CREADR until the ITS has consumed it (up to ~1s).
 *
 * Fix: the timeout log used %lld (signed) for uint64_t values; print
 * them as unsigned with %llu instead.
 *
 * @return 0 on success, -ETIMEDOUT if the ITS stalls
 */
static int its_post_command(struct gicv3_its_data *data, struct its_cmd_block *cmd)
{
	uint64_t wr_idx, rd_idx, idx;
	unsigned int count = 1000000; /* 1s! */

	wr_idx = (data->cmd_write - data->cmd_base) * sizeof(*cmd);
	rd_idx = sys_read32(data->base + GITS_CREADR);

	/* Ensure the command words are visible before telling the ITS */
	barrier_dsync_fence_full();

	sys_write32(wr_idx, data->base + GITS_CWRITER);

	while (1) {
		idx = sys_read32(data->base + GITS_CREADR);
		if (idx == wr_idx) {
			break;
		}

		count--;
		if (!count) {
			LOG_ERR("ITS queue timeout (rd %llu => %llu => wr %llu)",
				(unsigned long long)rd_idx,
				(unsigned long long)idx,
				(unsigned long long)wr_idx);
			return -ETIMEDOUT;
		}

		k_usleep(1);
	}

	return 0;
}
/* SYNC: wait point for prior commands targeting one redistributor. */
static int its_send_sync_cmd(struct gicv3_its_data *data, uintptr_t rd_addr)
{
	struct its_cmd_block *entry = its_allocate_entry(data);

	if (entry == NULL) {
		return -EBUSY;
	}

	entry->raw_cmd[0] = MASK_SET(GITS_CMD_ID_SYNC, GITS_CMD_ID);
	entry->raw_cmd[2] = MASK_SET(rd_addr >> GITS_CMD_RDBASE_ALIGN, GITS_CMD_RDBASE);

	return its_post_command(data, entry);
}
/* MAPC: (un)bind an interrupt collection to a redistributor. */
static int its_send_mapc_cmd(struct gicv3_its_data *data, uint32_t icid,
			     uintptr_t rd_addr, bool valid)
{
	uint64_t word2;
	struct its_cmd_block *entry = its_allocate_entry(data);

	if (entry == NULL) {
		return -EBUSY;
	}

	word2 = MASK_SET(icid, GITS_CMD_ICID);
	word2 |= MASK_SET(rd_addr >> GITS_CMD_RDBASE_ALIGN, GITS_CMD_RDBASE);
	word2 |= MASK_SET(valid ? 1 : 0, GITS_CMD_VALID);

	entry->raw_cmd[0] = MASK_SET(GITS_CMD_ID_MAPC, GITS_CMD_ID);
	entry->raw_cmd[2] = word2;

	return its_post_command(data, entry);
}
static int its_send_mapd_cmd(struct gicv3_its_data *data, uint32_t device_id,
uint32_t size, uintptr_t itt_addr, bool valid)
{
struct its_cmd_block *cmd = its_allocate_entry(data);
if (!cmd) {
return -EBUSY;
}
cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_MAPD, GITS_CMD_ID) |
MASK_SET(device_id, GITS_CMD_DEVICEID);
cmd->raw_cmd[1] = MASK_SET(size, GITS_CMD_SIZE);
cmd->raw_cmd[2] = MASK_SET(itt_addr >> GITS_CMD_ITTADDR_ALIGN, GITS_CMD_ITTADDR) |
MASK_SET(valid ? 1 : 0, GITS_CMD_VALID);
return its_post_command(data, cmd);
}
static int its_send_mapti_cmd(struct gicv3_its_data *data, uint32_t device_id,
uint32_t event_id, uint32_t intid, uint32_t icid)
{
struct its_cmd_block *cmd = its_allocate_entry(data);
if (!cmd) {
return -EBUSY;
}
cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_MAPTI, GITS_CMD_ID) |
MASK_SET(device_id, GITS_CMD_DEVICEID);
cmd->raw_cmd[1] = MASK_SET(event_id, GITS_CMD_EVENTID) |
MASK_SET(intid, GITS_CMD_PINTID);
cmd->raw_cmd[2] = MASK_SET(icid, GITS_CMD_ICID);
return its_post_command(data, cmd);
}
static int its_send_int_cmd(struct gicv3_its_data *data, uint32_t device_id,
uint32_t event_id)
{
struct its_cmd_block *cmd = its_allocate_entry(data);
if (!cmd) {
return -EBUSY;
}
cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_INT, GITS_CMD_ID) |
MASK_SET(device_id, GITS_CMD_DEVICEID);
cmd->raw_cmd[1] = MASK_SET(event_id, GITS_CMD_EVENTID);
return its_post_command(data, cmd);
}
static int its_send_invall_cmd(struct gicv3_its_data *data, uint32_t icid)
{
struct its_cmd_block *cmd = its_allocate_entry(data);
if (!cmd) {
return -EBUSY;
}
cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_INVALL, GITS_CMD_ID);
cmd->raw_cmd[2] = MASK_SET(icid, GITS_CMD_ICID);
return its_post_command(data, cmd);
}
/*
 * API hook: trigger the interrupt mapped to (@device_id, @event_id)
 * via an ITS INT command (used e.g. to test/inject MSIs from software).
 */
static int gicv3_its_send_int(const struct device *dev, uint32_t device_id, uint32_t event_id)
{
	struct gicv3_its_data *data = dev->data;

	/* TOFIX check device_id & event_id bounds */

	return its_send_int_cmd(data, device_id, event_id);
}
/*
 * Program GITS_CBASER with the statically allocated command ring and
 * reset both software and hardware write pointers to the start.
 * Cacheability/shareability attributes are inner-shareable write-back.
 */
static void its_setup_cmd_queue(const struct device *dev)
{
	const struct gicv3_its_config *cfg = dev->config;
	struct gicv3_its_data *data = dev->data;
	uint64_t reg = 0;

	/* Zero out cmd table */
	memset(cfg->cmd_queue, 0, cfg->cmd_queue_size);

	/* GITS_CBASER.Size is in units of 4K pages */
	reg |= MASK_SET(cfg->cmd_queue_size / SIZE_4K, GITS_CBASER_SIZE);
	reg |= MASK_SET(GIC_BASER_SHARE_INNER, GITS_CBASER_SHAREABILITY);
	reg |= MASK_SET((uintptr_t)cfg->cmd_queue >> GITS_CBASER_ADDR_SHIFT, GITS_CBASER_ADDR);
	reg |= MASK_SET(GIC_BASER_CACHE_RAWAWB, GITS_CBASER_OUTER_CACHE);
	reg |= MASK_SET(GIC_BASER_CACHE_RAWAWB, GITS_CBASER_INNER_CACHE);
	reg |= MASK_SET(1, GITS_CBASER_VALID);

	sys_write64(reg, data->base + GITS_CBASER);

	data->cmd_base = (struct its_cmd_block *)cfg->cmd_queue;
	data->cmd_write = data->cmd_base;

	LOG_INF("Allocated %ld entries for command table", ITS_CMD_QUEUE_NR_ENTRIES);

	/* Writing CWRITER resets the hardware write pointer to the ring start */
	sys_write64(0, data->base + GITS_CWRITER);
}
/*
 * Return the RDbase encoding for @cpuid's redistributor, as expected by
 * ITS commands: when GITS_TYPER.PTA is set the ITS takes a physical
 * address, otherwise it takes the processor number from GICR_TYPER.
 */
static uintptr_t gicv3_rdist_get_rdbase(const struct device *dev, unsigned int cpuid)
{
	struct gicv3_its_data *data = dev->data;
	uint64_t typer = sys_read64(data->base + GITS_TYPER);

	if (GITS_TYPER_PTA_GET(typer)) {
		return gic_rdists[cpuid];
	} else {
		return GICR_TYPER_PROCESSOR_NUMBER_GET(sys_read64(gic_rdists[cpuid] + GICR_TYPER));
	}
}
/*
 * API hook: map (@device_id, @event_id) to LPI @intid on the current
 * CPU's collection, then SYNC against that CPU's redistributor.
 *
 * @return 0 on success, -EINVAL if @intid is not in the LPI range,
 *         otherwise the MAPTI/SYNC error code.
 */
static int gicv3_its_map_intid(const struct device *dev, uint32_t device_id, uint32_t event_id,
			       unsigned int intid)
{
	struct gicv3_its_data *data = dev->data;
	int ret;

	/* TOFIX check device_id, event_id & intid bounds */

	/* LPI INTIDs start at 8192; lower values are SGI/PPI/SPI space */
	if (intid < 8192) {
		return -EINVAL;
	}

	/* The CPU id directly maps as ICID for the current CPU redistributor */
	ret = its_send_mapti_cmd(data, device_id, event_id, intid, arch_curr_cpu()->id);
	if (ret) {
		LOG_ERR("Failed to map eventid %d to intid %d for deviceid %x",
			event_id, intid, device_id);
		return ret;
	}

	return its_send_sync_cmd(data, gicv3_rdist_get_rdbase(dev, arch_curr_cpu()->id));
}
/*
 * API hook: allocate and register an ITT (Interrupt Translation Table)
 * for @device_id, sized for at least @nites events.
 *
 * If the device table is indirect, a level-2 page is allocated on demand
 * for the DeviceID's slice of the level-1 table. The ITT is then
 * registered with a MAPD command; the ITT size field is log2(#ITEs) - 1,
 * so the event count is rounded up to a minimum of 2.
 *
 * @return 0 on success, -EINVAL if @device_id does not fit in the
 *         level-1 table, -ENOMEM on allocation failure, otherwise the
 *         MAPD error code.
 */
static int gicv3_its_init_device_id(const struct device *dev, uint32_t device_id,
				    unsigned int nites)
{
	struct gicv3_its_data *data = dev->data;
	size_t entry_size, alloc_size;
	int nr_ites;
	void *itt;
	int ret;

	/* TOFIX check device_id & nites bounds */

	/* GITS_TYPER encodes (ITT entry size - 1) */
	entry_size = GITS_TYPER_ITT_ENTRY_SIZE_GET(sys_read64(data->base + GITS_TYPER)) + 1;

	if (data->dev_table_is_indirect) {
		size_t offset = device_id >> data->indirect_dev_lvl2_width;

		/* Check if DeviceID can fit in the Level 1 table.
		 * Valid level-1 indexes are 0 .. (1 << width) - 1, so an
		 * offset equal to (1 << width) is already out of range.
		 */
		if (offset >= (1 << data->indirect_dev_lvl1_width)) {
			return -EINVAL;
		}

		/* Check if a Level 2 table has already been allocated for the DeviceID */
		if (!data->indirect_dev_lvl1_table[offset]) {
			void *alloc_addr;

			LOG_INF("Allocating Level 2 Device %ldK table",
				data->indirect_dev_page_size / 1024);

			alloc_addr = k_aligned_alloc(data->indirect_dev_page_size,
						     data->indirect_dev_page_size);
			if (!alloc_addr) {
				return -ENOMEM;
			}

			memset(alloc_addr, 0, data->indirect_dev_page_size);

			data->indirect_dev_lvl1_table[offset] = (uintptr_t)alloc_addr |
								MASK_SET(1, GITS_BASER_VALID);

			/* Make the level-1 entry visible before MAPD uses it */
			barrier_dsync_fence_full();
		}
	}

	/* ITT must be of power of 2 */
	nr_ites = MAX(2, nites);
	alloc_size = ROUND_UP(nr_ites * entry_size, 256);

	LOG_INF("Allocating ITT for DeviceID %x and %d vectors (%ld bytes entry)",
		device_id, nr_ites, entry_size);

	itt = k_aligned_alloc(256, alloc_size);
	if (!itt) {
		return -ENOMEM;
	}

	/* size is log2(ites) - 1, equivalent to (fls(ites) - 1) - 1 */
	ret = its_send_mapd_cmd(data, device_id, fls_z(nr_ites) - 2, (uintptr_t)itt, true);
	if (ret) {
		LOG_ERR("Failed to map device id %x ITT table", device_id);
		/* Don't leak the ITT if the ITS rejected the mapping */
		k_free(itt);
		return ret;
	}

	return 0;
}
/*
 * API hook: hand out the next unused LPI INTID from the global
 * monotonically increasing counter (INTIDs are never recycled).
 */
static unsigned int gicv3_its_alloc_intid(const struct device *dev)
{
	return atomic_inc(&nlpi_intid);
}

/*
 * API hook: doorbell address a device must write to raise an MSI,
 * i.e. the physical address of this ITS's GITS_TRANSLATER register.
 */
static uint32_t gicv3_its_get_msi_addr(const struct device *dev)
{
	const struct gicv3_its_config *cfg = (const struct gicv3_its_config *)dev->config;

	return cfg->base_addr + GITS_TRANSLATER;
}
/*
 * Per-instance helper: MAPC the calling CPU's id (used as ICID) to its
 * redistributor on ITS instance @n. Expanded once per enabled DT node.
 */
#define ITS_RDIST_MAP(n)							  \
	{									  \
		const struct device *const dev = DEVICE_DT_INST_GET(n);		  \
		struct gicv3_its_data *data;					  \
		int ret;							  \
										  \
		if (dev) {							  \
			data = (struct gicv3_its_data *) dev->data;		  \
			ret = its_send_mapc_cmd(data, arch_curr_cpu()->id,	  \
						gicv3_rdist_get_rdbase(dev,	  \
							arch_curr_cpu()->id),	  \
						true);				  \
			if (ret) {						  \
				LOG_ERR("Failed to map CPU%d redistributor",	  \
					arch_curr_cpu()->id);			  \
			}							  \
		}								  \
	}

/* Map the calling CPU's redistributor on every ITS instance (called per-CPU). */
void its_rdist_map(void)
{
	DT_INST_FOREACH_STATUS_OKAY(ITS_RDIST_MAP)
}

/*
 * Per-instance helper: INVALL the calling CPU's collection then SYNC,
 * flushing any cached LPI configuration in its redistributor.
 */
#define ITS_RDIST_INVALL(n)							  \
	{									  \
		const struct device *const dev = DEVICE_DT_INST_GET(n);		  \
		struct gicv3_its_data *data;					  \
		int ret;							  \
										  \
		if (dev) {							  \
			data = (struct gicv3_its_data *) dev->data;		  \
			ret = its_send_invall_cmd(data, arch_curr_cpu()->id);	  \
			if (ret) {						  \
				LOG_ERR("Failed to sync RDIST LPI cache for CPU%d", \
					arch_curr_cpu()->id);			  \
			}							  \
										  \
			its_send_sync_cmd(data,					  \
					  gicv3_rdist_get_rdbase(dev,		  \
						  arch_curr_cpu()->id));	  \
		}								  \
	}

/* Invalidate the calling CPU's cached LPI configuration on every ITS instance. */
void its_rdist_invall(void)
{
	DT_INST_FOREACH_STATUS_OKAY(ITS_RDIST_INVALL)
}
/*
 * Device init: map the ITS registers, quiesce the ITS, allocate its
 * device/collection tables and command ring, enable it, and MAPC the
 * boot CPU's redistributor. Secondary CPUs are mapped later through
 * its_rdist_map().
 */
static int gicv3_its_init(const struct device *dev)
{
	const struct gicv3_its_config *cfg = dev->config;
	struct gicv3_its_data *data = dev->data;
	uint32_t reg;
	int ret;

	device_map(&data->base, cfg->base_addr, cfg->base_size, K_MEM_CACHE_NONE);

	ret = its_force_quiescent(data);
	if (ret) {
		LOG_ERR("Failed to quiesce, giving up");
		return ret;
	}

	ret = its_alloc_tables(data);
	if (ret) {
		LOG_ERR("Failed to allocate tables, giving up");
		return ret;
	}

	its_setup_cmd_queue(dev);

	reg = sys_read32(data->base + GITS_CTLR);
	reg |= MASK_SET(1, GITS_CTLR_ENABLED);
	sys_write32(reg, data->base + GITS_CTLR);

	/* Map the boot CPU id to the CPU redistributor */
	ret = its_send_mapc_cmd(data, arch_curr_cpu()->id,
				gicv3_rdist_get_rdbase(dev, arch_curr_cpu()->id), true);
	if (ret) {
		LOG_ERR("Failed to map boot CPU redistributor");
		return ret;
	}

	return 0;
}
/* ITS driver API vtable, consumed through the generic ITS interface. */
struct its_driver_api gicv3_its_api = {
	.alloc_intid = gicv3_its_alloc_intid,
	.setup_deviceid = gicv3_its_init_device_id,
	.map_intid = gicv3_its_map_intid,
	.send_int = gicv3_its_send_int,
	.get_msi_addr = gicv3_its_get_msi_addr,
};

/*
 * Per-DT-instance definition: a statically allocated, suitably aligned
 * command ring plus data/config structs, registered at PRE_KERNEL_1.
 */
#define GICV3_ITS_INIT(n)						      \
	static struct its_cmd_block gicv3_its_cmd##n[ITS_CMD_QUEUE_NR_ENTRIES] \
			__aligned(ITS_CMD_QUEUE_SIZE);			      \
	static struct gicv3_its_data gicv3_its_data##n;			      \
	static const struct gicv3_its_config gicv3_its_config##n = {	      \
		.base_addr = DT_INST_REG_ADDR(n),			      \
		.base_size = DT_INST_REG_SIZE(n),			      \
		.cmd_queue = gicv3_its_cmd##n,				      \
		.cmd_queue_size = sizeof(gicv3_its_cmd##n),		      \
	};								      \
	DEVICE_DT_INST_DEFINE(n, &gicv3_its_init, NULL,			      \
			      &gicv3_its_data##n,			      \
			      &gicv3_its_config##n,			      \
			      PRE_KERNEL_1,				      \
			      CONFIG_INTC_INIT_PRIORITY,		      \
			      &gicv3_its_api);

DT_INST_FOREACH_STATUS_OKAY(GICV3_ITS_INIT)
``` | /content/code_sandbox/drivers/interrupt_controller/intc_gicv3_its.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,557 |
```c
/*
*
*/
#define DT_DRV_COMPAT infineon_xmc4xxx_intc
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/dt-bindings/interrupt-controller/infineon-xmc4xxx-intc.h>
#include <zephyr/irq.h>
#include <xmc_eru.h>
/* In Infineon XMC4XXX SoCs, gpio interrupts are triggered via an Event Request Unit (ERU) */
/* module. A subset of the GPIOs are connected to the ERU. The ERU monitors edge triggers */
/* and creates a SR. */
/* This driver configures the ERU for a target port/pin combination for rising/falling */
/* edge events. Note that the ERU module does not generate SR based on the gpio level. */
/* Internally the ERU tracks the *status* of an event. The status is set on a positive edge and */
/* unset on a negative edge (or vice-versa depending on the configuration). The value of */
/* the status is used to implement a level triggered interrupt; The ISR checks the status */
/* flag and calls the callback function if the status is set. */
/* The ERU configurations for supported port/pin combinations are stored in a devicetree file */
/* dts/arm/infineon/xmc4xxx_x_x-intc.dtsi. The configurations are stored in the opaque array */
/* uint16 port_line_mapping[]. The bitfields for the opaque entries are defined in */
/* dt-bindings/interrupt-controller/infineon-xmc4xxx-intc.h. */
/* Per-ERU-line callback registration. */
struct isr_cb {
	/* if fn is NULL it implies the interrupt line has not been allocated */
	void (*fn)(const struct device *dev, int pin);
	/* opaque pointer handed back to fn (the GPIO port device) */
	void *data;
	/* level vs edge semantics requested by the GPIO driver */
	enum gpio_int_mode mode;
	/* port/pin this line is currently bound to */
	uint8_t port_id;
	uint8_t pin;
};

/* 2 ERUs x 4 output channels = 8 service-request lines total */
#define MAX_ISR_NUM 8

struct intc_xmc4xxx_data {
	struct isr_cb cb[MAX_ISR_NUM];
};

#define NUM_ERUS 2

struct intc_xmc4xxx_config {
	XMC_ERU_t *eru_regs[NUM_ERUS];
};

/* Opaque port/pin/line/ERU-source routing table generated from the DT */
static const uint16_t port_line_mapping[DT_INST_PROP_LEN(0, port_line_mapping)] =
	DT_INST_PROP(0, port_line_mapping);
/*
 * Bind a GPIO port/pin to a free ERU line and configure edge detection.
 *
 * Searches the DT routing table for entries matching (port_id, pin);
 * the first entry whose ERU line is free is claimed. Level-triggered
 * interrupts are emulated via the ERU status flag: if the pin is
 * already at the active level, the NVIC interrupt is set pending
 * manually so the first event is not missed.
 *
 * @param port_id   GPIO port index
 * @param pin       pin index within the port
 * @param mode      level or edge semantics (GPIO_INT_MODE_*)
 * @param trig      trigger polarity (GPIO_INT_TRIG_*)
 * @param fn        callback invoked from the ISR
 * @param user_data passed back to fn; also used as the GPIO device for
 *                  reading the pin level in level mode
 * @return 0 on success, -ENOTSUP if the pin has no ERU route, -EBUSY if
 *         all matching lines are taken, -EINVAL for an unknown trigger.
 */
int intc_xmc4xxx_gpio_enable_interrupt(int port_id, int pin, enum gpio_int_mode mode,
				       enum gpio_int_trig trig,
				       void (*fn)(const struct device *, int), void *user_data)
{
	const struct device *dev = DEVICE_DT_INST_GET(0);
	struct intc_xmc4xxx_data *data = dev->data;
	const struct intc_xmc4xxx_config *config = dev->config;
	int ret = -ENOTSUP;

	for (int i = 0; i < ARRAY_SIZE(port_line_mapping); i++) {
		XMC_ERU_ETL_CONFIG_t etl_config = {0};
		XMC_ERU_OGU_CONFIG_t isr_config = {0};
		XMC_ERU_ETL_EDGE_DETECTION_t trig_xmc;
		XMC_ERU_t *eru;
		int port_map, pin_map, line, eru_src, eru_ch;
		struct isr_cb *cb;

		port_map = XMC4XXX_INTC_GET_PORT(port_line_mapping[i]);
		pin_map = XMC4XXX_INTC_GET_PIN(port_line_mapping[i]);

		if (port_map != port_id || pin_map != pin) {
			continue;
		}

		line = XMC4XXX_INTC_GET_LINE(port_line_mapping[i]);
		cb = &data->cb[line];
		if (cb->fn) {
			/* It's already used. Continue search for available line */
			/* with same port/pin */
			ret = -EBUSY;
			continue;
		}

		eru_src = XMC4XXX_INTC_GET_ERU_SRC(port_line_mapping[i]);
		/* lines 0-3 -> ERU0 channels 0-3, lines 4-7 -> ERU1 channels 0-3 */
		eru_ch = line & 0x3;
		if (trig == GPIO_INT_TRIG_HIGH) {
			trig_xmc = XMC_ERU_ETL_EDGE_DETECTION_RISING;
		} else if (trig == GPIO_INT_TRIG_LOW) {
			trig_xmc = XMC_ERU_ETL_EDGE_DETECTION_FALLING;
		} else if (trig == GPIO_INT_TRIG_BOTH) {
			trig_xmc = XMC_ERU_ETL_EDGE_DETECTION_BOTH;
		} else {
			return -EINVAL;
		}

		cb->port_id = port_id;
		cb->pin = pin;
		cb->mode = mode;
		cb->fn = fn;
		cb->data = user_data;

		/* setup the eru */
		etl_config.edge_detection = trig_xmc;
		etl_config.input_a = eru_src;
		etl_config.input_b = eru_src;
		etl_config.source = eru_src >> 2;
		etl_config.status_flag_mode = XMC_ERU_ETL_STATUS_FLAG_MODE_HWCTRL;
		etl_config.enable_output_trigger = 1;
		etl_config.output_trigger_channel = eru_ch;

		eru = config->eru_regs[line >> 2];

		XMC_ERU_ETL_Init(eru, eru_ch, &etl_config);

		isr_config.service_request = XMC_ERU_OGU_SERVICE_REQUEST_ON_TRIGGER;
		XMC_ERU_OGU_Init(eru, eru_ch, &isr_config);

		/* if the gpio level is already set then we must manually set the interrupt to */
		/* pending */
		if (mode == GPIO_INT_MODE_LEVEL) {
			/* user_data is the GPIO port device here */
			ret = gpio_pin_get_raw(user_data, pin);
			if (ret < 0) {
				return ret;
			}
#define NVIC_ISPR_BASE 0xe000e200u
			if ((ret == 0 && trig == GPIO_INT_TRIG_LOW) ||
			    (ret == 1 && trig == GPIO_INT_TRIG_HIGH)) {
				eru->EXICON_b[eru_ch].FL = 1;
				/* put interrupt into pending state; ISPR is
				 * write-1-to-set so the RMW is harmless.
				 * NOTE(review): assumes ERU service requests
				 * occupy NVIC IRQs line+1 — confirm against the
				 * XMC4xxx interrupt map.
				 */
				*(uint32_t *)(NVIC_ISPR_BASE) |= BIT(line + 1);
			}
		}
		return 0;
	}

	return ret;
}
/*
 * Release the ERU line bound to (port_id, pin): disable its service
 * request, clear the latched status flag and free the callback slot.
 *
 * @return 0 on success, -EINVAL if no line is bound to that port/pin.
 */
int intc_xmc4xxx_gpio_disable_interrupt(int port_id, int pin)
{
	const struct device *dev = DEVICE_DT_INST_GET(0);
	const struct intc_xmc4xxx_config *config = dev->config;
	struct intc_xmc4xxx_data *data = dev->data;
	int eru_ch;

	for (int line = 0; line < ARRAY_SIZE(data->cb); line++) {
		struct isr_cb *cb;

		cb = &data->cb[line];
		eru_ch = line & 0x3;
		if (cb->fn && cb->port_id == port_id && cb->pin == pin) {
			XMC_ERU_t *eru = config->eru_regs[line >> 2];

			cb->fn = NULL;
			/* disable the SR */
			eru->EXICON_b[eru_ch].PE = 0;
			/* unset the status flag */
			eru->EXICON_b[eru_ch].FL = 0;
			/* no need to clear other variables in cb*/
			return 0;
		}
	}
	return -EINVAL;
}
/*
 * Shared ISR for all eight ERU service-request lines; @arg carries the
 * line number. Invokes the registered callback; for level-triggered
 * interrupts it keeps re-invoking it while the ERU status flag stays
 * set, emulating level semantics on top of edge-detecting hardware.
 */
static void intc_xmc4xxx_isr(void *arg)
{
	int line = (int)arg;
	const struct device *dev = DEVICE_DT_INST_GET(0);
	struct intc_xmc4xxx_data *data = dev->data;
	const struct intc_xmc4xxx_config *config = dev->config;
	struct isr_cb *cb = &data->cb[line];
	XMC_ERU_t *eru = config->eru_regs[line >> 2];
	int eru_ch = line & 0x3;

	/* The callback function may actually disable the interrupt and set cb->fn = NULL */
	/* as is done in tests/drivers/gpio/gpio_api_1pin. Assume that the callback function */
	/* will NOT disable the interrupt and then enable another port/pin */
	/* in the same callback which could potentially set cb->fn again. */
	while (cb->fn) {
		/* cb->data is the GPIO port device registered at enable time */
		cb->fn(cb->data, cb->pin);
		/* for level triggered interrupts we have to manually check the status. */
		if (cb->mode == GPIO_INT_MODE_LEVEL && eru->EXICON_b[eru_ch].FL == 1) {
			continue;
		}
		/* break for edge triggered interrupts */
		break;
	}
}
/*
 * Connect and enable the IRQ named @name (if present in the DT) and
 * bind it to intc_xmc4xxx_isr with @line_number as its argument.
 */
#define INTC_IRQ_CONNECT_ENABLE(name, line_number)                                                 \
	COND_CODE_1(DT_INST_IRQ_HAS_NAME(0, name),                                                 \
	  (IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, name, irq),                                          \
		DT_INST_IRQ_BY_NAME(0, name, priority), intc_xmc4xxx_isr, (void *)line_number, 0); \
		irq_enable(DT_INST_IRQ_BY_NAME(0, name, irq));), ())

/* Device init: wire up every ERU service request present in the DT. */
static int intc_xmc4xxx_init(const struct device *dev)
{
	/* connect irqs only if they defined by name in the dts */
	INTC_IRQ_CONNECT_ENABLE(eru0sr0, 0);
	INTC_IRQ_CONNECT_ENABLE(eru0sr1, 1);
	INTC_IRQ_CONNECT_ENABLE(eru0sr2, 2);
	INTC_IRQ_CONNECT_ENABLE(eru0sr3, 3);
	INTC_IRQ_CONNECT_ENABLE(eru1sr0, 4);
	INTC_IRQ_CONNECT_ENABLE(eru1sr1, 5);
	INTC_IRQ_CONNECT_ENABLE(eru1sr2, 6);
	INTC_IRQ_CONNECT_ENABLE(eru1sr3, 7);
	return 0;
}

/* Single-instance state: this driver supports exactly one intc node. */
struct intc_xmc4xxx_data intc_xmc4xxx_data0;

struct intc_xmc4xxx_config intc_xmc4xxx_config0 = {
	.eru_regs = {
		(XMC_ERU_t *)DT_INST_REG_ADDR_BY_NAME(0, eru0),
		(XMC_ERU_t *)DT_INST_REG_ADDR_BY_NAME(0, eru1),
	},
};

DEVICE_DT_INST_DEFINE(0, intc_xmc4xxx_init, NULL,
		      &intc_xmc4xxx_data0, &intc_xmc4xxx_config0, PRE_KERNEL_1,
		      CONFIG_INTC_INIT_PRIORITY, NULL);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_xmc4xxx.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,217 |
```unknown
# XMC4XXX INTC configuration
# Enabled by default whenever the DT declares an infineon,xmc4xxx-intc node.

config XMC4XXX_INTC
	bool "Interrupt Controller Driver for XMC4XXX series devices"
	default y
	depends on DT_HAS_INFINEON_XMC4XXX_INTC_ENABLED
	help
	  Enable interrupt controller driver for XMC4XXX series of devices. This is required for
	  GPIO interrupt support.
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.xmc4xxx | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 76 |
```unknown
# SAM0 EIC configuration
# Enabled by default whenever the DT declares an atmel,sam0-eic node.

config SAM0_EIC
	bool "External Interrupt Controller (EIC) Driver for SAM0 series devices"
	default y
	depends on DT_HAS_ATMEL_SAM0_EIC_ENABLED
	help
	  Enable EIC driver for SAM0 series of devices. This is required for
	  GPIO interrupt support.
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.sam0 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 73 |
```unknown
# Enabled by default whenever the DT declares a gd,gd32-exti node.
config GD32_EXTI
	bool "GD32 Extended Interrupts and Events (EXTI) Controller"
	default y
	depends on DT_HAS_GD_GD32_EXTI_ENABLED
	help
	  Enable the GigaDevice GD32 Extended Interrupts and Events (EXTI)
	  controller driver.
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.gd32_exti | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 63 |
```unknown
config INTC_ESP32C3
bool "ESP32C3 interrupt controller driver"
depends on SOC_FAMILY_ESPRESSIF_ESP32
depends on SOC_SERIES_ESP32C2 || SOC_SERIES_ESP32C3 || SOC_SERIES_ESP32C6
default y
help
Enables the esp32c3 interrupt controller driver to handle ISR
management at SoC level.
config INTC_ESP32C3_DECISIONS_LOG
bool "Espressif's interrupt allocator logging"
depends on INTC_ESP32C3
select LOG
help
Enable this option to visualize information on decisions made by the
interrupt allocator. This has no impact on the interrupt allocator usage
	  but may be valuable for debugging purposes. When enabled, messages are
	  printed to the serial console.
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.esp32c3 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 177 |
```c
/*
*
*/
/**
 * @brief Driver for Nuclei's Extended Core Interrupt Controller (ECLIC)
*/
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/util.h>
#include <zephyr/device.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/drivers/interrupt_controller/riscv_clic.h>
#define DT_DRV_COMPAT nuclei_eclic
/* Global ECLIC configuration register (cliccfg). */
union CLICCFG {
	struct {
		uint8_t _reserved0 : 1;
		/** number of interrupt level bits */
		uint8_t nlbits : 4;
		uint8_t _reserved1 : 2;
		uint8_t _reserved2 : 1;
	} b;
	uint8_t w;
};

/* Read-only ECLIC capability register (clicinfo). */
union CLICINFO {
	struct {
		/** number of max supported interrupts */
		uint32_t numint : 13;
		/** architecture version */
		uint32_t version : 8;
		/** supported bits in the clicintctl */
		uint32_t intctlbits : 4;
		uint32_t _reserved0 : 7;
	} b;
	uint32_t qw;
};

/* Machine-mode threshold level register (mth). */
union CLICMTH {
	uint8_t w;
};

/* Per-interrupt pending bit (clicintip). */
union CLICINTIP {
	struct {
		/** Interrupt Pending */
		uint8_t IP : 1;
		uint8_t reserved0 : 7;
	} b;
	uint8_t w;
};

/* Per-interrupt enable bit (clicintie). */
union CLICINTIE {
	struct {
		/** Interrupt Enabled */
		uint8_t IE : 1;
		uint8_t reserved0 : 7;
	} b;
	uint8_t w;
};

/* Per-interrupt attributes: vectoring and trigger mode (clicintattr). */
union CLICINTATTR {
	struct {
		/** 0: non-vectored 1:vectored */
		uint8_t shv : 1;
		/** 0: level 1: rising edge 2: falling edge */
		uint8_t trg : 2;
		uint8_t reserved0 : 3;
		uint8_t reserved1 : 2;
	} b;
	uint8_t w;
};

/* One 4-byte per-interrupt control record in the ECLIC register file. */
struct CLICCTRL {
	volatile union CLICINTIP INTIP;
	volatile union CLICINTIE INTIE;
	volatile union CLICINTATTR INTATTR;
	volatile uint8_t INTCTRL;
};

/** CLIC INTATTR: TRIG Mask */
#define CLIC_INTATTR_TRIG_Msk 0x3U

/* Register windows resolved from the eclic DT node's four reg entries */
#define ECLIC_CFG  (*((volatile union CLICCFG  *)(DT_REG_ADDR_BY_IDX(DT_NODELABEL(eclic), 0))))
#define ECLIC_INFO (*((volatile union CLICINFO *)(DT_REG_ADDR_BY_IDX(DT_NODELABEL(eclic), 1))))
#define ECLIC_MTH  (*((volatile union CLICMTH  *)(DT_REG_ADDR_BY_IDX(DT_NODELABEL(eclic), 2))))
#define ECLIC_CTRL ((volatile struct CLICCTRL  *)(DT_REG_ADDR_BY_IDX(DT_NODELABEL(eclic), 3)))
#define ECLIC_CTRL_SIZE (DT_REG_SIZE_BY_IDX(DT_NODELABEL(eclic), 3))

/* Cached hardware parameters, filled in by nuclei_eclic_init() */
static uint8_t nlbits;        /* configured number of level bits */
static uint8_t intctlbits;    /* implemented clicintctl bits */
static uint8_t max_prio;      /* highest encodable priority */
static uint8_t max_level;     /* highest encodable level */
static uint8_t intctrl_mask;  /* implemented-bit mask, left-aligned */
/*
 * Left-align a @shift-bit wide value within a byte: the clicintctl
 * register uses the most-significant implemented bits.
 */
static inline uint8_t leftalign8(uint8_t val, uint8_t shift)
{
	const unsigned int pad = 8U - shift;

	return (uint8_t)(val << pad);
}
/* Build a mask with the low @len bits set (len <= 8). */
static inline uint8_t mask8(uint8_t len)
{
	unsigned int bits = (1U << len) - 1U;

	return (uint8_t)(bits & 0xFFFFU);
}
/**
 * @brief Enable interrupt
 *
 * @param irq interrupt number (index into the clicintie register file)
 */
void riscv_clic_irq_enable(uint32_t irq)
{
	ECLIC_CTRL[irq].INTIE.b.IE = 1;
}

/**
 * @brief Disable interrupt
 *
 * @param irq interrupt number
 */
void riscv_clic_irq_disable(uint32_t irq)
{
	ECLIC_CTRL[irq].INTIE.b.IE = 0;
}

/**
 * @brief Get enable status of interrupt
 *
 * @param irq interrupt number
 * @return non-zero if the interrupt is enabled
 */
int riscv_clic_irq_is_enabled(uint32_t irq)
{
	return ECLIC_CTRL[irq].INTIE.b.IE;
}

/**
 * @brief Set priority and level of interrupt
 *
 * Encodes @pri (clamped to max_prio) and the maximum level into the
 * left-aligned clicintctl register; unimplemented low bits read as 1,
 * hence the OR with the inverted implemented-bit mask. Also programs
 * the trigger mode (and selective hardware vectoring where supported)
 * in clicintattr.
 *
 * @param irq   interrupt number
 * @param pri   requested priority (higher value = higher priority)
 * @param flags trigger flags, masked by CLIC_INTATTR_TRIG_Msk
 */
void riscv_clic_irq_priority_set(uint32_t irq, uint32_t pri, uint32_t flags)
{
	const uint8_t prio = leftalign8(MIN(pri, max_prio), intctlbits);
	const uint8_t level = leftalign8(max_level, nlbits);
	const uint8_t intctrl = (prio | level) | (~intctrl_mask);

	ECLIC_CTRL[irq].INTCTRL = intctrl;

	union CLICINTATTR intattr = {.w = 0};
#if defined(CONFIG_RISCV_VECTORED_MODE) && !defined(CONFIG_LEGACY_CLIC)
	/*
	 * Set Selective Hardware Vectoring.
	 * Legacy SiFive does not implement smclicshv extension and vectoring is
	 * enabled in the mode bits of mtvec.
	 */
	intattr.b.shv = 1;
#else
	intattr.b.shv = 0;
#endif
	intattr.b.trg = (uint8_t)(flags & CLIC_INTATTR_TRIG_Msk);
	ECLIC_CTRL[irq].INTATTR = intattr;
}

/**
 * @brief Set pending bit of an interrupt
 *
 * @param irq interrupt number
 */
void riscv_clic_irq_set_pending(uint32_t irq)
{
	ECLIC_CTRL[irq].INTIP.b.IP = 1;
}
/*
 * Device init: zero threshold and configuration, clear every per-IRQ
 * control record, then cache the hardware parameters used by
 * riscv_clic_irq_priority_set().
 */
static int nuclei_eclic_init(const struct device *dev)
{
	/* check hardware support required interrupt levels */
	ECLIC_MTH.w = 0;
	ECLIC_CFG.w = 0;
	ECLIC_CFG.b.nlbits = 0;
	/* NOTE(review): ECLIC_CTRL_SIZE is the DT reg-region byte size while
	 * ECLIC_CTRL[] indexes 4-byte CLICCTRL records — confirm the DT size
	 * is expressed in records, otherwise this loop overruns the region.
	 */
	for (int i = 0; i < ECLIC_CTRL_SIZE; i++) {
		ECLIC_CTRL[i] = (struct CLICCTRL) { 0 };
	}

	/* Cache hardware parameters for later priority encoding */
	nlbits = ECLIC_CFG.b.nlbits;
	intctlbits = ECLIC_INFO.b.intctlbits;
	max_prio = mask8(intctlbits - nlbits);
	max_level = mask8(nlbits);
	intctrl_mask = leftalign8(mask8(intctlbits), intctlbits);

	return 0;
}

DEVICE_DT_INST_DEFINE(0, nuclei_eclic_init, NULL, NULL, NULL,
		      PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY, NULL);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_nuclei_eclic.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,318 |
```unknown
# Multilevel interrupt configuration

config MULTI_LEVEL_INTERRUPTS
	bool "Multi-level interrupt support"
	depends on GEN_SW_ISR_TABLE
	help
	  Multiple levels of interrupts are normally used to increase the
	  number of addressable interrupts in a system. For example, if two
	  levels are used, a second level interrupt aggregator would combine
	  all interrupts routed to it into one IRQ line in the first level
	  interrupt controller. If three levels are used, a third level
	  aggregator combines its input interrupts into one IRQ line at the
	  second level. The number of interrupt levels is usually determined
	  by the hardware. (The term "aggregator" here means "interrupt
	  controller".)

if MULTI_LEVEL_INTERRUPTS

config 1ST_LEVEL_INTERRUPT_BITS
	int "Total number of first level interrupt bits"
	range 1 32
	default 8
	help
	  The number of bits to use of the 32 bit interrupt mask for first
	  tier interrupts.

config MAX_IRQ_PER_AGGREGATOR
	int "Max IRQs per interrupt aggregator"
	default 0
	help
	  The maximum number of interrupt inputs to any aggregator in the
	  system.

config 2ND_LEVEL_INTERRUPTS
	bool "Second-level interrupt support"
	help
	  Second level interrupts are used to increase the number of
	  addressable interrupts in a system.

config 2ND_LVL_ISR_TBL_OFFSET
	int "Offset in _sw_isr_table for level 2 interrupts"
	default 0
	depends on 2ND_LEVEL_INTERRUPTS
	help
	  This is the offset in _sw_isr_table, the generated ISR handler table,
	  where storage for 2nd level interrupt ISRs begins. This is
	  typically allocated after ISRs for level 1 interrupts.

config NUM_2ND_LEVEL_AGGREGATORS
	int "Total number of second level interrupt aggregators"
	range 1 8
	default 1
	depends on 2ND_LEVEL_INTERRUPTS
	help
	  The number of level 2 interrupt aggregators to support. Each
	  aggregator can manage at most MAX_IRQ_PER_AGGREGATOR level 2
	  interrupts.

config 2ND_LEVEL_INTERRUPT_BITS
	int "Total number of second level interrupt bits"
	range 0 31
	default 8
	help
	  The number of bits to use of the 32 bit interrupt mask for second
	  tier interrupts.

# The per-aggregator options below are generated from a shared template.
# prev-level-num / cur-level-num / cur-level / aggregator are Kconfig
# preprocessor variables consumed by Kconfig.multilevel.aggregator_template,
# expanded once per possible aggregator instance (0..7).
prev-level-num = 1
cur-level-num = 2
cur-level = 2ND

aggregator = 0
rsource "Kconfig.multilevel.aggregator_template"
aggregator = 1
rsource "Kconfig.multilevel.aggregator_template"
aggregator = 2
rsource "Kconfig.multilevel.aggregator_template"
aggregator = 3
rsource "Kconfig.multilevel.aggregator_template"
aggregator = 4
rsource "Kconfig.multilevel.aggregator_template"
aggregator = 5
rsource "Kconfig.multilevel.aggregator_template"
aggregator = 6
rsource "Kconfig.multilevel.aggregator_template"
aggregator = 7
rsource "Kconfig.multilevel.aggregator_template"

config 3RD_LEVEL_INTERRUPTS
	bool "Third-level interrupt support"
	depends on 2ND_LEVEL_INTERRUPTS
	help
	  Third level interrupts are used to increase the number of
	  addressable interrupts in a system.

config NUM_3RD_LEVEL_AGGREGATORS
	int "Total number of third level interrupt aggregators"
	range 1 8
	default 1
	depends on 3RD_LEVEL_INTERRUPTS
	help
	  The number of level 3 interrupt aggregators to support. Each
	  aggregator can manage at most MAX_IRQ_PER_AGGREGATOR level 3
	  interrupts.

config 3RD_LVL_ISR_TBL_OFFSET
	int "Offset in _sw_isr_table for level 3 interrupts"
	default 0
	depends on 3RD_LEVEL_INTERRUPTS
	help
	  This is the offset in _sw_isr_table, the generated ISR handler table,
	  where storage for 3rd level interrupt ISRs begins. This is
	  typically allocated after ISRs for level 2 interrupts.

config 3RD_LEVEL_INTERRUPT_BITS
	int "Total number of third level interrupt bits"
	range 0 30
	default 8
	help
	  The number of bits to use of the 32 bit interrupt mask for third
	  tier interrupts.

# Same template expansion for level-3 aggregators.
prev-level-num = 2
cur-level-num = 3
cur-level = 3RD

aggregator = 0
rsource "Kconfig.multilevel.aggregator_template"
aggregator = 1
rsource "Kconfig.multilevel.aggregator_template"
aggregator = 2
rsource "Kconfig.multilevel.aggregator_template"
aggregator = 3
rsource "Kconfig.multilevel.aggregator_template"
aggregator = 4
rsource "Kconfig.multilevel.aggregator_template"
aggregator = 5
rsource "Kconfig.multilevel.aggregator_template"
aggregator = 6
rsource "Kconfig.multilevel.aggregator_template"
aggregator = 7
rsource "Kconfig.multilevel.aggregator_template"

endif
``` | /content/code_sandbox/drivers/interrupt_controller/Kconfig.multilevel | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,089 |
```c
/*
*
*/
/**
* @brief Driver for External interrupt controller in Microchip XEC devices
*
* Driver is currently implemented to support MEC172x ECIA GIRQs
*/
#define DT_DRV_COMPAT microchip_xec_ecia
#include <zephyr/arch/cpu.h>
#include <cmsis_core.h>
#include <zephyr/device.h>
#include <soc.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/drivers/clock_control/mchp_xec_clock_control.h>
#include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h>
#include <zephyr/dt-bindings/interrupt-controller/mchp-xec-ecia.h>
#include <zephyr/irq.h>
/* defined at the SoC layer */
#define MCHP_FIRST_GIRQ			MCHP_FIRST_GIRQ_NOS
#define MCHP_LAST_GIRQ			MCHP_LAST_GIRQ_NOS
#define MCHP_XEC_DIRECT_CAPABLE		MCHP_ECIA_DIRECT_BITMAP

/* GIRQ ids are 0-based; GIRQ register blocks are numbered from GIRQ8 */
#define GIRQ_ID_TO_BITPOS(id)		((id) + 8)

/*
 * MEC SoC's have one and only one instance of ECIA. GIRQ8 register are located
 * at the beginning of the ECIA block.
 */
#define ECIA_XEC_REG_BASE						\
	((struct ecia_regs *)(DT_REG_ADDR(DT_NODELABEL(ecia))))
#define ECS_XEC_REG_BASE						\
	((struct ecs_regs *)(DT_REG_ADDR(DT_NODELABEL(ecs))))
#define PCR_XEC_REG_BASE						\
	((struct pcr_regs *)(DT_REG_ADDR(DT_NODELABEL(pcr))))

/* Clock-control cell values for gating the ECIA block via the PCR */
#define ECIA_XEC_PCR_REG_IDX	DT_INST_CLOCKS_CELL(0, regidx)
#define ECIA_XEC_PCR_BITPOS	DT_INST_CLOCKS_CELL(0, bitpos)

#define ECIA_XEC_PCR_INFO						\
	MCHP_XEC_PCR_SCR_ENCODE(DT_INST_CLOCKS_CELL(0, regidx),		\
				DT_INST_CLOCKS_CELL(0, bitpos),		\
				DT_INST_CLOCKS_CELL(0, domain))

/* Per-GIRQ child node configuration */
struct xec_girq_config {
	uintptr_t base;       /* GIRQ register block address */
	uint8_t girq_id;      /* 0-based GIRQ id */
	uint8_t num_srcs;     /* number of implemented sources */
	uint8_t sources[32];  /* source routing table */
};

/* Top-level ECIA configuration */
struct xec_ecia_config {
	uintptr_t ecia_base;
	struct mchp_xec_pcr_clk_ctrl clk_ctrl;
	const struct device *girq_node_handles[32];
};

/* Per-source registered callback */
struct xec_girq_src_data {
	mchp_xec_ecia_callback_t cb;
	void *data;
};

/* Accessors for driver config/data with the proper struct types */
#define DEV_ECIA_CFG(ecia_dev) \
	((const struct xec_ecia_config *const)(ecia_dev)->config)
#define DEV_GIRQ_CFG(girq_dev) \
	((const struct xec_girq_config *const)(girq_dev)->config)
#define DEV_GIRQ_DATA(girq_dev) \
	((struct xec_girq_src_data *const)(girq_dev)->data)
/*
 * Enable/disable specified GIRQ's aggregated output. Aggregated output is the
 * bit-wise or of all the GIRQ's result bits.
 */
void mchp_xec_ecia_girq_aggr_en(uint8_t girq_num, uint8_t enable)
{
	struct ecia_regs *regs = ECIA_XEC_REG_BASE;

	if (enable) {
		regs->BLK_EN_SET = BIT(girq_num);
	} else {
		regs->BLK_EN_CLR = BIT(girq_num);
	}
}

/* Clear one latched source bit in a GIRQ (no-op for out-of-range GIRQs). */
void mchp_xec_ecia_girq_src_clr(uint8_t girq_num, uint8_t src_bit_pos)
{
	if ((girq_num < MCHP_FIRST_GIRQ) || (girq_num > MCHP_LAST_GIRQ)) {
		return;
	}

	struct ecia_regs *regs = ECIA_XEC_REG_BASE;

	/* write 1 to clear */
	regs->GIRQ[girq_num - MCHP_FIRST_GIRQ].SRC = BIT(src_bit_pos);
}

/* Enable one source bit in a GIRQ (no-op for out-of-range GIRQs). */
void mchp_xec_ecia_girq_src_en(uint8_t girq_num, uint8_t src_bit_pos)
{
	if ((girq_num < MCHP_FIRST_GIRQ) || (girq_num > MCHP_LAST_GIRQ)) {
		return;
	}

	struct ecia_regs *regs = ECIA_XEC_REG_BASE;

	/* write 1 to set */
	regs->GIRQ[girq_num - MCHP_FIRST_GIRQ].EN_SET = BIT(src_bit_pos);
}

/* Disable one source bit in a GIRQ (no-op for out-of-range GIRQs). */
void mchp_xec_ecia_girq_src_dis(uint8_t girq_num, uint8_t src_bit_pos)
{
	if ((girq_num < MCHP_FIRST_GIRQ) || (girq_num > MCHP_LAST_GIRQ)) {
		return;
	}

	struct ecia_regs *regs = ECIA_XEC_REG_BASE;

	/* write 1 to clear */
	regs->GIRQ[girq_num - MCHP_FIRST_GIRQ].EN_CLR = BIT(src_bit_pos);
}
/* Clear multiple latched source bits in a GIRQ (no-op for out-of-range GIRQs). */
void mchp_xec_ecia_girq_src_clr_bitmap(uint8_t girq_num, uint32_t bitmap)
{
	if ((girq_num < MCHP_FIRST_GIRQ) || (girq_num > MCHP_LAST_GIRQ)) {
		return;
	}

	struct ecia_regs *regs = ECIA_XEC_REG_BASE;

	/* write 1 to clear */
	regs->GIRQ[girq_num - MCHP_FIRST_GIRQ].SRC = bitmap;
}

/* Enable multiple source bits in a GIRQ (no-op for out-of-range GIRQs). */
void mchp_xec_ecia_girq_src_en_bitmap(uint8_t girq_num, uint32_t bitmap)
{
	if ((girq_num < MCHP_FIRST_GIRQ) || (girq_num > MCHP_LAST_GIRQ)) {
		return;
	}

	struct ecia_regs *regs = ECIA_XEC_REG_BASE;

	/* write 1 to set */
	regs->GIRQ[girq_num - MCHP_FIRST_GIRQ].EN_SET = bitmap;
}

/* Disable multiple source bits in a GIRQ (no-op for out-of-range GIRQs). */
void mchp_xec_ecia_girq_src_dis_bitmap(uint8_t girq_num, uint32_t bitmap)
{
	if ((girq_num < MCHP_FIRST_GIRQ) || (girq_num > MCHP_LAST_GIRQ)) {
		return;
	}

	struct ecia_regs *regs = ECIA_XEC_REG_BASE;

	/* write 1 to clear */
	regs->GIRQ[girq_num - MCHP_FIRST_GIRQ].EN_CLR = bitmap;
}
/*
 * Return read-only GIRQ result register. Result is bit-wise and of source
 * and enable registers. Returns 0 for out-of-range GIRQ numbers.
 */
uint32_t mchp_xec_ecia_girq_result(uint8_t girq_num)
{
	if ((girq_num < MCHP_FIRST_GIRQ) || (girq_num > MCHP_LAST_GIRQ)) {
		return 0U;
	}

	struct ecia_regs *regs = ECIA_XEC_REG_BASE;

	return regs->GIRQ[girq_num - MCHP_FIRST_GIRQ].RESULT;
}

/* Clear NVIC pending given the external NVIC input number (zero based).
 * The bound is the implemented NVIC line count derived from SCnSCB->ICTR
 * (each ICTR increment adds 32 lines).
 */
void mchp_xec_ecia_nvic_clr_pend(uint32_t nvic_num)
{
	if (nvic_num >= ((SCnSCB->ICTR + 1) * 32)) {
		return;
	}

	NVIC_ClearPendingIRQ(nvic_num);
}
/* API taking input encoded with MCHP_XEC_ECIA(g, gb, na, nd) macro.
 * These thin wrappers decode GIRQ number/position from ecia_info and
 * forward to the raw functions above.
 */
void mchp_xec_ecia_info_girq_aggr_en(int ecia_info, uint8_t enable)
{
	uint8_t girq_num = MCHP_XEC_ECIA_GIRQ(ecia_info);

	mchp_xec_ecia_girq_aggr_en(girq_num, enable);
}

void mchp_xec_ecia_info_girq_src_clr(int ecia_info)
{
	uint8_t girq_num = MCHP_XEC_ECIA_GIRQ(ecia_info);
	uint8_t bitpos = MCHP_XEC_ECIA_GIRQ_POS(ecia_info);

	mchp_xec_ecia_girq_src_clr(girq_num, bitpos);
}

void mchp_xec_ecia_info_girq_src_en(int ecia_info)
{
	uint8_t girq_num = MCHP_XEC_ECIA_GIRQ(ecia_info);
	uint8_t bitpos = MCHP_XEC_ECIA_GIRQ_POS(ecia_info);

	mchp_xec_ecia_girq_src_en(girq_num, bitpos);
}

void mchp_xec_ecia_info_girq_src_dis(int ecia_info)
{
	uint8_t girq_num = MCHP_XEC_ECIA_GIRQ(ecia_info);
	uint8_t bitpos = MCHP_XEC_ECIA_GIRQ_POS(ecia_info);

	mchp_xec_ecia_girq_src_dis(girq_num, bitpos);
}

uint32_t mchp_xec_ecia_info_girq_result(int ecia_info)
{
	uint8_t girq_num = MCHP_XEC_ECIA_GIRQ(ecia_info);

	return mchp_xec_ecia_girq_result(girq_num);
}

/*
 * Clear NVIC pending status given GIRQ source information encoded by macro
 * MCHP_XEC_ECIA. For aggregated only sources the encoding sets direct NVIC
 * number equal to aggregated NVIC number.
 */
void mchp_xec_ecia_info_nvic_clr_pend(int ecia_info)
{
	uint8_t nvic_num = MCHP_XEC_ECIA_NVIC_DIRECT(ecia_info);

	mchp_xec_ecia_nvic_clr_pend(nvic_num);
}
/**
 * @brief enable GIRQn interrupt for specific source
 *
 * @param girq is the GIRQ number (8 - 26)
 * @param src is the interrupt source in the GIRQ (0 - 31)
 *
 * @retval 0 on success, -EINVAL if girq or src is out of range
 */
int mchp_xec_ecia_enable(int girq, int src)
{
	struct ecia_regs *const base = ECIA_XEC_REG_BASE;

	if ((src < 0) || (src > 31) ||
	    (girq < MCHP_FIRST_GIRQ) || (girq > MCHP_LAST_GIRQ)) {
		return -EINVAL;
	}

	/* EN_SET is write-1-to-set */
	base->GIRQ[girq - MCHP_FIRST_GIRQ].EN_SET = BIT(src);

	return 0;
}
/**
 * @brief enable EXTI interrupt for specific line specified by parameter
 * encoded with MCHP_XEC_ECIA macro.
 *
 * @param ecia_info is GIRQ connection encoded with MCHP_XEC_ECIA
 *
 * @retval 0 on success, -EINVAL on a bad encoding
 */
int mchp_xec_ecia_info_enable(int ecia_info)
{
	uint8_t girq_num = (uint8_t)MCHP_XEC_ECIA_GIRQ(ecia_info);
	uint8_t bitpos = (uint8_t)MCHP_XEC_ECIA_GIRQ_POS(ecia_info);

	return mchp_xec_ecia_enable(girq_num, bitpos);
}
/**
 * @brief disable EXTI interrupt for specific line
 *
 * @param girq is the GIRQ number (8 - 26)
 * @param src is the interrupt source in the GIRQ (0 - 31)
 *
 * @retval 0 on success, -EINVAL if girq or src is out of range
 */
int mchp_xec_ecia_disable(int girq, int src)
{
	struct ecia_regs *const base = ECIA_XEC_REG_BASE;

	if ((src < 0) || (src > 31) ||
	    (girq < MCHP_FIRST_GIRQ) || (girq > MCHP_LAST_GIRQ)) {
		return -EINVAL;
	}

	/* EN_CLR is write-1-to-clear */
	base->GIRQ[girq - MCHP_FIRST_GIRQ].EN_CLR = BIT(src);

	return 0;
}
/**
 * @brief disable EXTI interrupt for specific line specified by parameter
 * encoded with MCHP_XEC_ECIA macro.
 *
 * @param ecia_info is GIRQ connection encoded with MCHP_XEC_ECIA
 *
 * @retval 0 on success, -EINVAL on a bad encoding
 */
int mchp_xec_ecia_info_disable(int ecia_info)
{
	uint8_t girq_num = (uint8_t)MCHP_XEC_ECIA_GIRQ(ecia_info);
	uint8_t bitpos = (uint8_t)MCHP_XEC_ECIA_GIRQ_POS(ecia_info);

	return mchp_xec_ecia_disable(girq_num, bitpos);
}
/* forward reference */
static const struct device *get_girq_dev(int girq_num);

/**
 * @brief set GIRQn interrupt source callback
 *
 * @param dev_girq is the GIRQn device handle
 * @param src is the interrupt source in the GIRQ (0 - 31)
 * @param cb user callback
 * @param data user data
 *
 * @retval 0 on success, -EINVAL if dev_girq is NULL, src is out of range,
 * or src is not an implemented source of this GIRQ.
 */
int mchp_xec_ecia_set_callback_by_dev(const struct device *dev_girq, int src,
				      mchp_xec_ecia_callback_t cb, void *data)
{
	if ((dev_girq == NULL) || (src < 0) || (src > 31)) {
		return -EINVAL;
	}

	const struct xec_girq_config *const cfg = DEV_GIRQ_CFG(dev_girq);
	struct xec_girq_src_data *girq_data = DEV_GIRQ_DATA(dev_girq);

	/* source exists in this GIRQ? bit[7] marks an implemented source */
	if (!(cfg->sources[src] & BIT(7))) {
		return -EINVAL;
	}

	/* obtain the callback array index for the source (low bits) */
	int idx = (int)(cfg->sources[src] & ~BIT(7));

	girq_data[idx].cb = cb;
	girq_data[idx].data = data;

	return 0;
}
/**
 * @brief set GIRQn interrupt source callback
 *
 * @param girq_num is the GIRQ number (8 - 26)
 * @param src is the interrupt source in the GIRQ (0 - 31)
 * @param cb user callback
 * @param data user data
 *
 * @retval 0 on success, -EINVAL if the GIRQ device lookup fails or src is
 * invalid for that GIRQ.
 */
int mchp_xec_ecia_set_callback(int girq_num, int src,
			       mchp_xec_ecia_callback_t cb, void *data)
{
	/* NULL for out-of-range or disabled GIRQ nodes; rejected below */
	const struct device *dev = get_girq_dev(girq_num);

	return mchp_xec_ecia_set_callback_by_dev(dev, src, cb, data);
}

/**
 * @brief set GIRQn interrupt source callback
 *
 * @param ecia_info is GIRQ connection encoded with MCHP_XEC_ECIA
 * @param cb user callback
 * @param data user data
 *
 * @retval 0 on success, -EINVAL on a bad encoding or unimplemented source.
 */
int mchp_xec_ecia_info_set_callback(int ecia_info, mchp_xec_ecia_callback_t cb,
				    void *data)
{
	const struct device *dev = get_girq_dev(MCHP_XEC_ECIA_GIRQ(ecia_info));
	uint8_t src = MCHP_XEC_ECIA_GIRQ_POS(ecia_info);

	return mchp_xec_ecia_set_callback_by_dev(dev, src, cb, data);
}
/**
 * @brief unset GIRQn interrupt source callback by device handle
 *
 * @param dev_girq is the GIRQn device handle
 * @param src is the interrupt source in the GIRQ (0 - 31)
 *
 * @retval 0 on success, -EINVAL if dev_girq is NULL, src is out of range,
 * or src is not an implemented source of this GIRQ.
 */
int mchp_ecia_unset_callback_by_dev(const struct device *dev_girq, int src)
{
	if ((dev_girq == NULL) || (src < 0) || (src > 31)) {
		return -EINVAL;
	}

	const struct xec_girq_config *const cfg = DEV_GIRQ_CFG(dev_girq);
	struct xec_girq_src_data *girq_data = DEV_GIRQ_DATA(dev_girq);

	/* source exists in this GIRQ? bit[7] marks an implemented source */
	if (!(cfg->sources[src] & BIT(7))) {
		return -EINVAL;
	}

	/* obtain the callback array index for the source (low bits) */
	int idx = (int)(cfg->sources[src] & ~BIT(7));

	girq_data[idx].cb = NULL;
	girq_data[idx].data = NULL;

	return 0;
}
/**
 * @brief unset GIRQn interrupt source callback
 *
 * @param girq_num is the GIRQ number (8 - 26)
 * @param src is the interrupt source in the GIRQ (0 - 31)
 *
 * @retval 0 on success, -EINVAL if the GIRQ device lookup fails or src is
 * invalid for that GIRQ.
 */
int mchp_ecia_unset_callback(int girq_num, int src)
{
	/* NULL for out-of-range or disabled GIRQ nodes; rejected below */
	const struct device *dev = get_girq_dev(girq_num);

	return mchp_ecia_unset_callback_by_dev(dev, src);
}

/**
 * @brief unset GIRQn interrupt source callback
 *
 * @param ecia_info is GIRQ connection encoded with MCHP_XEC_ECIA
 *
 * @retval 0 on success, -EINVAL on a bad encoding or unimplemented source.
 */
int mchp_ecia_info_unset_callback(int ecia_info)
{
	const struct device *dev = get_girq_dev(MCHP_XEC_ECIA_GIRQ(ecia_info));
	uint8_t src = MCHP_XEC_ECIA_GIRQ_POS(ecia_info);

	return mchp_ecia_unset_callback_by_dev(dev, src);
}
/*
 * Create a build time flag to know if any aggregated GIRQ has been enabled.
 * We make use of DT FOREACH macro to check GIRQ node status.
 * Enabling a GIRQ node (status = "okay") implies you want it used in
 * aggregated mode. Note, GIRQ 8-12, 24-26 are aggregated only by HW design.
 * If a GIRQ node is disabled(status = "disabled") and is direct capable the
 * other driver/application may use IRQ_CONNECT, irq_enable, and the helper
 * functions in this driver to set/clear GIRQ enable bits and status.
 * Leaving a node disabled also allows another driver/application to take over
 * aggregation by managing the GIRQ itself.
 */
/* Expands to "1 |" per enabled node; terminated by the literal 0 below */
#define XEC_CHK_REQ_AGGR(n) DT_NODE_HAS_STATUS(n, okay) |

/* Non-zero when at least one GIRQ child node has status "okay" */
#define XEC_ECIA_REQUIRE_AGGR_ISR					\
	(								\
		DT_FOREACH_CHILD(DT_NODELABEL(ecia), XEC_CHK_REQ_AGGR)	\
	0)

/* static const uint32_t xec_chk_req = (XEC_ECIA_REQUIRE_AGGR_ISR); */

#if XEC_ECIA_REQUIRE_AGGR_ISR
/*
 * Generic ISR for aggregated GIRQ's.
 * GIRQ source(status) bits are latched (R/W1C). The peripheral status
 * connected to the GIRQ source bit must be cleared first by the callback
 * and this routine will clear the GIRQ source bit. If a callback was not
 * registered for a source the enable will also be cleared to prevent
 * interrupt storms.
 * NOTE: dev_girq is a pointer to a GIRQ child device instance.
 */
static void xec_girq_isr(const struct device *dev_girq)
{
	const struct xec_girq_config *const cfg = DEV_GIRQ_CFG(dev_girq);
	struct xec_girq_src_data *data = DEV_GIRQ_DATA(dev_girq);
	struct girq_regs *girq = (struct girq_regs *)cfg->base;
	int girq_id = GIRQ_ID_TO_BITPOS(cfg->girq_id);
	uint32_t idx = 0;
	/* RESULT = SRC & EN: snapshot of enabled, pending sources */
	uint32_t result = girq->RESULT;

	/* service highest set bit first; i bounds the loop to the 32
	 * possible sources in the snapshot
	 */
	for (int i = 0; result && i < 32; i++) {
		/* __builtin_clz: result is non-zero here by loop condition */
		uint8_t bitpos = 31 - (__builtin_clz(result) & 0x1f);

		/* clear GIRQ latched status */
		girq->SRC = BIT(bitpos);
		result &= ~BIT(bitpos);

		/* is it an implemented source? (bit[7] flag) */
		if (cfg->sources[bitpos] & BIT(7)) {
			/* yes, get the index by removing bit[7] flag */
			idx = (uint32_t)cfg->sources[bitpos] & ~BIT(7);
			/* callback registered? */
			if (data[idx].cb) {
				data[idx].cb(girq_id, bitpos, data[idx].data);
			} else { /* no callback, clear the enable */
				girq->EN_CLR = BIT(bitpos);
			}
		} else { /* paranoia, we should not get here... */
			girq->EN_CLR = BIT(bitpos);
		}
	}
}
#endif
/**
 * @brief initialize XEC ECIA driver
 * NOTE: GIRQ22 is special used for waking the PLL from deep sleep when a
 * peripheral receives data from an external entity (eSPI, I2C, etc). Once
 * the data transfer is complete the system re-enters deep sleep unless the
 * peripheral was configured to wake CPU after reception of data or event.
 * GIRQ22 aggregated output and sources are not connected to the NVIC.
 * We enable GIRQ22 aggregated output to ensure clock asynchronous wake
 * functionality is operational.
 *
 * @retval 0 on success, negative errno if the PCR clock device is not
 * ready or enabling the ECIA clock fails.
 */
static int xec_ecia_init(const struct device *dev)
{
	const struct xec_ecia_config *cfg =
		(const struct xec_ecia_config *const) (dev->config);
	const struct device *const clk_dev = DEVICE_DT_GET(DT_NODELABEL(pcr));
	struct ecs_regs *const ecs = ECS_XEC_REG_BASE;
	struct ecia_regs *const ecia = (struct ecia_regs *)cfg->ecia_base;
	uint32_t n = 0, nr = 0;
	int ret;

	if (!device_is_ready(clk_dev)) {
		return -ENODEV;
	}

	ret = clock_control_on(clk_dev,
			       (clock_control_subsys_t)&cfg->clk_ctrl);
	if (ret < 0) {
		return ret;
	}

	/* Enable all direct NVIC connections */
	ecs->INTR_CTRL |= BIT(0);
	/* gate off all aggregated outputs */
	ecia->BLK_EN_CLR = UINT32_MAX;
	/* connect aggregated only GIRQs to NVIC */
	ecia->BLK_EN_SET = MCHP_ECIA_AGGR_BITMAP;
	/* Clear all GIRQn source enables */
	for (n = 0; n < MCHP_GIRQS; n++) {
		ecia->GIRQ[n].EN_CLR = UINT32_MAX;
	}
	/* Clear all external NVIC enables and pending status.
	 * ICTR encodes the number of 32-input NVIC register groups - 1.
	 */
	nr = SCnSCB->ICTR;
	for (n = 0u; n <= nr; n++) {
		NVIC->ICER[n] = UINT32_MAX;
		NVIC->ICPR[n] = UINT32_MAX;
	}
	/* ecia->BLK_ACTIVE = xec_chk_req; */
	return 0;
}
/* xec_config_girq_xxx.sources[] entries from GIRQ node.
 * Each listed source bit position maps to its index in the DT "sources"
 * property, with bit[7] set to flag "implemented".
 */
#define XEC_GIRQ_SOURCES2(node_id, prop, idx)				\
	.sources[DT_PROP_BY_IDX(node_id, prop, idx)] =			\
		((uint8_t)(idx) | BIT(7)),

/* Parameter n is a child node-id.
 * Expands to: per-GIRQ callback storage, const config (register base,
 * GIRQ id, source map), device definition, and an init function that
 * enables the aggregated output and connects/enables the GIRQ's NVIC
 * input routed to xec_girq_isr.
 */
#define GIRQ_XEC_DEVICE(n)						\
	static int xec_girq_init_##n(const struct device *dev);	\
									\
	static struct xec_girq_src_data					\
		xec_data_girq_##n[DT_PROP_LEN(n, sources)];		\
									\
	static const struct xec_girq_config xec_config_girq_##n = {	\
		.base = DT_REG_ADDR(n),					\
		.girq_id = DT_PROP(n, girq_id),				\
		.num_srcs = DT_PROP_LEN(n, sources),			\
		DT_FOREACH_PROP_ELEM(n, sources, XEC_GIRQ_SOURCES2)	\
	};								\
									\
	DEVICE_DT_DEFINE(n, xec_girq_init_##n,				\
			 NULL, &xec_data_girq_##n, &xec_config_girq_##n, \
			 PRE_KERNEL_1, CONFIG_XEC_GIRQ_INIT_PRIORITY,	\
			 NULL);						\
									\
	static int xec_girq_init_##n(const struct device *dev)		\
	{								\
		mchp_xec_ecia_girq_aggr_en(				\
			GIRQ_ID_TO_BITPOS(DT_PROP(n, girq_id)), 1);	\
									\
		IRQ_CONNECT(DT_IRQN(n),					\
			    DT_IRQ(n, priority),			\
			    xec_girq_isr,				\
			    DEVICE_DT_GET(n), 0);			\
									\
		irq_enable(DT_IRQN(n));					\
									\
		return 0;						\
	}

/*
 * iterate over each enabled child node of ECIA
 * Enable means property status = "okay"
 */
DT_FOREACH_CHILD_STATUS_OKAY(DT_NODELABEL(ecia), GIRQ_XEC_DEVICE)

/* n = GIRQ node id: populate the handle lookup table used by
 * get_girq_dev(), indexed by girq_id.
 */
#define XEC_GIRQ_HANDLE(n)						\
	.girq_node_handles[DT_PROP(n, girq_id)] = (DEVICE_DT_GET(n)),

static const struct xec_ecia_config xec_config_ecia = {
	.ecia_base = DT_REG_ADDR(DT_NODELABEL(ecia)),
	.clk_ctrl = {
		.pcr_info = ECIA_XEC_PCR_INFO,
	},
	DT_FOREACH_CHILD_STATUS_OKAY(DT_NODELABEL(ecia), XEC_GIRQ_HANDLE)
};

DEVICE_DT_DEFINE(DT_NODELABEL(ecia), xec_ecia_init,
		 NULL, NULL, &xec_config_ecia,
		 PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY,
		 NULL);
/* Look up a GIRQ node device handle from the ECIA configuration.
 * Returns NULL for out-of-range GIRQ numbers (or disabled nodes, whose
 * table slots were never populated).
 */
static const struct device *get_girq_dev(int girq_num)
{
	if ((girq_num >= MCHP_FIRST_GIRQ) && (girq_num <= MCHP_LAST_GIRQ)) {
		/* safe to convert to zero based index */
		return xec_config_ecia.girq_node_handles[girq_num -
							 MCHP_FIRST_GIRQ];
	}

	return NULL;
}
``` | /content/code_sandbox/drivers/interrupt_controller/intc_mchp_ecia_xec.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,451 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_SAM0_EIC_PRIV_H_
#define ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_SAM0_EIC_PRIV_H_
#include <errno.h>
#include <zephyr/types.h>
#include <soc.h>
/*
 * Used in the ASF headers, but not always defined by them (looks like they
 * sometimes define __L instead).
 */
#ifndef _L
/* Token-paste a long-literal suffix: _L(5) expands to 5L.
 * NOTE(review): _L is in the implementation-reserved namespace, but the
 * name is dictated by the ASF headers that consume it.
 */
#define _L(i) i ## L
#endif
/*
* Unfortunately the ASF headers define the EIC mappings somewhat painfully:
* the macros have both the port letter and are only defined if that pin
* has an EIC channel. So we can't just use a macro expansion here, because
* some of them might be undefined for a port and we can't test for another
* macro definition inside a macro.
*/
/* Per-port, per-pin EIC channel number; 0xFF marks a pin with no EIC
 * channel. Indexed as [port group][pin]. The ASF PIN_Pxy A_EIC_EXTINT_NUM
 * macros are only defined for pins that have a channel, hence the
 * per-entry #ifdef (the preprocessor cannot test #ifdef inside a macro
 * expansion, so this table cannot be generated more compactly).
 */
static const uint8_t sam0_eic_channels[PORT_GROUPS][32] = {
#if PORT_GROUPS >= 1
	/* Port A (group 0) */
	{
#ifdef PIN_PA00A_EIC_EXTINT_NUM
	PIN_PA00A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA01A_EIC_EXTINT_NUM
	PIN_PA01A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA02A_EIC_EXTINT_NUM
	PIN_PA02A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA03A_EIC_EXTINT_NUM
	PIN_PA03A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA04A_EIC_EXTINT_NUM
	PIN_PA04A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA05A_EIC_EXTINT_NUM
	PIN_PA05A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA06A_EIC_EXTINT_NUM
	PIN_PA06A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA07A_EIC_EXTINT_NUM
	PIN_PA07A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA08A_EIC_EXTINT_NUM
	PIN_PA08A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA09A_EIC_EXTINT_NUM
	PIN_PA09A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA10A_EIC_EXTINT_NUM
	PIN_PA10A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA11A_EIC_EXTINT_NUM
	PIN_PA11A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA12A_EIC_EXTINT_NUM
	PIN_PA12A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA13A_EIC_EXTINT_NUM
	PIN_PA13A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA14A_EIC_EXTINT_NUM
	PIN_PA14A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA15A_EIC_EXTINT_NUM
	PIN_PA15A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA16A_EIC_EXTINT_NUM
	PIN_PA16A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA17A_EIC_EXTINT_NUM
	PIN_PA17A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA18A_EIC_EXTINT_NUM
	PIN_PA18A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA19A_EIC_EXTINT_NUM
	PIN_PA19A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA20A_EIC_EXTINT_NUM
	PIN_PA20A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA21A_EIC_EXTINT_NUM
	PIN_PA21A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA22A_EIC_EXTINT_NUM
	PIN_PA22A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA23A_EIC_EXTINT_NUM
	PIN_PA23A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA24A_EIC_EXTINT_NUM
	PIN_PA24A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA25A_EIC_EXTINT_NUM
	PIN_PA25A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA26A_EIC_EXTINT_NUM
	PIN_PA26A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA27A_EIC_EXTINT_NUM
	PIN_PA27A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA28A_EIC_EXTINT_NUM
	PIN_PA28A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA29A_EIC_EXTINT_NUM
	PIN_PA29A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA30A_EIC_EXTINT_NUM
	PIN_PA30A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PA31A_EIC_EXTINT_NUM
	PIN_PA31A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
	},
#endif
#if PORT_GROUPS >= 2
	/* Port B (group 1) */
	{
#ifdef PIN_PB00A_EIC_EXTINT_NUM
	PIN_PB00A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB01A_EIC_EXTINT_NUM
	PIN_PB01A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB02A_EIC_EXTINT_NUM
	PIN_PB02A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB03A_EIC_EXTINT_NUM
	PIN_PB03A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB04A_EIC_EXTINT_NUM
	PIN_PB04A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB05A_EIC_EXTINT_NUM
	PIN_PB05A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB06A_EIC_EXTINT_NUM
	PIN_PB06A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB07A_EIC_EXTINT_NUM
	PIN_PB07A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB08A_EIC_EXTINT_NUM
	PIN_PB08A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB09A_EIC_EXTINT_NUM
	PIN_PB09A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB10A_EIC_EXTINT_NUM
	PIN_PB10A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB11A_EIC_EXTINT_NUM
	PIN_PB11A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB12A_EIC_EXTINT_NUM
	PIN_PB12A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB13A_EIC_EXTINT_NUM
	PIN_PB13A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB14A_EIC_EXTINT_NUM
	PIN_PB14A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB15A_EIC_EXTINT_NUM
	PIN_PB15A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB16A_EIC_EXTINT_NUM
	PIN_PB16A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB17A_EIC_EXTINT_NUM
	PIN_PB17A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB18A_EIC_EXTINT_NUM
	PIN_PB18A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB19A_EIC_EXTINT_NUM
	PIN_PB19A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB20A_EIC_EXTINT_NUM
	PIN_PB20A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB21A_EIC_EXTINT_NUM
	PIN_PB21A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB22A_EIC_EXTINT_NUM
	PIN_PB22A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB23A_EIC_EXTINT_NUM
	PIN_PB23A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB24A_EIC_EXTINT_NUM
	PIN_PB24A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB25A_EIC_EXTINT_NUM
	PIN_PB25A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB26A_EIC_EXTINT_NUM
	PIN_PB26A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB27A_EIC_EXTINT_NUM
	PIN_PB27A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB28A_EIC_EXTINT_NUM
	PIN_PB28A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB29A_EIC_EXTINT_NUM
	PIN_PB29A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB30A_EIC_EXTINT_NUM
	PIN_PB30A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PB31A_EIC_EXTINT_NUM
	PIN_PB31A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
	},
#endif
#if PORT_GROUPS >= 3
	/* Port C (group 2) */
	{
#ifdef PIN_PC00A_EIC_EXTINT_NUM
	PIN_PC00A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC01A_EIC_EXTINT_NUM
	PIN_PC01A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC02A_EIC_EXTINT_NUM
	PIN_PC02A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC03A_EIC_EXTINT_NUM
	PIN_PC03A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC04A_EIC_EXTINT_NUM
	PIN_PC04A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC05A_EIC_EXTINT_NUM
	PIN_PC05A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC06A_EIC_EXTINT_NUM
	PIN_PC06A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC07A_EIC_EXTINT_NUM
	PIN_PC07A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC08A_EIC_EXTINT_NUM
	PIN_PC08A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC09A_EIC_EXTINT_NUM
	PIN_PC09A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC10A_EIC_EXTINT_NUM
	PIN_PC10A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC11A_EIC_EXTINT_NUM
	PIN_PC11A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC12A_EIC_EXTINT_NUM
	PIN_PC12A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC13A_EIC_EXTINT_NUM
	PIN_PC13A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC14A_EIC_EXTINT_NUM
	PIN_PC14A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC15A_EIC_EXTINT_NUM
	PIN_PC15A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC16A_EIC_EXTINT_NUM
	PIN_PC16A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC17A_EIC_EXTINT_NUM
	PIN_PC17A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC18A_EIC_EXTINT_NUM
	PIN_PC18A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC19A_EIC_EXTINT_NUM
	PIN_PC19A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC20A_EIC_EXTINT_NUM
	PIN_PC20A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC21A_EIC_EXTINT_NUM
	PIN_PC21A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC22A_EIC_EXTINT_NUM
	PIN_PC22A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC23A_EIC_EXTINT_NUM
	PIN_PC23A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC24A_EIC_EXTINT_NUM
	PIN_PC24A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC25A_EIC_EXTINT_NUM
	PIN_PC25A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC26A_EIC_EXTINT_NUM
	PIN_PC26A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC27A_EIC_EXTINT_NUM
	PIN_PC27A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC28A_EIC_EXTINT_NUM
	PIN_PC28A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC29A_EIC_EXTINT_NUM
	PIN_PC29A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC30A_EIC_EXTINT_NUM
	PIN_PC30A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PC31A_EIC_EXTINT_NUM
	PIN_PC31A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
	},
#endif
#if PORT_GROUPS >= 4
	/* Port D (group 3) */
	{
#ifdef PIN_PD00A_EIC_EXTINT_NUM
	PIN_PD00A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD01A_EIC_EXTINT_NUM
	PIN_PD01A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD02A_EIC_EXTINT_NUM
	PIN_PD02A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD03A_EIC_EXTINT_NUM
	PIN_PD03A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD04A_EIC_EXTINT_NUM
	PIN_PD04A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD05A_EIC_EXTINT_NUM
	PIN_PD05A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD06A_EIC_EXTINT_NUM
	PIN_PD06A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD07A_EIC_EXTINT_NUM
	PIN_PD07A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD08A_EIC_EXTINT_NUM
	PIN_PD08A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD09A_EIC_EXTINT_NUM
	PIN_PD09A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD10A_EIC_EXTINT_NUM
	PIN_PD10A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD11A_EIC_EXTINT_NUM
	PIN_PD11A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD12A_EIC_EXTINT_NUM
	PIN_PD12A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD13A_EIC_EXTINT_NUM
	PIN_PD13A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD14A_EIC_EXTINT_NUM
	PIN_PD14A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD15A_EIC_EXTINT_NUM
	PIN_PD15A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD16A_EIC_EXTINT_NUM
	PIN_PD16A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD17A_EIC_EXTINT_NUM
	PIN_PD17A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD18A_EIC_EXTINT_NUM
	PIN_PD18A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD19A_EIC_EXTINT_NUM
	PIN_PD19A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD20A_EIC_EXTINT_NUM
	PIN_PD20A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD21A_EIC_EXTINT_NUM
	PIN_PD21A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD22A_EIC_EXTINT_NUM
	PIN_PD22A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD23A_EIC_EXTINT_NUM
	PIN_PD23A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD24A_EIC_EXTINT_NUM
	PIN_PD24A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD25A_EIC_EXTINT_NUM
	PIN_PD25A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD26A_EIC_EXTINT_NUM
	PIN_PD26A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD27A_EIC_EXTINT_NUM
	PIN_PD27A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD28A_EIC_EXTINT_NUM
	PIN_PD28A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD29A_EIC_EXTINT_NUM
	PIN_PD29A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD30A_EIC_EXTINT_NUM
	PIN_PD30A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
#ifdef PIN_PD31A_EIC_EXTINT_NUM
	PIN_PD31A_EIC_EXTINT_NUM,
#else
	0xFF,
#endif
	},
#endif
};
static inline int sam0_eic_map_to_line(int port, int pin)
{
uint8_t ch = sam0_eic_channels[port][pin];
if (ch == 0xFF) {
return -ENOTSUP;
}
return ch;
}
#endif /* ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_SAM0_EIC_PRIV_H_ */
``` | /content/code_sandbox/drivers/interrupt_controller/intc_sam0_eic_priv.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,156 |
```c
/*
*
*/
#define DT_DRV_COMPAT gd_gd32_exti
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/drivers/interrupt_controller/gd32_exti.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/irq.h>
#include <zephyr/sys/util_macro.h>
#include <gd32_exti.h>
/** Unsupported line indicator (sentinel in line2irq[]) */
#define EXTI_NOTSUP 0xFFU
/** Number of EXTI lines (from the devicetree "num-lines" property). */
#define NUM_EXTI_LINES DT_INST_PROP(0, num_lines)

/** @brief EXTI line range held by a single shared ISR */
struct gd32_exti_range {
	/** Start of the range (inclusive) */
	uint8_t min;
	/** End of the range (inclusive) */
	uint8_t max;
};

/** @brief EXTI line interrupt callback. */
struct gd32_cb_data {
	/** Callback function (NULL when unconfigured) */
	gd32_exti_cb_t cb;
	/** User data. */
	void *user;
};

/** EXTI driver data. */
struct gd32_exti_data {
	/** Array of callbacks, indexed by EXTI line. */
	struct gd32_cb_data cbs[NUM_EXTI_LINES];
};

#ifdef CONFIG_GPIO_GD32
/* ISR argument for each NVIC vector: lines 0-4 have dedicated vectors,
 * lines 5-9 and 10-15 each share one vector.
 */
static const struct gd32_exti_range line0_range = {0U, 0U};
static const struct gd32_exti_range line1_range = {1U, 1U};
static const struct gd32_exti_range line2_range = {2U, 2U};
static const struct gd32_exti_range line3_range = {3U, 3U};
static const struct gd32_exti_range line4_range = {4U, 4U};
static const struct gd32_exti_range line5_9_range = {5U, 9U};
static const struct gd32_exti_range line10_15_range = {10U, 15U};
#endif /* CONFIG_GPIO_GD32 */

/** @brief Obtain line IRQ number if enabled. */
#define EXTI_LINE_IRQ_COND(enabled, line) \
	COND_CODE_1(enabled, (DT_INST_IRQ_BY_NAME(0, line, irq)), (EXTI_NOTSUP))

/* EXTI line -> NVIC IRQ number; EXTI_NOTSUP for lines this driver does
 * not handle (lines 16 and above).
 */
static const uint8_t line2irq[NUM_EXTI_LINES] = {
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line0),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line1),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line2),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line3),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line4),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line5_9),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line5_9),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line5_9),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line5_9),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line5_9),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line10_15),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line10_15),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line10_15),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line10_15),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line10_15),
	EXTI_LINE_IRQ_COND(CONFIG_GPIO_GD32, line10_15),
	EXTI_NOTSUP,
	EXTI_NOTSUP,
	EXTI_NOTSUP,
#ifdef CONFIG_SOC_SERIES_GD32F4XX
	EXTI_NOTSUP,
	EXTI_NOTSUP,
	EXTI_NOTSUP,
	EXTI_NOTSUP,
#endif /* CONFIG_SOC_SERIES_GD32F4XX */
};
/* Shared ISR servicing all EXTI lines in the range passed as isr_data. */
__unused static void gd32_exti_isr(const void *isr_data)
{
	const struct device *const dev = DEVICE_DT_INST_GET(0);
	struct gd32_exti_data *data = dev->data;
	const struct gd32_exti_range *range = isr_data;

	for (uint8_t i = range->min; i <= range->max; i++) {
		if ((EXTI_PD & BIT(i)) != 0U) {
			/* acknowledge the line before running the callback
			 * so a new edge arriving during the callback is
			 * not lost
			 */
			EXTI_PD = BIT(i);
			if (data->cbs[i].cb != NULL) {
				data->cbs[i].cb(i, data->cbs[i].user);
			}
		}
	}
}
/* Enable interrupt generation for an EXTI line and its NVIC IRQ. */
void gd32_exti_enable(uint8_t line)
{
	__ASSERT_NO_MSG(line < NUM_EXTI_LINES);
	__ASSERT_NO_MSG(line2irq[line] != EXTI_NOTSUP);

	EXTI_INTEN |= BIT(line);
	irq_enable(line2irq[line]);
}

/* Disable interrupt generation for an EXTI line. The NVIC IRQ is left
 * enabled: per line2irq[], lines 5-9 and 10-15 share one IRQ each, so
 * disabling the IRQ could mask other still-active lines.
 */
void gd32_exti_disable(uint8_t line)
{
	__ASSERT_NO_MSG(line < NUM_EXTI_LINES);
	__ASSERT_NO_MSG(line2irq[line] != EXTI_NOTSUP);

	EXTI_INTEN &= ~BIT(line);
}
/* Select the edge(s) that trigger an EXTI line: any combination of
 * GD32_EXTI_TRIG_RISING and GD32_EXTI_TRIG_FALLING; bits absent from
 * trigger are disabled.
 */
void gd32_exti_trigger(uint8_t line, uint8_t trigger)
{
	const uint32_t mask = BIT(line);

	__ASSERT_NO_MSG(line < NUM_EXTI_LINES);
	__ASSERT_NO_MSG(line2irq[line] != EXTI_NOTSUP);

	/* rising-edge detection */
	if ((trigger & GD32_EXTI_TRIG_RISING) == 0U) {
		EXTI_RTEN &= ~mask;
	} else {
		EXTI_RTEN |= mask;
	}

	/* falling-edge detection */
	if ((trigger & GD32_EXTI_TRIG_FALLING) == 0U) {
		EXTI_FTEN &= ~mask;
	} else {
		EXTI_FTEN |= mask;
	}
}
/* Register (cb != NULL) or unregister (cb == NULL) a callback for an EXTI
 * line. Returns -EALREADY if a callback is already registered and cb is
 * non-NULL (unregister first to replace), 0 otherwise.
 */
int gd32_exti_configure(uint8_t line, gd32_exti_cb_t cb, void *user)
{
	const struct device *const dev = DEVICE_DT_INST_GET(0);
	struct gd32_exti_data *data = dev->data;

	__ASSERT_NO_MSG(line < NUM_EXTI_LINES);
	__ASSERT_NO_MSG(line2irq[line] != EXTI_NOTSUP);

	if ((data->cbs[line].cb != NULL) && (cb != NULL)) {
		return -EALREADY;
	}

	data->cbs[line].cb = cb;
	data->cbs[line].user = user;

	return 0;
}
/* Connect the shared gd32_exti_isr to every EXTI NVIC vector, passing the
 * line range each vector services. IRQs are connected but not enabled
 * here; gd32_exti_enable() enables them per line.
 */
static int gd32_exti_init(const struct device *dev)
{
#ifdef CONFIG_GPIO_GD32
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, line0, irq),
		    DT_INST_IRQ_BY_NAME(0, line0, priority),
		    gd32_exti_isr, &line0_range, 0);
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, line1, irq),
		    DT_INST_IRQ_BY_NAME(0, line1, priority),
		    gd32_exti_isr, &line1_range, 0);
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, line2, irq),
		    DT_INST_IRQ_BY_NAME(0, line2, priority),
		    gd32_exti_isr, &line2_range, 0);
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, line3, irq),
		    DT_INST_IRQ_BY_NAME(0, line3, priority),
		    gd32_exti_isr, &line3_range, 0);
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, line4, irq),
		    DT_INST_IRQ_BY_NAME(0, line4, priority),
		    gd32_exti_isr, &line4_range, 0);
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, line5_9, irq),
		    DT_INST_IRQ_BY_NAME(0, line5_9, priority),
		    gd32_exti_isr, &line5_9_range, 0);
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, line10_15, irq),
		    DT_INST_IRQ_BY_NAME(0, line10_15, priority),
		    gd32_exti_isr, &line10_15_range, 0);
#endif /* CONFIG_GPIO_GD32 */

	return 0;
}

/* Single-instance driver state (callback table). */
static struct gd32_exti_data data;

DEVICE_DT_INST_DEFINE(0, gd32_exti_init, NULL, &data, NULL, PRE_KERNEL_1,
		      CONFIG_INTC_INIT_PRIORITY, NULL);
``` | /content/code_sandbox/drivers/interrupt_controller/intc_gd32_exti.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,713 |
```objective-c
/*
*
*/
#ifndef BDMA_STM32_H_
#define BDMA_STM32_H_
#include <soc.h>
#include <stm32_ll_bdma.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include <zephyr/drivers/dma/dma_stm32.h>
/* Maximum data sent in single transfer (Bytes) */
#define BDMA_STM32_MAX_DATA_ITEMS 0xffff

/* Per-channel runtime state. */
struct bdma_stm32_channel {
	uint32_t direction;		/* dma_config direction value */
#ifdef CONFIG_DMAMUX_STM32
	int mux_channel; /* stores the dmamux channel */
#endif /* CONFIG_DMAMUX_STM32 */
	bool source_periph;		/* true when the source is a peripheral */
	bool hal_override;		/* channel managed outside this driver */
	volatile bool busy;		/* transfer in progress (set/cleared in ISR) */
	uint32_t src_size;		/* source data width in bytes */
	uint32_t dst_size;		/* destination data width in bytes */
	void *user_data; /* holds the client data */
	dma_callback_t bdma_callback;	/* client completion/error callback */
};

/* Driver instance data. */
struct bdma_stm32_data {
	struct dma_context dma_ctx;
};

/* Driver instance (devicetree-derived) configuration. */
struct bdma_stm32_config {
	struct stm32_pclken pclken;	/* clock gating info */
	void (*config_irq)(const struct device *dev);	/* IRQ setup hook */
	bool support_m2m;		/* memory-to-memory capable */
	uint32_t base;			/* BDMA register base address */
	uint32_t max_channels;		/* number of channels */
#ifdef CONFIG_DMAMUX_STM32
	uint8_t offset; /* position in the list of bdmamux channel list */
#endif
	struct bdma_stm32_channel *channels;	/* per-channel state array */
};
/* Translate a zero-based channel id to the driver's channel encoding. */
uint32_t bdma_stm32_id_to_channel(uint32_t id);
#if !defined(CONFIG_DMAMUX_STM32)
uint32_t bdma_stm32_slot_to_channel(uint32_t id);
#endif

typedef void (*bdma_stm32_clear_flag_func)(BDMA_TypeDef *DMAx);
typedef uint32_t (*bdma_stm32_check_flag_func)(BDMA_TypeDef *DMAx);

/* Per-channel interrupt-flag helpers: global (gi), transfer-complete (tc),
 * half-transfer (ht) and transfer-error (te).
 */
bool bdma_stm32_is_gi_active(BDMA_TypeDef *DMAx, uint32_t id);
void bdma_stm32_clear_gi(BDMA_TypeDef *DMAx, uint32_t id);
void bdma_stm32_clear_tc(BDMA_TypeDef *DMAx, uint32_t id);
void bdma_stm32_clear_ht(BDMA_TypeDef *DMAx, uint32_t id);
bool bdma_stm32_is_te_active(BDMA_TypeDef *DMAx, uint32_t id);
void bdma_stm32_clear_te(BDMA_TypeDef *DMAx, uint32_t id);

/* Channel interrupt state queries and management.
 * Fix: the ht/tc prototypes used parameter name "ma" instead of "dma";
 * renamed for consistency (prototype parameter names have no ABI effect).
 */
bool stm32_bdma_is_irq_active(BDMA_TypeDef *dma, uint32_t id);
bool stm32_bdma_is_ht_irq_active(BDMA_TypeDef *dma, uint32_t id);
bool stm32_bdma_is_tc_irq_active(BDMA_TypeDef *dma, uint32_t id);
void stm32_bdma_dump_channel_irq(BDMA_TypeDef *dma, uint32_t id);
void stm32_bdma_clear_channel_irq(BDMA_TypeDef *dma, uint32_t id);
bool stm32_bdma_is_irq_happened(BDMA_TypeDef *dma, uint32_t id);
void stm32_bdma_enable_channel(BDMA_TypeDef *dma, uint32_t id);
int stm32_bdma_disable_channel(BDMA_TypeDef *dma, uint32_t id);
#if !defined(CONFIG_DMAMUX_STM32)
void stm32_dma_config_channel_function(BDMA_TypeDef *dma, uint32_t id,
				       uint32_t slot);
#endif

#ifdef CONFIG_DMAMUX_STM32
/* bdma_stm32_ api functions are exported to the bdmamux_stm32 */
#define BDMA_STM32_EXPORT_API
int bdma_stm32_configure(const struct device *dev, uint32_t id,
			 struct dma_config *config);
int bdma_stm32_reload(const struct device *dev, uint32_t id,
		      uint32_t src, uint32_t dst, size_t size);
int bdma_stm32_start(const struct device *dev, uint32_t id);
int bdma_stm32_stop(const struct device *dev, uint32_t id);
int bdma_stm32_get_status(const struct device *dev, uint32_t id,
			  struct dma_status *stat);
#else
#define BDMA_STM32_EXPORT_API static
#endif /* CONFIG_DMAMUX_STM32 */
#endif /* BDMA_STM32_H_*/
``` | /content/code_sandbox/drivers/dma/dma_stm32_bdma.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 879 |
```objective-c
/*
*
*/
#ifndef DMA_IPROC_PAX_V1
#define DMA_IPROC_PAX_V1
#include "dma_iproc_pax.h"
/* Register RM_CONTROL fields */
#define RM_COMM_MSI_INTERRUPT_STATUS_MASK 0x30d0
#define RM_COMM_MSI_INTERRUPT_STATUS_CLEAR 0x30d4
#define RM_COMM_CONTROL_MODE_MASK 0x3
#define RM_COMM_CONTROL_MODE_SHIFT 0
#define RM_COMM_CONTROL_MODE_TOGGLE 0x2
#define RM_COMM_CONTROL_CONFIG_DONE BIT(2)
#define RM_COMM_CONTROL_LINE_INTR_EN_SHIFT 4
#define RM_COMM_CONTROL_LINE_INTR_EN BIT(4)
#define RM_COMM_CONTROL_AE_TIMEOUT_EN_SHIFT 5
#define RM_COMM_CONTROL_AE_TIMEOUT_EN BIT(5)
#define RM_COMM_MSI_DISABLE_VAL 3

/* Descriptor type field value for DMA descriptors */
#define PAX_DMA_TYPE_DMA_DESC 0x3
#define PAX_DMA_NUM_BD_BUFFS 8

/* DMA desc count: 3 entries per packet */
#define PAX_DMA_RM_DESC_BDCOUNT 3
/* 1 DMA packet desc takes 3 BDs */
#define PAX_DMA_DMA_DESC_SIZE (PAX_DMA_RM_DESC_BDWIDTH * \
			       PAX_DMA_RM_DESC_BDCOUNT)
/* Max size of transfer in single packet */
#define PAX_DMA_MAX_DMA_SIZE_PER_BD (16 * 1024 * 1024)

/* ascii signature 'V' 'K' */
#define PAX_DMA_WRITE_SYNC_SIGNATURE 0x564B

/* DMA transfers supported from 4 bytes thru 16M, size aligned to 4 bytes */
#define PAX_DMA_MIN_SIZE 4
#define PAX_DMA_MAX_SIZE (16 * 1024 * 1024)

/* Bits 0:1 ignored by PAX DMA, i.e. 4-byte address alignment */
#define PAX_DMA_PCI_ADDR_LS_IGNORE_BITS 2
#define PAX_DMA_PCI_ADDR_ALIGNMT_SHIFT PAX_DMA_PCI_ADDR_LS_IGNORE_BITS

/* s/w payload struct, enough space for 1020 sglist elements */
#define PAX_DMA_PAYLOAD_BUFF_SIZE (32 * 1024)

/*
 * Per-ring memory, with 8K & 4K alignment
 * Alignment may not be ensured by allocator
 * s/w need to allocate extra upto 8K to
 * ensure aligned memory space.
 */
#define PAX_DMA_PER_RING_ALLOC_SIZE (PAX_DMA_RM_CMPL_RING_SIZE * 2 + \
				     PAX_DMA_NUM_BD_BUFFS * \
				     PAX_DMA_RM_DESC_RING_SIZE + \
				     PAX_DMA_PAYLOAD_BUFF_SIZE)
/* RM header desc field */
struct rm_header {
uint64_t opq : 16; /*pkt_id 15:0*/
uint64_t res1 : 20; /*reserved 35:16*/
uint64_t bdcount : 5; /*bdcount 40:36*/
uint64_t prot : 2; /*prot 41:40*/
uint64_t res2 : 13; /*reserved 55:43*/
uint64_t start : 1; /*start pkt :56*/
uint64_t end : 1; /*end pkt :57*/
uint64_t toggle : 1; /*toggle :58*/
uint64_t res3 : 1; /*reserved :59*/
uint64_t type : 4; /*type 63:60*/
} __attribute__ ((__packed__));
/* DMA descriptor header field (64-bit packed bit-field). */
struct dma_header_desc {
	uint64_t length : 25;	/* transfer length in bytes [24:0] */
	uint64_t res1: 31;	/* reserved [55:25] */
	uint64_t opcode : 4;	/* opcode [59:56] */
	uint64_t res2: 2;	/* reserved [61:60] */
	uint64_t type : 2;	/* type [63:62], set to b'11 */
} __attribute__ ((__packed__));
/* DMA descriptor AXI (local bus) address field. */
struct axi_addr_desc {
	uint64_t axi_addr : 48;	/* AXI address [47:0] */
	uint64_t res : 14;	/* reserved [61:48] */
	uint64_t type : 2;	/* type [63:62], set to b'11 */
} __attribute__ ((__packed__));
/*
 * DMA descriptor PCIe address field. The 62-bit field holds PCIe address
 * bits [63:2]; address bits [1:0] are ignored by the PAX DMA engine
 * (4-byte alignment, see PAX_DMA_PCI_ADDR_LS_IGNORE_BITS).
 */
struct pci_addr_desc {
	uint64_t pcie_addr : 62;	/* PCIe address [63:2] */
	uint64_t type : 2;	/* type [63:62], set to b'11 */
} __attribute__ ((__packed__));
/* Complete DMA descriptor: header + AXI address + PCIe address (3 BDs). */
struct dma_desc {
	struct dma_header_desc hdr;
	struct axi_addr_desc axi;
	struct pci_addr_desc pci;
} __attribute__ ((__packed__));
/* Next-pointer descriptor, links to the following BD buffer. */
struct next_ptr_desc {
	uint64_t addr : 44;	/* next BD buffer address [43:0] */
	uint64_t res1 : 14;	/* reserved [57:44] */
	uint64_t toggle : 1;	/* toggle bit, bit 58 */
	uint64_t res2 : 1;	/* reserved, bit 59 */
	uint64_t type : 4;	/* descriptor type [63:60] */
} __attribute__ ((__packed__));
#endif
``` | /content/code_sandbox/drivers/dma/dma_iproc_pax_v1.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,079 |
```c
/*
* Contributors: 2018 Antmicro <www.antmicro.com>
*
*/
#define DT_DRV_COMPAT sifive_plic_1_0_0
/**
* @brief Platform Level Interrupt Controller (PLIC) driver
* for RISC-V processors
*/
#include <stdlib.h>
#include "sw_isr_common.h"
#include <zephyr/debug/symtab.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/device.h>
#include <zephyr/devicetree/interrupt_controller.h>
#include <zephyr/shell/shell.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/drivers/interrupt_controller/riscv_plic.h>
#include <zephyr/irq.h>
#define PLIC_BASE_ADDR(n) DT_INST_REG_ADDR(n)
/*
* These registers' offset are defined in the RISCV PLIC specs, see:
* path_to_url
*/
#define CONTEXT_BASE 0x200000
#define CONTEXT_SIZE 0x1000
#define CONTEXT_THRESHOLD 0x00
#define CONTEXT_CLAIM 0x04
#define CONTEXT_ENABLE_BASE 0x2000
#define CONTEXT_ENABLE_SIZE 0x80
/*
* Trigger type is mentioned, but not defined in the RISCV PLIC specs.
* However, it is defined and supported by at least the Andes & Telink datasheet, and supported
* in Linux's SiFive PLIC driver
*/
#define PLIC_TRIG_LEVEL ((uint32_t)0)
#define PLIC_TRIG_EDGE ((uint32_t)1)
#define PLIC_DRV_HAS_COMPAT(compat) \
DT_NODE_HAS_COMPAT(DT_COMPAT_GET_ANY_STATUS_OKAY(DT_DRV_COMPAT), compat)
#if PLIC_DRV_HAS_COMPAT(andestech_nceplic100)
#define PLIC_SUPPORTS_TRIG_TYPE 1
#define PLIC_REG_TRIG_TYPE_WIDTH 1
#define PLIC_REG_TRIG_TYPE_OFFSET 0x1080
#else
/* Trigger-type not supported */
#define PLIC_REG_TRIG_TYPE_WIDTH 0
#endif
/* PLIC registers are 32-bit memory-mapped */
#define PLIC_REG_SIZE 32
#define PLIC_REG_MASK BIT_MASK(LOG2(PLIC_REG_SIZE))
#ifdef CONFIG_TEST_INTC_PLIC
#define INTC_PLIC_STATIC
#else
#define INTC_PLIC_STATIC static inline
#endif
/* Per-instance hook that connects and enables the parent-level IRQ. */
typedef void (*riscv_plic_irq_config_func_t)(void);

/* Constant per-instance configuration, populated from devicetree. */
struct plic_config {
	mem_addr_t prio;	/* base of per-IRQ priority registers */
	mem_addr_t irq_en;	/* base of per-context enable registers */
	mem_addr_t reg;		/* base of per-context threshold/claim registers */
	mem_addr_t trig;	/* trigger-type registers (only set when supported) */
	uint32_t max_prio;	/* highest priority value accepted by the hardware */
	uint32_t num_irqs;	/* number of IRQ lines served by this PLIC */
	riscv_plic_irq_config_func_t irq_config_func;
	struct _isr_table_entry *isr_table; /* this instance's slice of _sw_isr_table */
};
/* Per-IRQ hit counters, consumed by the PLIC shell commands. */
struct plic_stats {
	uint16_t *const irq_count;
	const int irq_count_len;
};
struct plic_data {
	struct plic_stats stats;
};
/* Last claimed IRQ/device, reported via riscv_plic_get_irq()/_get_dev(). */
static uint32_t save_irq;
static const struct device *save_dev;
/* Map a local IRQ line to the index of its 32-bit enable/pending register. */
INTC_PLIC_STATIC uint32_t local_irq_to_reg_index(uint32_t local_irq)
{
	/* 32 IRQ lines per register; unsigned division compiles to a shift. */
	return local_irq / PLIC_REG_SIZE;
}
/* Map a local IRQ line to the byte offset of its 32-bit register. */
INTC_PLIC_STATIC uint32_t local_irq_to_reg_offset(uint32_t local_irq)
{
	uint32_t reg_index = local_irq_to_reg_index(local_irq);

	return reg_index * sizeof(uint32_t);
}
/*
 * Number of 32-bit enable registers covering all IRQ lines of this PLIC.
 * The "+ 1" rounds up so a final partially-used register is included.
 */
static inline uint32_t get_plic_enabled_size(const struct device *dev)
{
	const struct plic_config *config = dev->config;
	return local_irq_to_reg_index(config->num_irqs) + 1;
}
/*
 * First PLIC context index of a hart. Hart 0 is assumed to own a single
 * context (0); every later hart owns two contiguous contexts (M mode then
 * S mode), so its first context is (hartid * 2) - 1.
 */
static inline uint32_t get_first_context(uint32_t hartid)
{
	if (hartid == 0U) {
		return 0U;
	}

	return (hartid * 2U) - 1U;
}
/*
 * Enable-register base for the M-mode context of the hart running cpu_num.
 *
 * Hart 0 is assumed to have one context; later harts have an M-mode and an
 * S-mode context laid out contiguously, and the M-mode one is returned
 * (see get_first_context()). Under SMP the hart id comes from the kernel's
 * per-CPU bookkeeping; otherwise from the current hart's CSR.
 */
static inline mem_addr_t get_context_en_addr(const struct device *dev, uint32_t cpu_num)
{
	const struct plic_config *config = dev->config;
	uint32_t hartid;
	/*
	 * We want to return the irq_en address for the context of given hart.
	 * If hartid is 0, we return the devices irq_en property, job done. If it is
	 * greater than zero, we assume that there are two context's associated with
	 * each hart: M mode enable, followed by S mode enable. We return the M mode
	 * enable address.
	 */
#if CONFIG_SMP
	hartid = _kernel.cpus[cpu_num].arch.hartid;
#else
	hartid = arch_proc_id();
#endif
	return config->irq_en + get_first_context(hartid) * CONTEXT_ENABLE_SIZE;
}
/*
 * Claim/complete register address for the CURRENT hart's M-mode context.
 * Reading it claims the highest-priority pending IRQ; writing the IRQ id
 * back completes it. Unlike get_context_en_addr(), this always uses the
 * calling hart (arch_proc_id()), since claim/complete is inherently local.
 */
static inline mem_addr_t get_claim_complete_addr(const struct device *dev)
{
	const struct plic_config *config = dev->config;
	/*
	 * We want to return the claim complete addr for the hart's context.
	 * We are making a few assumptions here:
	 * 1. for hart 0, return the first context claim complete.
	 * 2. for any other hart, we assume they have two privileged mode contexts
	 *    which are contiguous, where the m mode context is first.
	 * We return the m mode context.
	 */
	return config->reg + get_first_context(arch_proc_id()) * CONTEXT_SIZE +
	       CONTEXT_CLAIM;
}
/*
 * Priority-threshold register address for the M-mode context of the hart
 * running cpu_num. The CONTEXT_THRESHOLD offset (0x00) is implicit: the
 * threshold register sits at the start of each context block.
 */
static inline mem_addr_t get_threshold_priority_addr(const struct device *dev, uint32_t cpu_num)
{
	const struct plic_config *config = dev->config;
	uint32_t hartid;
#if CONFIG_SMP
	hartid = _kernel.cpus[cpu_num].arch.hartid;
#else
	hartid = arch_proc_id();
#endif
	return config->reg + (get_first_context(hartid) * CONTEXT_SIZE);
}
/**
 * @brief Determine the PLIC device from the IRQ
 *
 * With dynamic interrupts the owning controller is looked up through the
 * SW ISR table; otherwise there is a single PLIC instance (0) by construction.
 *
 * @param irq IRQ number
 *
 * @return PLIC device of that IRQ
 */
static inline const struct device *get_plic_dev_from_irq(uint32_t irq)
{
#ifdef CONFIG_DYNAMIC_INTERRUPTS
	return z_get_sw_isr_device_from_irq(irq);
#else
	return DEVICE_DT_INST_GET(0);
#endif
}
/**
 * @brief Return the trigger type of an IRQ line
 *
 * Reads this line's field out of the trigger-type registers and returns it
 * as a plain value (PLIC_TRIG_LEVEL / PLIC_TRIG_EDGE for a 1-bit field) so
 * callers can compare against those constants directly. Returns
 * PLIC_TRIG_LEVEL when the PLIC does not support trigger types.
 *
 * Fixes vs. the original implementation (assuming the trigger registers
 * pack PLIC_REG_SIZE/WIDTH lines per 32-bit register, matching the enable
 * register layout the address computation already uses — TODO confirm on
 * NCEPLIC100):
 *  1. the bit offset is now reduced modulo the register width; previously
 *     `local_irq * WIDTH` exceeded 31 for local_irq >= 32, producing an
 *     out-of-range GENMASK and always reporting "level";
 *  2. the field is shifted down before returning; previously the unshifted
 *     masked bit was returned, so `trig_val == PLIC_TRIG_EDGE` could only
 *     ever match for lines whose field sits at bit 0.
 *
 * @param dev PLIC-instance device
 * @param local_irq PLIC-instance IRQ number
 *
 * @return Trigger type value, PLIC_TRIG_LEVEL when unsupported
 */
static uint32_t __maybe_unused riscv_plic_irq_trig_val(const struct device *dev, uint32_t local_irq)
{
	if (!IS_ENABLED(PLIC_SUPPORTS_TRIG_TYPE)) {
		return PLIC_TRIG_LEVEL;
	}

	const struct plic_config *config = dev->config;
	mem_addr_t trig_addr = config->trig + local_irq_to_reg_offset(local_irq);
	/* Bit position of this line's field within the 32-bit register */
	uint32_t offset = (local_irq & PLIC_REG_MASK) * PLIC_REG_TRIG_TYPE_WIDTH;

	return (sys_read32(trig_addr) >> offset) & BIT_MASK(PLIC_REG_TRIG_TYPE_WIDTH);
}
/*
 * Set (enable=true) or clear (enable=false) the enable bit of an IRQ line
 * in the M-mode context of every CPU. Each register update is a
 * read-modify-write performed with local interrupts locked so a concurrent
 * ISR on this CPU cannot interleave between the read and the write.
 */
static void plic_irq_enable_set_state(uint32_t irq, bool enable)
{
	const struct device *dev = get_plic_dev_from_irq(irq);
	const uint32_t local_irq = irq_from_level_2(irq);

	for (uint32_t cpu_num = 0; cpu_num < arch_num_cpus(); cpu_num++) {
		mem_addr_t en_addr =
			get_context_en_addr(dev, cpu_num) + local_irq_to_reg_offset(local_irq);
		uint32_t en_value;
		uint32_t key;

		key = irq_lock();
		en_value = sys_read32(en_addr);
		WRITE_BIT(en_value, local_irq & PLIC_REG_MASK, enable);
		sys_write32(en_value, en_addr);
		irq_unlock(key);
	}
}
/**
 * @brief Enable a riscv PLIC-specific interrupt line
 *
 * This routine enables a RISCV PLIC-specific interrupt line, for all CPUs.
 * riscv_plic_irq_enable is called by RISCV_PRIVILEGED
 * arch_irq_enable function to enable external interrupts for
 * IRQS level == 2, whenever CONFIG_RISCV_HAS_PLIC variable is set.
 *
 * @param irq IRQ number to enable
 */
void riscv_plic_irq_enable(uint32_t irq)
{
	plic_irq_enable_set_state(irq, true);
}
/**
 * @brief Disable a riscv PLIC-specific interrupt line
 *
 * This routine disables a RISCV PLIC-specific interrupt line, for all CPUs.
 * riscv_plic_irq_disable is called by RISCV_PRIVILEGED
 * arch_irq_disable function to disable external interrupts, for
 * IRQS level == 2, whenever CONFIG_RISCV_HAS_PLIC variable is set.
 *
 * @param irq IRQ number to disable
 */
void riscv_plic_irq_disable(uint32_t irq)
{
	plic_irq_enable_set_state(irq, false);
}
/**
 * @brief Check if a riscv PLIC-specific interrupt line is enabled
 *
 * This routine checks if a RISCV PLIC-specific interrupt line is enabled.
 * Since plic_irq_enable_set_state() programs every CPU identically, the
 * state of CPU 0's context is representative.
 *
 * @param irq IRQ number to check
 *
 * @return 1 or 0
 */
int riscv_plic_irq_is_enabled(uint32_t irq)
{
	const struct device *dev = get_plic_dev_from_irq(irq);
	const uint32_t local_irq = irq_from_level_2(irq);
	mem_addr_t en_addr;
	uint32_t en_value;

	/*
	 * Read back the same enable register that plic_irq_enable_set_state()
	 * writes for CPU 0. The previous implementation read config->irq_en
	 * directly, i.e. hart 0's first context, which disagrees with the
	 * enable path whenever the hart running CPU 0 is not hart 0.
	 */
	en_addr = get_context_en_addr(dev, 0) + local_irq_to_reg_offset(local_irq);
	en_value = sys_read32(en_addr);
	en_value &= BIT(local_irq & PLIC_REG_MASK);

	return !!en_value;
}
/**
 * @brief Set priority of a riscv PLIC-specific interrupt line
 *
 * This routine sets the priority of a RISCV PLIC-specific interrupt line.
 * Values above the instance's maximum supported priority are clamped to it.
 * riscv_plic_set_priority is called by riscv arch_irq_priority_set to set
 * the priority of an interrupt whenever CONFIG_RISCV_HAS_PLIC variable is set.
 *
 * @param irq IRQ number for which to set priority
 * @param priority Priority of IRQ to set to
 */
void riscv_plic_set_priority(uint32_t irq, uint32_t priority)
{
	const struct device *dev = get_plic_dev_from_irq(irq);
	const struct plic_config *config = dev->config;
	const uint32_t local_irq = irq_from_level_2(irq);
	mem_addr_t prio_addr = config->prio + (local_irq * sizeof(uint32_t));

	/* Clamp to the hardware's maximum supported priority. */
	sys_write32((priority > config->max_prio) ? config->max_prio : priority,
		    prio_addr);
}
/**
 * @brief Get riscv PLIC-specific interrupt line causing an interrupt
 *
 * This routine returns the RISCV PLIC-specific interrupt line causing an
 * interrupt, as saved by plic_irq_handler() when it claimed the IRQ (the
 * claim register is cleared on read, so the saved copy is authoritative).
 *
 * @return PLIC-specific interrupt line causing an interrupt.
 */
unsigned int riscv_plic_get_irq(void)
{
	return save_irq;
}
/**
 * @brief Get the PLIC device that serviced the most recent interrupt
 *
 * @return Device pointer saved by plic_irq_handler() at claim time.
 */
const struct device *riscv_plic_get_dev(void)
{
	return save_dev;
}
/*
 * Parent-level ISR for one PLIC instance: claims the pending IRQ, runs its
 * registered handler, and completes the claim. On Andes NCEPLIC100,
 * edge-triggered lines are completed BEFORE the handler runs so the next
 * edge is not lost, while level-triggered lines are completed after.
 */
static void plic_irq_handler(const struct device *dev)
{
	const struct plic_config *config = dev->config;
	mem_addr_t claim_complete_addr = get_claim_complete_addr(dev);
	struct _isr_table_entry *ite;
	uint32_t __maybe_unused trig_val;

	/* Get the IRQ number generating the interrupt (claim: cleared on read) */
	const uint32_t local_irq = sys_read32(claim_complete_addr);

#ifdef CONFIG_PLIC_SHELL
	const struct plic_data *data = dev->data;
	struct plic_stats stat = data->stats;

	/* Cap the count at __UINT16_MAX__ so it saturates instead of wrapping */
	if (stat.irq_count[local_irq] != __UINT16_MAX__) {
		stat.irq_count[local_irq]++;
	}
#endif /* CONFIG_PLIC_SHELL */

	/*
	 * Save IRQ in save_irq. To be used, if need be, by
	 * subsequent handlers registered in the _sw_isr_table table,
	 * as IRQ number held by the claim_complete register is
	 * cleared upon read.
	 */
	save_irq = local_irq;
	save_dev = dev;

	/*
	 * If the IRQ is out of range, call z_irq_spurious.
	 * A call to z_irq_spurious will not return.
	 */
	if (local_irq == 0U || local_irq >= config->num_irqs) {
		z_irq_spurious(NULL);
	}

#if PLIC_DRV_HAS_COMPAT(andestech_nceplic100)
	trig_val = riscv_plic_irq_trig_val(dev, local_irq);
	/*
	 * Edge-triggered interrupts on Andes NCEPLIC100 have to be acknowledged first before
	 * getting handled so that we don't miss on the next edge-triggered interrupt.
	 */
	if (trig_val == PLIC_TRIG_EDGE) {
		sys_write32(local_irq, claim_complete_addr);
	}
#endif

	/* Call the corresponding IRQ handler in _sw_isr_table */
	ite = &config->isr_table[local_irq];
	ite->isr(ite->arg);

	/*
	 * Write to claim_complete register to indicate to
	 * PLIC controller that the IRQ has been handled
	 * for level triggered interrupts.
	 */
#if PLIC_DRV_HAS_COMPAT(andestech_nceplic100)
	/* For NCEPLIC100, handle only if level-triggered (edge already completed above) */
	if (trig_val == PLIC_TRIG_LEVEL) {
		sys_write32(local_irq, claim_complete_addr);
	}
#else
	sys_write32(local_irq, claim_complete_addr);
#endif
}
/**
 * @brief Initialize the Platform Level Interrupt Controller
 *
 * Masks every IRQ line and zeroes the priority threshold for each CPU's
 * context, zeroes every per-IRQ priority, then hooks up and enables the
 * parent-level interrupt via the devicetree-generated config function.
 *
 * @param dev PLIC device struct
 *
 * @retval 0 on success.
 */
static int plic_init(const struct device *dev)
{
	const struct plic_config *config = dev->config;
	mem_addr_t en_addr, thres_prio_addr;
	mem_addr_t prio_addr = config->prio;

	/* Iterate through each of the contexts, HART + PRIV */
	for (uint32_t cpu_num = 0; cpu_num < arch_num_cpus(); cpu_num++) {
		en_addr = get_context_en_addr(dev, cpu_num);
		thres_prio_addr = get_threshold_priority_addr(dev, cpu_num);

		/* Ensure that all interrupts are disabled initially */
		for (uint32_t i = 0; i < get_plic_enabled_size(dev); i++) {
			sys_write32(0U, en_addr + (i * sizeof(uint32_t)));
		}

		/* Set threshold priority to 0 (no IRQ is masked by threshold) */
		sys_write32(0U, thres_prio_addr);
	}

	/* Set priority of each interrupt line to 0 initially */
	for (uint32_t i = 0; i < config->num_irqs; i++) {
		sys_write32(0U, prio_addr + (i * sizeof(uint32_t)));
	}

	/* Configure IRQ for PLIC driver */
	config->irq_config_func();

	return 0;
}
#ifdef CONFIG_PLIC_SHELL
/*
 * Resolve argv[1] to a device binding for the shell commands.
 * Writes the device to *plic and returns 0, or prints an error and
 * returns -ENODEV when no device by that name exists.
 */
static inline int parse_device(const struct shell *sh, size_t argc, char *argv[],
			       const struct device **plic)
{
	ARG_UNUSED(argc);

	*plic = device_get_binding(argv[1]);
	if (*plic == NULL) {
		shell_error(sh, "PLIC device (%s) not found!\n", argv[1]);
		return -ENODEV;
	}

	return 0;
}
/*
 * Shell handler: `plic stats get <device> [minimum hits]`
 * Prints, per IRQ line with more than `min_hit` hits, the hit count and
 * the registered ISR (by symbol name when CONFIG_SYMTAB is available).
 */
static int cmd_get_stats(const struct shell *sh, size_t argc, char *argv[])
{
	const struct device *dev;
	int ret = parse_device(sh, argc, argv, &dev);
	uint16_t min_hit = 0;

	if (ret != 0) {
		return ret;
	}

	const struct plic_config *config = dev->config;
	const struct plic_data *data = dev->data;
	struct plic_stats stat = data->stats;

	/* Optional threshold argument filters out rarely-hit lines */
	if (argc > 2) {
		min_hit = (uint16_t)atoi(argv[2]);
		shell_print(sh, "IRQ line with > %d hits:", min_hit);
	}

	shell_print(sh, " IRQ Hits\tISR(ARG)");
	for (int i = 0; i < stat.irq_count_len; i++) {
		if (stat.irq_count[i] > min_hit) {
#ifdef CONFIG_SYMTAB
			const char *name =
				symtab_find_symbol_name((uintptr_t)config->isr_table[i].isr, NULL);
			shell_print(sh, " %4d %10d\t%s(%p)", i, stat.irq_count[i], name,
				    config->isr_table[i].arg);
#else
			shell_print(sh, " %4d %10d\t%p(%p)", i, stat.irq_count[i],
				    (void *)config->isr_table[i].isr, config->isr_table[i].arg);
#endif /* CONFIG_SYMTAB */
		}
	}
	shell_print(sh, "");

	return 0;
}
/*
 * Shell handler: `plic stats clear <device>`
 * Zeroes every per-IRQ hit counter of the given PLIC instance.
 */
static int cmd_clear_stats(const struct shell *sh, size_t argc, char *argv[])
{
	const struct device *dev;
	int ret = parse_device(sh, argc, argv, &dev);

	if (ret != 0) {
		return ret;
	}

	const struct plic_data *data = dev->data;
	struct plic_stats stat = data->stats;

	memset(stat.irq_count, 0, stat.irq_count_len * sizeof(uint16_t));
	shell_print(sh, "Cleared stats of %s.\n", dev->name);

	return 0;
}
/*
 * Device name autocompletion support: shell calls this with increasing
 * idx until entry->syntax is NULL, enumerating all registered devices.
 */
static void device_name_get(size_t idx, struct shell_static_entry *entry)
{
	const struct device *dev = shell_device_lookup(idx, NULL);

	entry->syntax = (dev != NULL) ? dev->name : NULL;
	entry->handler = NULL;
	entry->help = NULL;
	entry->subcmd = NULL;
}
/* Dynamic subcommand list fed by device_name_get() for tab completion */
SHELL_DYNAMIC_CMD_CREATE(dsub_device_name, device_name_get);

/* `plic stats get|clear <device>` subcommand table */
SHELL_STATIC_SUBCMD_SET_CREATE(plic_stats_cmds,
	SHELL_CMD_ARG(get, &dsub_device_name,
		"Read PLIC's stats.\n"
		"Usage: plic stats get <device> [minimum hits]",
		cmd_get_stats, 2, 1),
	SHELL_CMD_ARG(clear, &dsub_device_name,
		"Reset PLIC's stats.\n"
		"Usage: plic stats clear <device>",
		cmd_clear_stats, 2, 0),
	SHELL_SUBCMD_SET_END
);

/* Top-level `plic` subcommand table */
SHELL_STATIC_SUBCMD_SET_CREATE(plic_cmds,
	SHELL_CMD_ARG(stats, &plic_stats_cmds, "PLIC stats", NULL, 3, 0),
	SHELL_SUBCMD_SET_END
);
/* Fallback handler for `plic` with an unrecognized subcommand */
static int cmd_plic(const struct shell *sh, size_t argc, char **argv)
{
	shell_error(sh, "%s:unknown parameter: %s", argv[0], argv[1]);
	return -EINVAL;
}

SHELL_CMD_ARG_REGISTER(plic, &plic_cmds, "PLIC shell commands",
		       cmd_plic, 2, 0);
/* Number of IRQ lines tracked per instance, capped by Kconfig */
#define PLIC_MIN_IRQ_NUM(n) MIN(DT_INST_PROP(n, riscv_ndev), CONFIG_MAX_IRQ_PER_AGGREGATOR)

/* Static hit-counter buffer backing struct plic_stats for instance n */
#define PLIC_INTC_IRQ_COUNT_BUF_DEFINE(n)                                                          \
	static uint16_t local_irq_count_##n[PLIC_MIN_IRQ_NUM(n)];

/* Per-instance driver data (shell stats); only built with CONFIG_PLIC_SHELL */
#define PLIC_INTC_DATA_INIT(n)                                                                     \
	PLIC_INTC_IRQ_COUNT_BUF_DEFINE(n);                                                         \
	static struct plic_data plic_data_##n = {                                                  \
		.stats = {                                                                         \
			.irq_count = local_irq_count_##n,                                          \
			.irq_count_len = PLIC_MIN_IRQ_NUM(n),                                      \
		},                                                                                 \
	};
#define PLIC_INTC_DATA(n) &plic_data_##n
#else
/* Without the shell, instances carry no driver data */
#define PLIC_INTC_DATA_INIT(...)
#define PLIC_INTC_DATA(n) (NULL)
#endif
#define PLIC_INTC_IRQ_FUNC_DECLARE(n) static void plic_irq_config_func_##n(void)

/* Connects instance n's parent-level IRQ to plic_irq_handler and enables it */
#define PLIC_INTC_IRQ_FUNC_DEFINE(n)                                                               \
	static void plic_irq_config_func_##n(void)                                                 \
	{                                                                                          \
		IRQ_CONNECT(DT_INST_IRQN(n), 0, plic_irq_handler, DEVICE_DT_INST_GET(n), 0);       \
		irq_enable(DT_INST_IRQN(n));                                                       \
	}

/* Builds instance n's const config from devicetree properties */
#define PLIC_INTC_CONFIG_INIT(n)                                                                   \
	PLIC_INTC_IRQ_FUNC_DECLARE(n);                                                             \
	static const struct plic_config plic_config_##n = {                                        \
		.prio = PLIC_BASE_ADDR(n),                                                         \
		.irq_en = PLIC_BASE_ADDR(n) + CONTEXT_ENABLE_BASE,                                 \
		.reg = PLIC_BASE_ADDR(n) + CONTEXT_BASE,                                           \
		IF_ENABLED(PLIC_SUPPORTS_TRIG_TYPE,                                                \
			   (.trig = PLIC_BASE_ADDR(n) + PLIC_REG_TRIG_TYPE_OFFSET,))               \
		.max_prio = DT_INST_PROP(n, riscv_max_priority),                                   \
		.num_irqs = DT_INST_PROP(n, riscv_ndev),                                           \
		.irq_config_func = plic_irq_config_func_##n,                                       \
		.isr_table = &_sw_isr_table[INTC_INST_ISR_TBL_OFFSET(n)],                          \
	};                                                                                         \
	PLIC_INTC_IRQ_FUNC_DEFINE(n)

/* Registers instance n as a 2nd-level aggregator and defines its device */
#define PLIC_INTC_DEVICE_INIT(n)                                                                   \
	IRQ_PARENT_ENTRY_DEFINE(                                                                   \
		plic##n, DEVICE_DT_INST_GET(n), DT_INST_IRQN(n),                                   \
		INTC_INST_ISR_TBL_OFFSET(n),                                                       \
		DT_INST_INTC_GET_AGGREGATOR_LEVEL(n));                                             \
	PLIC_INTC_CONFIG_INIT(n)                                                                   \
	PLIC_INTC_DATA_INIT(n)                                                                     \
	DEVICE_DT_INST_DEFINE(n, &plic_init, NULL,                                                 \
			      PLIC_INTC_DATA(n), &plic_config_##n,                                 \
			      PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY,                             \
			      NULL);

/* Instantiate one driver per enabled devicetree node */
DT_INST_FOREACH_STATUS_OKAY(PLIC_INTC_DEVICE_INIT)
```objective-c
/*
*
*/
#ifndef DMA_IPROC_PAX_V2
#define DMA_IPROC_PAX_V2
#include "dma_iproc_pax.h"
#define RING_COMPLETION_INTERRUPT_STAT_MASK 0x088
#define RING_COMPLETION_INTERRUPT_STAT_CLEAR 0x08c
#define RING_COMPLETION_INTERRUPT_STAT 0x090
#define RING_DISABLE_MSI_TIMEOUT 0x0a4
/* Register RM_COMM_CONTROL fields */
#define RM_COMM_CONTROL_MODE_MASK 0x3
#define RM_COMM_CONTROL_MODE_SHIFT 0
#define RM_COMM_CONTROL_MODE_DOORBELL 0x0
#define RM_COMM_CONTROL_MODE_TOGGLE 0x2
#define RM_COMM_CONTROL_MODE_ALL_BD_TOGGLE 0x3
#define RM_COMM_CONTROL_CONFIG_DONE BIT(2)
#define RM_COMM_CONTROL_LINE_INTR_EN BIT(4)
#define RM_COMM_CONTROL_AE_TIMEOUT_EN BIT(5)
#define RING_DISABLE_MSI_TIMEOUT_VALUE 1
#define PAX_DMA_TYPE_SRC_DESC 0x2
#define PAX_DMA_TYPE_DST_DESC 0x3
#define PAX_DMA_TYPE_MEGA_SRC_DESC 0x6
#define PAX_DMA_TYPE_MEGA_DST_DESC 0x7
#define PAX_DMA_TYPE_PCIE_DESC 0xB
#define PAX_DMA_NUM_BD_BUFFS 9
/* PCIE DESC, either DST or SRC DESC */
#define PAX_DMA_RM_DESC_BDCOUNT 2
/* ascii signature 'V' 'P' */
#define PAX_DMA_WRITE_SYNC_SIGNATURE 0x5650
#define PAX_DMA_PCI_ADDR_MSB8_SHIFT 56
#define PAX_DMA_PCI_ADDR_HI_MSB8(pci) ((pci) >> PAX_DMA_PCI_ADDR_MSB8_SHIFT)
#define PAX_DMA_MAX_SZ_PER_BD (512 * 1024)
#define PAX_DMA_MEGA_LENGTH_MULTIPLE 16
/* Maximum DMA block count supported per request */
#define RM_V2_MAX_BLOCK_COUNT 1024
#define MAX_BD_COUNT_PER_HEADER 30
/*
* Sync payload buffer size is of 4 bytes,4096 Bytes allocated here
* to make sure BD memories fall in 4K alignment.
*/
#define PAX_DMA_RM_SYNC_BUFFER_MISC_SIZE 4096
/*
* Per-ring memory, with 8K & 4K alignment
* Alignment may not be ensured by allocator
* s/w need to allocate extra upto 8K to
* ensure aligned memory space.
*/
#define PAX_DMA_PER_RING_ALLOC_SIZE (PAX_DMA_RM_CMPL_RING_SIZE * 2 + \
PAX_DMA_NUM_BD_BUFFS * \
PAX_DMA_RM_DESC_RING_SIZE + \
PAX_DMA_RM_SYNC_BUFFER_MISC_SIZE)
/* RM (Ring Manager) v2 packet header descriptor, 64-bit packed bit-field. */
struct rm_header {
	uint64_t opq : 16;	/* opaque pkt_id [15:0] */
	uint64_t bdf : 16;	/* [31:16] — field is named bdf but the original
				 * comment said "reserved"; presumably a PCIe
				 * bus/device/function — TODO confirm
				 */
	uint64_t res1 : 4;	/* reserved [35:32] */
	uint64_t bdcount : 5;	/* number of BDs in packet [40:36] */
	uint64_t prot : 2;	/* protection bits [42:41] */
	uint64_t res2 : 1;	/* reserved, bit 43 */
	uint64_t pcie_addr_msb : 8;	/* PCIe address MSB byte [51:44] */
	uint64_t res3 : 4;	/* reserved [55:52] */
	uint64_t start : 1;	/* start-of-packet flag, bit 56 */
	uint64_t end : 1;	/* end-of-packet flag, bit 57 */
	uint64_t res4 : 1;	/* reserved, bit 58 */
	uint64_t toggle : 1;	/* toggle bit, bit 59 */
	uint64_t type : 4;	/* descriptor type [63:60] */
} __attribute__ ((__packed__));
/* PCIe address descriptor (low 56 address bits; MSB byte is in rm_header). */
struct pcie_desc {
	uint64_t pcie_addr_lsb : 56;	/* PCIe address LSBs [55:0] */
	uint64_t res1: 3;	/* reserved [58:56] */
	uint64_t toggle : 1;	/* toggle bit, bit 59 */
	uint64_t type : 4;	/* descriptor type [63:60] */
} __attribute__ ((__packed__));
/* Source/destination descriptor: local AXI address plus transfer length. */
struct src_dst_desc {
	uint64_t axi_addr : 44;	/* AXI address [43:0] */
	uint64_t length : 15;	/* transfer length [58:44] */
	uint64_t toggle : 1;	/* toggle bit, bit 59 */
	uint64_t type : 4;	/* descriptor type [63:60] */
} __attribute__ ((__packed__));
/* Next-pointer descriptor, links to the following BD buffer. */
struct next_ptr_desc {
	uint64_t addr : 44;	/* next BD buffer address [43:0] */
	uint64_t res1 : 15;	/* reserved [58:44] */
	uint64_t toggle : 1;	/* toggle bit, bit 59 */
	uint64_t type : 4;	/* descriptor type [63:60] */
} __attribute__ ((__packed__));
#endif
``` | /content/code_sandbox/drivers/dma/dma_iproc_pax_v2.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,038 |
```objective-c
/*
*
*/
/** @file
* @brief Atmel SAM MCU family Direct Memory Access (XDMAC) driver.
*/
#ifndef ZEPHYR_DRIVERS_DMA_DMA_SAM_XDMAC_H_
#define ZEPHYR_DRIVERS_DMA_DMA_SAM_XDMAC_H_
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
 * XDMA_MBR_UBC: bit definitions for the Microblock Control member of a
 * linked-list descriptor (mbr_ubc in the view structs below).
 * Per the field names: NDE enables fetching the next descriptor, NSEN/NDEN
 * select whether the source/destination parameters are updated from it, and
 * NVIEW selects which descriptor view (NDV0..NDV3) the next descriptor uses.
 */
#define XDMA_UBC_NDE (0x1u << 24)
#define XDMA_UBC_NDE_FETCH_DIS (0x0u << 24)
#define XDMA_UBC_NDE_FETCH_EN (0x1u << 24)
#define XDMA_UBC_NSEN (0x1u << 25)
#define XDMA_UBC_NSEN_UNCHANGED (0x0u << 25)
#define XDMA_UBC_NSEN_UPDATED (0x1u << 25)
#define XDMA_UBC_NDEN (0x1u << 26)
#define XDMA_UBC_NDEN_UNCHANGED (0x0u << 26)
#define XDMA_UBC_NDEN_UPDATED (0x1u << 26)
#define XDMA_UBC_NVIEW_SHIFT 27
#define XDMA_UBC_NVIEW_MASK (0x3u << XDMA_UBC_NVIEW_SHIFT)
#define XDMA_UBC_NVIEW_NDV0 (0x0u << XDMA_UBC_NVIEW_SHIFT)
#define XDMA_UBC_NVIEW_NDV1 (0x1u << XDMA_UBC_NVIEW_SHIFT)
#define XDMA_UBC_NVIEW_NDV2 (0x2u << XDMA_UBC_NVIEW_SHIFT)
#define XDMA_UBC_NVIEW_NDV3 (0x3u << XDMA_UBC_NVIEW_SHIFT)
/** DMA channel configuration parameters */
struct sam_xdmac_channel_config {
/** Configuration Register */
uint32_t cfg;
/** Data Stride / Memory Set Pattern Register */
uint32_t ds_msp;
/** Source Microblock Stride */
uint32_t sus;
/** Destination Microblock Stride */
uint32_t dus;
/** Channel Interrupt Enable */
uint32_t cie;
};
/** DMA transfer configuration parameters */
struct sam_xdmac_transfer_config {
/** Microblock length */
uint32_t ublen;
/** Source Address */
uint32_t sa;
/** Destination Address */
uint32_t da;
/** Block length (The length of the block is (blen+1) microblocks) */
uint32_t blen;
/** Next descriptor address */
uint32_t nda;
/** Next descriptor configuration */
uint32_t ndc;
};
/** DMA Master transfer linked list view 0 structure */
struct sam_xdmac_linked_list_desc_view0 {
/** Next Descriptor Address */
uint32_t mbr_nda;
/** Microblock Control */
uint32_t mbr_ubc;
/** Transfer Address */
uint32_t mbr_ta;
};
/** DMA Master transfer linked list view 1 structure */
struct sam_xdmac_linked_list_desc_view1 {
/** Next Descriptor Address */
uint32_t mbr_nda;
/** Microblock Control */
uint32_t mbr_ubc;
/** Source Address */
uint32_t mbr_sa;
/** Destination Address */
uint32_t mbr_da;
};
/** DMA Master transfer linked list view 2 structure */
struct sam_xdmac_linked_list_desc_view2 {
/** Next Descriptor Address */
uint32_t mbr_nda;
/** Microblock Control */
uint32_t mbr_ubc;
/** Source Address */
uint32_t mbr_sa;
/** Destination Address */
uint32_t mbr_da;
/** Configuration Register */
uint32_t mbr_cfg;
};
/** DMA Master transfer linked list view 3 structure */
struct sam_xdmac_linked_list_desc_view3 {
/** Next Descriptor Address */
uint32_t mbr_nda;
/** Microblock Control */
uint32_t mbr_ubc;
/** Source Address */
uint32_t mbr_sa;
/** Destination Address */
uint32_t mbr_da;
/** Configuration Register */
uint32_t mbr_cfg;
/** Block Control */
uint32_t mbr_bc;
/** Data Stride */
uint32_t mbr_ds;
/** Source Microblock Stride */
uint32_t mbr_sus;
/** Destination Microblock Stride */
uint32_t mbr_dus;
};
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_DRIVERS_DMA_DMA_SAM_XDMAC_H_ */
``` | /content/code_sandbox/drivers/dma/dma_sam_xdmac.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 974 |
```unknown
# Intel ADSP HDA configuration options
config DMA_INTEL_ADSP_HDA_HOST_IN
bool "Intel ADSP HDA Host In DMA drivers"
default y
depends on DT_HAS_INTEL_ADSP_HDA_HOST_IN_ENABLED
depends on DMA
select DMA_INTEL_ADSP_HDA
help
Intel ADSP Host HDA DMA driver.
config DMA_INTEL_ADSP_HDA_HOST_OUT
	bool "Intel ADSP HDA Host Out DMA drivers"
	default y
	depends on DT_HAS_INTEL_ADSP_HDA_HOST_OUT_ENABLED
	# depends on DMA added for consistency with DMA_INTEL_ADSP_HDA_HOST_IN
	depends on DMA
	select DMA_INTEL_ADSP_HDA
	help
	  Intel ADSP Host HDA DMA driver.

config DMA_INTEL_ADSP_HDA_LINK_IN
	bool "Intel ADSP HDA Link In DMA drivers"
	default y
	depends on DT_HAS_INTEL_ADSP_HDA_LINK_IN_ENABLED
	depends on DMA
	select DMA_INTEL_ADSP_HDA
	help
	  Intel ADSP Link In HDA DMA driver.

config DMA_INTEL_ADSP_HDA_LINK_OUT
	bool "Intel ADSP HDA Link Out DMA drivers"
	default y
	depends on DT_HAS_INTEL_ADSP_HDA_LINK_OUT_ENABLED
	depends on DMA
	select DMA_INTEL_ADSP_HDA
	help
	  Intel ADSP Link Out HDA DMA driver.
config DMA_INTEL_ADSP_HDA
bool
depends on DMA_INTEL_ADSP_HDA_LINK_OUT || DMA_INTEL_ADSP_HDA_LINK_IN || DMA_INTEL_ADSP_HDA_HOST_OUT || DMA_INTEL_ADSP_HDA_HOST_IN
help
Intel ADSP HDA DMA driver.
config DMA_INTEL_ADSP_HDA_TIMING_L1_EXIT
bool "Intel ADSP HDA Host L1 Exit Interrupt"
default y if SOC_INTEL_ACE15_MTPM
default y if SOC_INTEL_ACE20_LNL
depends on DMA_INTEL_ADSP_HDA_HOST_IN || DMA_INTEL_ADSP_HDA_HOST_OUT
help
Intel ADSP HDA Host Interrupt for L1 exit.
``` | /content/code_sandbox/drivers/dma/Kconfig.intel_adsp_hda | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 420 |
```unknown
config MCUX_PXP
bool "MCUX PXP DMA driver"
default y
depends on DT_HAS_NXP_PXP_ENABLED
depends on DISPLAY
help
PXP DMA driver for NXP SOCs
``` | /content/code_sandbox/drivers/dma/Kconfig.mcux_pxp | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 49 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.